use crate::error::*;
use rusqlite::{named_params, Connection, OpenFlags, Transaction};
use serde_json::{Map, Value};
use sql_support::ConnExt;
use std::collections::HashSet;
use std::path::Path;
/// A single row read from the legacy `collection_data` table.
struct LegacyRow {
    /// The raw collection name, expected to look like `default/<ext-id>`
    /// (checked later by `parse`).
    col_name: String,
    /// The record payload; `parse` treats it as a JSON object.
    record: String,
}
impl LegacyRow {
    /// Parse the 2 columns from the legacy DB into the data we need to
    /// insert into the new DB, or `None` (after logging why) when the row
    /// can't be migrated.
    fn parse(&self) -> Option<Parsed<'_>> {
        // `strip_prefix` both verifies the prefix and yields the remainder.
        // Unlike the manual byte-slicing it replaces (`&self.col_name[..8]`),
        // it cannot panic when byte offset 8 lands in the middle of a
        // multi-byte UTF-8 character in this externally-sourced data. (Too
        // short and wrong-prefix names now share one trace message.)
        let ext_id = match self.col_name.strip_prefix("default/") {
            Some(rest) => rest,
            None => {
                log::trace!("collection_name of '{}' isn't ours", self.col_name);
                return None;
            }
        };
        // The record column must hold a JSON object.
        let mut record_map = match serde_json::from_str(&self.record) {
            Ok(Value::Object(m)) => m,
            Ok(o) => {
                log::info!("skipping non-json-object 'record' column");
                log::trace!("record value is json, but not an object: {}", o);
                return None;
            }
            Err(e) => {
                log::info!("skipping non-json 'record' column");
                log::trace!("record value isn't json: {}", e);
                return None;
            }
        };
        // The object must carry a non-empty string under "key"...
        let key = match record_map.remove("key") {
            Some(Value::String(s)) if !s.is_empty() => s,
            Some(o) => {
                log::trace!("key is json but not a string: {}", o);
                return None;
            }
            _ => {
                log::trace!("key doesn't exist in the map");
                return None;
            }
        };
        // ...and any JSON value at all under "data".
        let data = match record_map.remove("data") {
            Some(d) => d,
            _ => {
                log::trace!("data doesn't exist in the map");
                return None;
            }
        };
        Some(Parsed { ext_id, key, data })
    }
}
/// The successfully-parsed parts of a `LegacyRow` that we migrate.
struct Parsed<'a> {
    /// The extension id — the part of the row's `col_name` after "default/",
    /// borrowed from the row.
    ext_id: &'a str,
    /// The (non-empty) string found under the record's "key" entry.
    key: String,
    /// The JSON value found under the record's "data" entry.
    data: serde_json::Value,
}
/// Migrate the legacy DB at `filename` into the new storage inside the
/// caller's transaction `tx`, returning counts of what was (and wasn't)
/// migrated. Rows arrive sorted by collection name (see `read_rows`), so we
/// batch consecutive rows per extension and insert each batch in one go.
pub fn migrate(tx: &Transaction<'_>, filename: &Path) -> Result<MigrationInfo> {
    let mut last_ext_id = "".to_string();
    let mut curr_values: Vec<(String, serde_json::Value)> = Vec::new();
    let (rows, mut mi) = read_rows(filename);
    for row in rows {
        log::trace!("processing '{}' - '{}'", row.col_name, row.record);
        // Best-effort: skip unparseable rows, keep migrating the rest.
        let parsed = match row.parse() {
            Some(p) => p,
            None => continue,
        };
        if parsed.ext_id != last_ext_id {
            // A new extension starts here — flush the completed batch first.
            if !last_ext_id.is_empty() && !curr_values.is_empty() {
                let entries = do_insert(tx, &last_ext_id, curr_values)?;
                mi.extensions_successful += 1;
                mi.entries_successful += entries;
            }
            last_ext_id = parsed.ext_id.to_string();
            curr_values = Vec::new();
        }
        // `last_ext_id` now always equals `parsed.ext_id`, so the value
        // unconditionally belongs to the current batch (the previous code
        // re-checked the equality here, which could never be false). `key`
        // is moved rather than cloned — it isn't used again below.
        curr_values.push((parsed.key, parsed.data));
        log::trace!(
            "extension {} now has {} keys",
            parsed.ext_id,
            curr_values.len()
        );
    }
    // Flush the final extension's batch.
    if !last_ext_id.is_empty() && !curr_values.is_empty() {
        let entries = do_insert(tx, &last_ext_id, curr_values)?;
        mi.extensions_successful += 1;
        mi.entries_successful += entries;
    }
    log::info!("migrated {} extensions: {:?}", mi.extensions_successful, mi);
    Ok(mi)
}
/// Read every candidate row from the legacy DB at `filename` (excluding the
/// crypto collection), returning the rows that parsed from SQL successfully
/// plus a `MigrationInfo` pre-filled with the totals we saw. Any failure to
/// open or query the source DB is reported as `open_failure` rather than an
/// error — migration is best-effort.
fn read_rows(filename: &Path) -> (Vec<LegacyRow>, MigrationInfo) {
    let open_flags = OpenFlags::SQLITE_OPEN_NO_MUTEX | OpenFlags::SQLITE_OPEN_READ_ONLY;
    let source = match Connection::open_with_flags(filename, open_flags) {
        Ok(conn) => conn,
        Err(e) => {
            log::warn!("Failed to open the source DB: {}", e);
            return (Vec::new(), MigrationInfo::open_failure());
        }
    };
    // Ordering by collection_name lets `migrate` batch rows per extension.
    let mut select = match source.prepare(
        "SELECT collection_name, record FROM collection_data
         WHERE collection_name != 'default/storage-sync-crypto'
         ORDER BY collection_name",
    ) {
        Ok(stmt) => stmt,
        Err(e) => {
            log::warn!("Failed to prepare the statement: {}", e);
            return (Vec::new(), MigrationInfo::open_failure());
        }
    };
    // Collect eagerly so per-row failures can be counted but discarded.
    let fetched: Vec<Result<LegacyRow>> = match select.query_and_then([], |row| -> Result<LegacyRow> {
        Ok(LegacyRow {
            col_name: row.get(0)?,
            record: row.get(1)?,
        })
    }) {
        Ok(iter) => iter.collect(),
        Err(e) => {
            log::warn!("Failed to read any rows from the source DB: {}", e);
            return (Vec::new(), MigrationInfo::open_failure());
        }
    };
    let entries = fetched.len();
    let rows: Vec<LegacyRow> = fetched.into_iter().filter_map(Result::ok).collect();
    // Each distinct collection_name among the successful rows counts as one
    // extension, whether or not it later migrates cleanly.
    let extensions = rows
        .iter()
        .map(|row| &row.col_name)
        .collect::<HashSet<_>>()
        .len();
    let mi = MigrationInfo {
        entries,
        extensions,
        extensions_successful: 0,
        entries_successful: 0,
        open_failure: false,
    };
    (rows, mi)
}
fn do_insert(tx: &Transaction<'_>, ext_id: &str, vals: Vec<(String, Value)>) -> Result<usize> {
let mut map = Map::with_capacity(vals.len());
for (key, val) in vals {
map.insert(key, val);
}
let num_entries = map.len();
tx.execute_cached(
"INSERT OR REPLACE INTO storage_sync_data(ext_id, data, sync_change_counter)
VALUES (:ext_id, :data, 1)",
rusqlite::named_params! {
":ext_id": &ext_id,
":data": &Value::Object(map),
},
)?;
Ok(num_entries)
}
/// Counts describing how a migration went; serialized to JSON and stashed in
/// the `meta` table by `store`, retrieved-and-removed by `take`.
#[derive(Debug, Clone, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct MigrationInfo {
    /// Total rows read from the source DB, including ones that later failed.
    pub entries: usize,
    /// Entries actually inserted into the new DB.
    pub entries_successful: usize,
    /// Distinct collection names seen among readable rows (malformed names
    /// included).
    pub extensions: usize,
    /// Extensions whose entries were actually inserted into the new DB.
    pub extensions_successful: usize,
    /// True when the source DB couldn't be opened or queried at all.
    pub open_failure: bool,
}
impl MigrationInfo {
    /// The `MigrationInfo` to record when we couldn't even open/query the
    /// source DB — `open_failure` set, every count zero.
    fn open_failure() -> Self {
        Self {
            open_failure: true,
            ..Self::default()
        }
    }

    /// The `meta`-table key under which the serialized info lives.
    const META_KEY: &'static str = "migration_info";

    /// Serialize `self` as JSON into the `meta` table, replacing any
    /// previously-stored value.
    pub(crate) fn store(&self, conn: &Connection) -> Result<()> {
        let json = serde_json::to_string(self)?;
        conn.execute(
            "INSERT OR REPLACE INTO meta(key, value) VALUES (:k, :v)",
            named_params! {
                ":k": Self::META_KEY,
                ":v": &json
            },
        )?;
        Ok(())
    }

    /// Read and delete the stored migration info within the caller's
    /// transaction, returning `Ok(None)` when nothing was stored or when the
    /// stored JSON is invalid (reported, not propagated).
    pub(crate) fn take(tx: &Transaction<'_>) -> Result<Option<Self>> {
        // Fetch first, then delete — both inside `tx`, so a rollback
        // restores the row.
        let s = tx.try_query_one::<String, _>(
            "SELECT value FROM meta WHERE key = :k",
            named_params! {
                ":k": Self::META_KEY,
            },
            false,
        )?;
        tx.execute(
            "DELETE FROM meta WHERE key = :k",
            named_params! {
                ":k": Self::META_KEY,
            },
        )?;
        if let Some(s) = s {
            match serde_json::from_str(&s) {
                Ok(v) => Ok(Some(v)),
                Err(e) => {
                    // We wrote this JSON ourselves, so this "can't happen" —
                    // assert in debug builds, report (rather than fail the
                    // caller) in release.
                    debug_assert!(false, "Failed to read migration JSON: {:?}", e);
                    error_support::report_error!(
                        "webext-storage-migration-json",
                        "Failed to read migration JSON: {}",
                        e
                    );
                    Ok(None)
                }
            }
        } else {
            Ok(None)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::api;
    use crate::db::{test::new_mem_db, StorageDb};
    use serde_json::json;
    use tempfile::tempdir;

    // Create a file DB at `path` with the legacy `collection_data` schema,
    // let `f` populate it inside a transaction, then commit and close.
    fn init_source_db(path: impl AsRef<Path>, f: impl FnOnce(&Connection)) {
        let flags = OpenFlags::SQLITE_OPEN_NO_MUTEX
            | OpenFlags::SQLITE_OPEN_CREATE
            | OpenFlags::SQLITE_OPEN_READ_WRITE;
        let mut conn = Connection::open_with_flags(path, flags).expect("open should work");
        let tx = conn.transaction().expect("should be able to get a tx");
        tx.execute_batch(
            "CREATE TABLE collection_data (
                collection_name TEXT,
                record_id TEXT,
                record TEXT
            );",
        )
        .expect("create should work");
        f(&tx);
        tx.commit().expect("should commit");
        conn.close().expect("close should work");
    }

    // Build a source DB via `f`, migrate it into a fresh in-memory DB, and
    // assert the resulting `MigrationInfo` matches `expect_mi`. Returns the
    // destination DB so callers can inspect the migrated data.
    fn do_migrate<F>(expect_mi: MigrationInfo, f: F) -> StorageDb
    where
        F: FnOnce(&Connection),
    {
        let tmpdir = tempdir().unwrap();
        let path = tmpdir.path().join("source.db");
        init_source_db(path, f);

        // now migrate
        let db = new_mem_db();
        let conn = db.get_connection().expect("should retrieve connection");
        let tx = conn.unchecked_transaction().expect("tx should work");

        let mi = migrate(&tx, &tmpdir.path().join("source.db")).expect("migrate should work");
        tx.commit().expect("should work");
        assert_eq!(mi, expect_mi);
        db
    }

    // Assert that extension `ext_id` in the migrated DB holds exactly
    // `expect` (a null key requests the extension's full value map).
    fn assert_has(c: &Connection, ext_id: &str, expect: Value) {
        assert_eq!(
            api::get(c, ext_id, json!(null)).expect("should get"),
            expect
        );
    }

    // Fixture rows: two migratable extensions (2 + 3 entries), plus the
    // crypto collection which the migration query explicitly excludes.
    const HAPPY_PATH_SQL: &str = r#"
        INSERT INTO collection_data(collection_name, record)
        VALUES
        ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-userWelcomed","key":"userWelcomed","data":1570659224457,"_status":"synced","last_modified":1579755940527}'),
        ('default/{e7fefcf3-b39c-4f17-5215-ebfe120a7031}', '{"id":"key-isWho","key":"isWho","data":"4ec8109f","_status":"synced","last_modified":1579755940497}'),
        ('default/storage-sync-crypto', '{"id":"keys","keys":{"default":["rQ=","lR="],"collections":{"extension@redux.devtools":["Bd=","ju="]}}}'),
        ('default/https-everywhere@eff.org', '{"id":"key-userRules","key":"userRules","data":[],"_status":"synced","last_modified":1570079920045}'),
        ('default/https-everywhere@eff.org', '{"id":"key-ruleActiveStates","key":"ruleActiveStates","data":{},"_status":"synced","last_modified":1570079919993}'),
        ('default/https-everywhere@eff.org', '{"id":"key-migration_5F_version","key":"migration_version","data":2,"_status":"synced","last_modified":1570079919966}')
    "#;
    // Expected counts for HAPPY_PATH_SQL: the crypto row is never read, so
    // all 5 remaining entries across 2 extensions migrate successfully.
    const HAPPY_PATH_MIGRATION_INFO: MigrationInfo = MigrationInfo {
        entries: 5,
        entries_successful: 5,
        extensions: 2,
        extensions_successful: 2,
        open_failure: false,
    };

    #[allow(clippy::unreadable_literal)]
    #[test]
    fn test_happy_paths() {
        let db = do_migrate(HAPPY_PATH_MIGRATION_INFO, |c| {
            c.execute_batch(HAPPY_PATH_SQL).expect("should populate")
        });
        let conn = db.get_connection().expect("should retrieve connection");

        assert_has(
            conn,
            "{e7fefcf3-b39c-4f17-5215-ebfe120a7031}",
            json!({"userWelcomed": 1570659224457u64, "isWho": "4ec8109f"}),
        );
        assert_has(
            conn,
            "https-everywhere@eff.org",
            json!({"userRules": [], "ruleActiveStates": {}, "migration_version": 2}),
        );
    }

    #[test]
    fn test_sad_paths() {
        // Every row is malformed in a different way: all 10 entries are
        // counted, 6 distinct collection names are seen, but nothing
        // migrates successfully.
        do_migrate(
            MigrationInfo {
                entries: 10,
                entries_successful: 0,
                extensions: 6,
                extensions_successful: 0,
                open_failure: false,
            },
            |c| {
                c.execute_batch(
                    r#"INSERT INTO collection_data(collection_name, record)
                    VALUES
                    ('default/test', '{"key":2,"data":1}'), -- key not a string
                    ('default/test', '{"key":"","data":1}'), -- key empty string
                    ('default/test', '{"xey":"k","data":1}'), -- key missing
                    ('default/test', '{"key":"k","xata":1}'), -- data missing
                    ('default/test', '{"key":"k","data":1'), -- invalid json
                    ('xx/test', '{"key":"k","data":1}'), -- bad key format
                    ('default', '{"key":"k","data":1}'), -- bad key format 2
                    ('default/', '{"key":"k","data":1}'), -- bad key format 3
                    ('defaultx/test', '{"key":"k","data":1}'), -- bad key format 4
                    ('', '') -- empty strings
                    "#,
                )
                .expect("should populate");
            },
        );
    }

    #[test]
    fn test_migration_info_storage() {
        let tmpdir = tempdir().unwrap();
        let path = tmpdir.path().join("source.db");
        init_source_db(&path, |c| {
            c.execute_batch(HAPPY_PATH_SQL).expect("should populate")
        });

        let db = crate::store::test::new_mem_store();
        db.migrate(&path).expect("migration should work");
        // The first `take` yields the stored info...
        let mi = db
            .take_migration_info()
            .expect("take failed with info present");
        assert_eq!(mi, Some(HAPPY_PATH_MIGRATION_INFO));
        // ...and, because `take` deletes it, a second `take` yields None.
        let mi2 = db
            .take_migration_info()
            .expect("take failed with info missing");
        assert_eq!(mi2, None);
    }
}