/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

// This contains sync functionality we've managed to share between addresses
// and credit-cards. It's not "generic" in the way that traits are; it's
// literally just code we can share.
// For example, this code doesn't abstract storage away - it knows we are
// using a SQL database and knows that the schemas for addresses and cards are
// very similar.

use crate::error::*;
use interrupt_support::Interruptee;
use rusqlite::{types::ToSql, Connection, Row};
use sync15::bso::OutgoingBso;
use sync15::ServerTimestamp;
use sync_guid::Guid;

/// Stages incoming records (excluding incoming tombstones) in preparation for
/// applying incoming changes to the autofill records being synced.
/// Each incoming record is a String because, for credit-cards, it is the
/// encrypted version of a JSON record.
pub(super) fn common_stage_incoming_records(
    conn: &Connection,
    table_name: &str,
    incoming: Vec<(Guid, String, ServerTimestamp)>,
    signal: &dyn Interruptee,
) -> Result<()> {
    log::info!(
        "staging {} incoming records into {}",
        incoming.len(),
        table_name
    );
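    // Chunk the inserts so we stay under SQLite's bound-variable limit;
    // `chunk_size` is the number of bound columns per row (guid, payload), so
    // the generated VALUES clause looks like, e.g., `(?, ?), (?, ?), ...`.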
    let chunk_size = 2;
    let vals: Vec<(Guid, String)> = incoming
        .into_iter()
        .map(|(guid, data, _)| (guid, data))
        .collect();
    sql_support::each_sized_chunk(
        &vals,
        sql_support::default_max_variable_number() / chunk_size,
        |chunk, _| -> Result<()> {
            signal.err_if_interrupted()?;
            let sql = format!(
                "INSERT OR REPLACE INTO temp.{table_name} (guid, payload)
                 VALUES {vals}",
                table_name = table_name,
                vals = sql_support::repeat_multi_values(chunk.len(), 2)
            );
            let mut params = Vec::with_capacity(chunk.len() * chunk_size);
            for (guid, json) in chunk {
                params.push(guid as &dyn ToSql);
                params.push(json);
            }
            conn.execute(&sql, rusqlite::params_from_iter(params))?;
            Ok(())
        },
    )?;
    log::trace!("staged");
    Ok(())
}

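/// Removes the row with the given `guid` from `table_name`.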
pub(super) fn common_remove_record(conn: &Connection, table_name: &str, guid: &Guid) -> Result<()> {
    conn.execute(
        &format!(
            "DELETE FROM {}
            WHERE guid = :guid",
            table_name
        ),
        rusqlite::named_params! {
            ":guid": guid,
        },
    )?;
    Ok(())
}

// Changes the guid of a local record (bumping its change counter) and, if a
// corresponding row with the old guid exists in the given mirror table,
// updates that row as well.
pub(super) fn common_change_guid(
    conn: &Connection,
    table_name: &str,
    mirror_table_name: &str,
    old_guid: &Guid,
    new_guid: &Guid,
) -> Result<()> {
    assert_ne!(old_guid, new_guid);
    let nrows = conn.execute(
        &format!(
            "UPDATE {}
            SET guid = :new_guid,
            sync_change_counter = sync_change_counter + 1
            WHERE guid = :old_guid",
            table_name
        ),
        rusqlite::named_params! {
            ":old_guid": old_guid,
            ":new_guid": new_guid,
        },
    )?;
    // something's gone badly wrong if this didn't affect exactly 1 row.
    assert_eq!(nrows, 1);

    // If there is also a corresponding mirror row (e.g. in forking situations), update it as well.
    conn.execute(
        &format!(
            "UPDATE {}
                SET guid = :new_guid
                WHERE guid = :old_guid",
            mirror_table_name
        ),
        rusqlite::named_params! {
            ":old_guid": old_guid,
            ":new_guid": new_guid,
        },
    )?;

    Ok(())
}

/// Records in the incoming staging table need to end up in the mirror.
pub(super) fn common_mirror_staged_records(
    conn: &Connection,
    staging_table_name: &str,
    mirror_table_name: &str,
) -> Result<()> {
    conn.execute(
        &format!(
            "INSERT OR REPLACE INTO {} (guid, payload)
             SELECT guid, payload FROM temp.{}",
            mirror_table_name, staging_table_name,
        ),
        [],
    )?;
    Ok(())
}

// A macro for our record merge implementation.
// We allow all "common" fields from the sub-types to be getters on the
// InsertableItem type.
// Macros don't have fine-grained visibility - this one is visible to the
// entire crate - so we give it a very specific name.
#[macro_export]
macro_rules! sync_merge_field_check {
    ($field_name:ident,
    $incoming:ident,
    $local:ident,
    $mirror:ident,
    $merged_record:ident
    ) => {
        let incoming_field = &$incoming.$field_name;
        let local_field = &$local.$field_name;
        let is_local_same;
        let is_incoming_same;

        match &$mirror {
            Some(m) => {
                let mirror_field = &m.$field_name;
                is_local_same = mirror_field == local_field;
                is_incoming_same = mirror_field == incoming_field;
            }
            None => {
                is_local_same = true;
                is_incoming_same = local_field == incoming_field;
            }
        };

        let should_use_local = is_incoming_same || local_field == incoming_field;

        if is_local_same && !is_incoming_same {
            $merged_record.$field_name = incoming_field.clone();
        } else if should_use_local {
            $merged_record.$field_name = local_field.clone();
        } else {
            // There are conflicting differences, so we "fork" the record - we
            // will end up giving the local one a new guid and save the remote
            // one with its incoming ID.
            return MergeResult::Forked {
                forked: get_forked_record($local.clone()),
            };
        }
    };
}
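
// Illustrative usage (the field name here is hypothetical): inside a three-way
// `merge(incoming, local, mirror)` implementation, where `merged_record` starts
// as a copy of one of the records, each shared field is checked in turn:
//
//     sync_merge_field_check!(given_name, incoming, local, mirror, merged_record);
//
// If the local and incoming values both changed and disagree, the macro returns
// `MergeResult::Forked` from the enclosing function.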

pub(super) fn common_get_outgoing_staging_records(
    conn: &Connection,
    data_sql: &str,
    tombstones_sql: &str,
    payload_from_data_row: &dyn Fn(&Row<'_>) -> Result<(OutgoingBso, i64)>,
) -> anyhow::Result<Vec<(OutgoingBso, i64)>> {
    common_get_outgoing_records(conn, data_sql, tombstones_sql, payload_from_data_row)
}

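/// Runs the given SQL and converts each resulting row into an outgoing record
/// using `record_from_data_row`.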
fn get_outgoing_records(
    conn: &Connection,
    sql: &str,
    record_from_data_row: &dyn Fn(&Row<'_>) -> Result<(OutgoingBso, i64)>,
) -> anyhow::Result<Vec<(OutgoingBso, i64)>> {
    conn.prepare(sql)?
        .query_map([], |row| Ok(record_from_data_row(row)))?
        .map(|r| {
            r.unwrap().map_err(|e| {
                log::error!(
                    "Failed to retrieve a record from a row with the following error: {}",
                    e
                );
                e.into()
            })
        })
        .collect::<std::result::Result<Vec<_>, _>>()
}

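/// Fetches all outgoing records: data rows via `data_sql` and tombstones via
/// `tombstone_sql`; tombstones always carry a `sync_change_counter` of 0.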
pub(super) fn common_get_outgoing_records(
    conn: &Connection,
    data_sql: &str,
    tombstone_sql: &str,
    record_from_data_row: &dyn Fn(&Row<'_>) -> Result<(OutgoingBso, i64)>,
) -> anyhow::Result<Vec<(OutgoingBso, i64)>> {
    let mut payload = get_outgoing_records(conn, data_sql, record_from_data_row)?;

    payload.append(&mut get_outgoing_records(conn, tombstone_sql, &|row| {
        Ok((
            OutgoingBso::new_tombstone(Guid::from_string(row.get("guid")?).into()),
            0,
        ))
    })?);

    Ok(payload)
}

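/// Writes outgoing records (guid, payload and the `sync_change_counter` at
/// staging time) into the temp outgoing staging table, chunked to stay under
/// SQLite's bound-variable limit.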
pub(super) fn common_save_outgoing_records(
    conn: &Connection,
    table_name: &str,
    staging_records: Vec<(Guid, String, i64)>,
) -> anyhow::Result<()> {
    let chunk_size = 3;
    sql_support::each_sized_chunk(
        &staging_records,
        sql_support::default_max_variable_number() / chunk_size,
        |chunk, _| -> anyhow::Result<()> {
            let sql = format!(
                "INSERT OR REPLACE INTO temp.{table_name} (guid, payload, sync_change_counter)
                VALUES {staging_records}",
                table_name = table_name,
                staging_records = sql_support::repeat_multi_values(chunk.len(), chunk_size)
            );
            let mut params = Vec::with_capacity(chunk.len() * chunk_size);
            for (guid, json, sync_change_counter) in chunk {
                params.push(guid as &dyn ToSql);
                params.push(json);
                params.push(sync_change_counter);
            }
            conn.execute(&sql, rusqlite::params_from_iter(params))?;
            Ok(())
        },
    )?;
    Ok(())
}

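/// Called after a successful upload: resets the change counter for each
/// uploaded record and copies the outgoing staging table into the mirror.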
pub(super) fn common_finish_synced_items(
    conn: &Connection,
    data_table_name: &str,
    mirror_table_name: &str,
    outgoing_staging_table_name: &str,
    records_synced: Vec<Guid>,
) -> anyhow::Result<()> {
    // Update the local change counter for uploaded items.
    reset_sync_change_counter(
        conn,
        data_table_name,
        outgoing_staging_table_name,
        records_synced,
    )?;
    // Copy from the outgoing staging table into the mirror.
    let sql = format!(
        "INSERT OR REPLACE INTO {mirror_table_name}
            SELECT guid, payload FROM temp.{outgoing_staging_table_name}",
        mirror_table_name = mirror_table_name,
        outgoing_staging_table_name = outgoing_staging_table_name,
    );
    conn.execute(&sql, [])?;
    Ok(())
}

// When we started syncing, we saved each record's `sync_change_counter` in the
// staging table. Now that we've uploaded to the server, we subtract that saved
// value from the current one - anything that ends up non-zero must have changed
// while we were uploading, so it remains dirty. This does that decrement.
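// For example (illustrative numbers): a record staged with a counter of 2 and
// left untouched during the upload ends at 2 - 2 = 0 (clean), while one edited
// once more during the upload ends at 3 - 2 = 1 (still dirty).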
fn reset_sync_change_counter(
    conn: &Connection,
    data_table_name: &str,
    outgoing_table_name: &str,
    records_synced: Vec<Guid>,
) -> anyhow::Result<()> {
    sql_support::each_chunk(&records_synced, |chunk, _| -> anyhow::Result<()> {
        conn.execute(
            &format!(
                // We're making two checks that in practice should be redundant. First, we limit
                // the number of rows pulled from the outgoing staging table to one. Second, we
                // ensure that the updated local records are also in `records_synced`, which should
                // always be the case since the sync fails entirely if the server rejects individual records.
                "UPDATE {data_table_name} AS data
                SET sync_change_counter = sync_change_counter -
                    (
                        SELECT outgoing.sync_change_counter
                        FROM temp.{outgoing_table_name} AS outgoing
                        WHERE outgoing.guid = data.guid LIMIT 1
                    )
                WHERE guid IN ({values})",
                data_table_name = data_table_name,
                outgoing_table_name = outgoing_table_name,
                values = sql_support::repeat_sql_values(chunk.len())
            ),
            rusqlite::params_from_iter(chunk),
        )?;
        Ok(())
    })?;

    Ok(())
}

// And common helpers for tests (although no actual tests!)
#[cfg(test)]
pub(super) mod tests {
    use super::super::*;
    use interrupt_support::NeverInterrupts;
    use serde_json::{json, Value};

    pub(in crate::sync) fn array_to_incoming(vals: Vec<Value>) -> Vec<IncomingBso> {
        vals.into_iter()
            .map(IncomingBso::from_test_content)
            .collect()
    }

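    /// Expands a single char into a 12-char test guid, e.g.
    /// `expand_test_guid('A')` == "AAAAAAAAAAAA".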
    pub(in crate::sync) fn expand_test_guid(c: char) -> String {
        c.to_string().repeat(12)
    }

    pub(in crate::sync) fn test_json_tombstone(guid_prefix: char) -> Value {
        let t = json! {
            {
                "id": expand_test_guid(guid_prefix),
                "deleted": true,
            }
        };
        t
    }

    // Incoming record is identical to a local record.
    pub(in crate::sync) fn do_test_incoming_same<T: SyncRecord + std::fmt::Debug + Clone>(
        ri: &dyn ProcessIncomingRecordImpl<Record = T>,
        tx: &Transaction<'_>,
        record: T,
        bso: IncomingBso,
    ) {
        ri.insert_local_record(tx, record)
            .expect("insert should work");
        ri.stage_incoming(tx, vec![bso], &NeverInterrupts)
            .expect("stage should work");
        let mut states = ri.fetch_incoming_states(tx).expect("fetch should work");
        assert_eq!(states.len(), 1, "1 record == 1 state!");
        let action =
            crate::sync::plan_incoming(ri, tx, states.pop().unwrap()).expect("plan should work");
        // Even though the records are identical, we still merged the metadata
        // so treat this as an Update.
        assert!(matches!(action, crate::sync::IncomingAction::Update { .. }));
    }

    // Incoming tombstone for an existing local record.
    pub(in crate::sync) fn do_test_incoming_tombstone<T: SyncRecord + std::fmt::Debug + Clone>(
        ri: &dyn ProcessIncomingRecordImpl<Record = T>,
        tx: &Transaction<'_>,
        record: T,
    ) {
        let guid = record.id().clone();
        ri.insert_local_record(tx, record)
            .expect("insert should work");
        ri.stage_incoming(
            tx,
            vec![IncomingBso::new_test_tombstone(guid)],
            &NeverInterrupts,
        )
        .expect("stage should work");
        let mut states = ri.fetch_incoming_states(tx).expect("fetch should work");
        assert_eq!(states.len(), 1, "1 record == 1 state!");
        let action =
            crate::sync::plan_incoming(ri, tx, states.pop().unwrap()).expect("plan should work");
        // An incoming tombstone for an existing local record should be
        // applied as a deletion of the local record.
        assert!(matches!(
            action,
            crate::sync::IncomingAction::DeleteLocalRecord { .. }
        ));
    }

    // local record was scrubbed of encrypted data -- we should update it using server data
    pub(in crate::sync) fn do_test_scrubbed_local_data<T: SyncRecord + std::fmt::Debug + Clone>(
        ri: &dyn ProcessIncomingRecordImpl<Record = T>,
        tx: &Transaction<'_>,
        record: T,
        bso: IncomingBso,
    ) {
        ri.insert_local_record(tx, record)
            .expect("insert should work");
        ri.stage_incoming(tx, vec![bso], &NeverInterrupts)
            .expect("stage should work");
        let mut states = ri.fetch_incoming_states(tx).expect("fetch should work");
        assert_eq!(states.len(), 1, "1 record == 1 state!");
        assert!(
            matches!(states[0].local, LocalRecordInfo::Scrubbed { .. }),
            "state should be LocalRecordInfo::Scrubbed but it is: {:?}",
            states[0].local
        );

        let action =
            crate::sync::plan_incoming(ri, tx, states.pop().unwrap()).expect("plan should work");
        assert!(matches!(action, crate::sync::IncomingAction::Update { .. }));
    }

    // "Staged" records are moved to the mirror by finish_incoming().
    pub(in crate::sync) fn do_test_staged_to_mirror<T: SyncRecord + std::fmt::Debug + Clone>(
        ri: &dyn ProcessIncomingRecordImpl<Record = T>,
        tx: &Transaction<'_>,
        record: T,
        bso1: IncomingBso,
        mirror_table_name: &str,
    ) {
        let guid1 = record.id().clone();
        let guid2 = Guid::random();
        let bso2 = IncomingBso::new_test_tombstone(guid2.clone());

        ri.stage_incoming(tx, vec![bso1, bso2], &NeverInterrupts)
            .expect("stage should work");

        ri.finish_incoming(tx).expect("finish should work");

        let sql = format!(
            "SELECT COUNT(*) FROM {} where guid = '{}' OR guid = '{}'",
            mirror_table_name, guid1, guid2
        );
        let num_rows = tx
            .query_row(&sql, [], |row| Ok(row.get::<_, u32>(0).unwrap()))
            .unwrap();
        assert_eq!(num_rows, 2);
    }

    fn exists_in_table(tx: &Transaction<'_>, table_name: &str, guid: &Guid) {
        let sql = format!(
            "SELECT COUNT(*) FROM {} where guid = '{}'",
            table_name, guid
        );
        let num_rows = tx
            .query_row(&sql, [], |row| Ok(row.get::<_, u32>(0).unwrap()))
            .unwrap();
        assert_eq!(num_rows, 1);
    }

    pub(in crate::sync) fn exists_with_counter_value_in_table(
        tx: &Transaction<'_>,
        table_name: &str,
        guid: &Guid,
        expected_counter_value: i64,
    ) {
        let sql = format!(
            "SELECT COUNT(*)
            FROM {table_name}
            WHERE sync_change_counter = {expected_counter_value}
                AND guid = :guid",
            table_name = table_name,
            expected_counter_value = expected_counter_value,
        );

        let num_rows = tx
            .query_row(&sql, [guid], |row| Ok(row.get::<_, u32>(0).unwrap()))
            .unwrap();
        assert_eq!(num_rows, 1);
    }

    pub(in crate::sync) fn do_test_outgoing_never_synced<
        T: SyncRecord + std::fmt::Debug + Clone,
    >(
        tx: &Transaction<'_>,
        ro: &dyn ProcessOutgoingRecordImpl<Record = T>,
        guid: &Guid,
        data_table_name: &str,
        mirror_table_name: &str,
        staging_table_name: &str,
    ) {
        // call fetch outgoing records
        assert!(ro.fetch_outgoing_records(tx).is_ok());

        // check that the record is in the outgoing table
        exists_in_table(tx, &format!("temp.{}", staging_table_name), guid);

        // call finish_synced_items
        assert!(ro.finish_synced_items(tx, vec![guid.clone()]).is_ok());

        // check that the sync change counter was reset to 0
        exists_with_counter_value_in_table(tx, data_table_name, guid, 0);

        // check that the outgoing record is in the mirror
        exists_in_table(tx, mirror_table_name, guid);
    }

    pub(in crate::sync) fn do_test_outgoing_tombstone<T: SyncRecord + std::fmt::Debug + Clone>(
        tx: &Transaction<'_>,
        ro: &dyn ProcessOutgoingRecordImpl<Record = T>,
        guid: &Guid,
        data_table_name: &str,
        mirror_table_name: &str,
        staging_table_name: &str,
    ) {
        // call fetch outgoing records
        assert!(ro.fetch_outgoing_records(tx).is_ok());

        // check that the record is in the outgoing table
        exists_in_table(tx, &format!("temp.{}", staging_table_name), guid);

        // call finish_synced_items
        assert!(ro.finish_synced_items(tx, vec![guid.clone()]).is_ok());

        // check that the record wasn't copied to the data table
        let sql = format!(
            "SELECT COUNT(*) FROM {} where guid = '{}'",
            data_table_name, guid
        );
        let num_rows = tx
            .query_row(&sql, [], |row| Ok(row.get::<_, u32>(0).unwrap()))
            .unwrap();
        assert_eq!(num_rows, 0);

        // check that the outgoing record is in the mirror
        exists_in_table(tx, mirror_table_name, guid);
    }

    pub(in crate::sync) fn do_test_outgoing_synced_with_local_change<
        T: SyncRecord + std::fmt::Debug + Clone,
    >(
        tx: &Transaction<'_>,
        ro: &dyn ProcessOutgoingRecordImpl<Record = T>,
        guid: &Guid,
        data_table_name: &str,
        mirror_table_name: &str,
        staging_table_name: &str,
    ) {
        // call fetch outgoing records
        assert!(ro.fetch_outgoing_records(tx).is_ok());

        // check that the record is in the outgoing table
        exists_in_table(tx, &format!("temp.{}", staging_table_name), guid);

        // call finish_synced_items
        assert!(ro.finish_synced_items(tx, vec![guid.clone()]).is_ok());

        // check that the sync change counter was reset to 0
        exists_with_counter_value_in_table(tx, data_table_name, guid, 0);

        // check that the outgoing record is in the mirror
        exists_in_table(tx, mirror_table_name, guid);
    }

    pub(in crate::sync) fn do_test_outgoing_synced_with_no_change<
        T: SyncRecord + std::fmt::Debug + Clone,
    >(
        tx: &Transaction<'_>,
        ro: &dyn ProcessOutgoingRecordImpl<Record = T>,
        guid: &Guid,
        data_table_name: &str,
        staging_table_name: &str,
    ) {
        // call fetch outgoing records
        assert!(ro.fetch_outgoing_records(tx).is_ok());

        // check that the record is not in the outgoing table
        let sql = format!(
            "SELECT COUNT(*) FROM temp.{} where guid = '{}'",
            staging_table_name, guid
        );
        let num_rows = tx
            .query_row(&sql, [], |row| Ok(row.get::<_, u32>(0).unwrap()))
            .unwrap();
        assert_eq!(num_rows, 0);

        // call finish_synced_items
        assert!(ro.finish_synced_items(tx, Vec::<Guid>::new()).is_ok());

        // check that the sync change counter is unchanged
        exists_with_counter_value_in_table(tx, data_table_name, guid, 0);
    }
}