// nimbus/stateful/dbcache.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
4
use crate::{
    enrollment::{
        map_features_by_feature_id, EnrolledFeature, EnrolledFeatureConfig, ExperimentEnrollment,
    },
    error::{warn, NimbusError, Result},
    stateful::{
        enrollment::get_enrollments,
        gecko_prefs::GeckoPrefStore,
        persistence::{Database, StoreId, Writer},
    },
    EnrolledExperiment, Experiment,
};
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};

// This module manages an in-memory cache of the database, so that some
// functions exposed by nimbus can return results without blocking on any
// IO. Consumers are expected to call our public `commit_and_update()`
// function whenever the database might have changed.

// This struct is the cached data. This is never mutated, but instead
// recreated every time the cache is updated.
struct CachedData {
    // All experiments as read from the Experiments store.
    pub experiments: Vec<Experiment>,
    // All enrollments as read from the Enrollments store.
    pub enrollments: Vec<ExperimentEnrollment>,
    // Active enrolled experiments, keyed by experiment slug; backs
    // `get_experiment_branch()` and `get_active_experiments()`.
    pub experiments_by_slug: HashMap<String, EnrolledExperiment>,
    // Enrolled feature configurations keyed by feature id.
    pub features_by_feature_id: HashMap<String, EnrolledFeatureConfig>,
    // Gecko pref name -> slugs of enrollments touching that pref; `None`
    // when no `GeckoPrefStore` was supplied to `commit_and_update()`.
    pub gecko_pref_to_enrollment_slugs: Option<HashMap<String, HashSet<String>>>,
}

// This is the public cache API. Each NimbusClient can create one of these and
// it lives as long as the client - it encapsulates the synchronization needed
// to allow the cache to work correctly.
#[derive(Default)]
pub struct DatabaseCache {
    // `None` until the first successful `commit_and_update()`; readers get
    // `NimbusError::DatabaseNotReady` until then (see `get_data()`).
    data: RwLock<Option<CachedData>>,
}

43impl DatabaseCache {
44    // Call this function whenever it's possible that anything cached by this
45    // struct (eg, our enrollments) might have changed.
46    //
47    // This function must be passed a `&Database` and a `Writer`, which it
48    // will commit before updating the in-memory cache. This is a slightly weird
49    // API but it helps encorce two important properties:
50    //
51    //  * By requiring a `Writer`, we ensure mutual exclusion of other db writers
52    //    and thus prevent the possibility of caching stale data.
53    //  * By taking ownership of the `Writer`, we ensure that the calling code
54    //    updates the cache after all of its writes have been performed.
55    pub fn commit_and_update(
56        &self,
57        db: &Database,
58        writer: Writer,
59        coenrolling_ids: &HashSet<&str>,
60        gecko_pref_store: Option<Arc<GeckoPrefStore>>,
61    ) -> Result<()> {
62        // By passing in the active `writer` we read the state of enrollments
63        // as written by the calling code, before it's committed to the db.
64        let enrollments = get_enrollments(db, &writer)?;
65
66        // Build a lookup table for experiments by experiment slug.
67        // This will be used for get_experiment_branch() and get_active_experiments()
68        let mut experiments_by_slug = HashMap::with_capacity(enrollments.len());
69        for e in enrollments {
70            experiments_by_slug.insert(e.slug.clone(), e);
71        }
72
73        let enrollments: Vec<ExperimentEnrollment> =
74            db.get_store(StoreId::Enrollments).collect_all(&writer)?;
75        let experiments: Vec<Experiment> =
76            db.get_store(StoreId::Experiments).collect_all(&writer)?;
77
78        let features_by_feature_id =
79            map_features_by_feature_id(&enrollments, &experiments, coenrolling_ids);
80
81        let gecko_pref_to_enrollment_slugs = gecko_pref_store.map(|store| {
82            store.map_gecko_prefs_to_enrollment_slugs_and_update_store(
83                &experiments,
84                &enrollments,
85                &experiments_by_slug,
86            )
87        });
88
89        // This is where testing tools would override i.e. replace experimental feature configurations.
90        // i.e. testing tools would cause custom feature configs to be stored in a Store.
91        // Here, we get those overrides out of the store, and merge it with this map.
92
93        // This is where rollouts (promoted experiments on a given feature) will be merged in to the feature variables.
94
95        let data = CachedData {
96            experiments,
97            enrollments,
98            experiments_by_slug,
99            features_by_feature_id,
100            gecko_pref_to_enrollment_slugs,
101        };
102
103        // Try to commit the change to disk and update the cache as close
104        // together in time as possible. This leaves a small window where another
105        // thread could read new data from disk but see old data in the cache,
106        // but that seems benign in practice given the way we use the cache.
107        // The alternative would be to lock the cache while we commit to disk,
108        // and we don't want to risk blocking the main thread.
109        writer.commit()?;
110        let mut cached = self.data.write().unwrap();
111        cached.replace(data);
112        Ok(())
113    }
114
115    // Abstracts safely referencing our cached data.
116    //
117    // WARNING: because this manages locking, the callers of this need to be
118    // careful regarding deadlocks - if the callback takes other own locks then
119    // there's a risk of locks being taken in an inconsistent order. However,
120    // there's nothing this code specifically can do about that.
121    fn get_data<T, F>(&self, func: F) -> Result<T>
122    where
123        F: FnOnce(&CachedData) -> T,
124    {
125        match *self.data.read().unwrap() {
126            None => {
127                warn!("DatabaseCache attempting to read data before initialization is completed");
128                Err(NimbusError::DatabaseNotReady)
129            }
130            Some(ref data) => Ok(func(data)),
131        }
132    }
133
134    pub fn get_experiment_branch(&self, id: &str) -> Result<Option<String>> {
135        self.get_data(|data| -> Option<String> {
136            data.experiments_by_slug
137                .get(id)
138                .map(|experiment| experiment.branch_slug.clone())
139        })
140    }
141
142    // This gives access to the feature JSON. We pass it as a string because uniffi doesn't
143    // support JSON yet.
144    pub fn get_feature_config_variables(&self, feature_id: &str) -> Result<Option<String>> {
145        self.get_data(|data| {
146            let enrolled_feature = data.features_by_feature_id.get(feature_id)?;
147            let string = serde_json::to_string(&enrolled_feature.feature.value).unwrap();
148            Some(string)
149        })
150    }
151
152    pub fn get_enrollment_by_feature(&self, feature_id: &str) -> Result<Option<EnrolledFeature>> {
153        self.get_data(|data| {
154            data.features_by_feature_id
155                .get(feature_id)
156                .map(|feature| feature.into())
157        })
158    }
159
160    pub fn get_active_experiments(&self) -> Result<Vec<EnrolledExperiment>> {
161        self.get_data(|data| {
162            data.experiments_by_slug
163                .values()
164                .map(|e| e.to_owned())
165                .collect::<Vec<EnrolledExperiment>>()
166        })
167    }
168
169    pub fn get_experiments(&self) -> Result<Vec<Experiment>> {
170        self.get_data(|data| data.experiments.to_vec())
171    }
172
173    pub fn get_enrollments(&self) -> Result<Vec<ExperimentEnrollment>> {
174        self.get_data(|data| data.enrollments.to_owned())
175    }
176
177    pub fn get_enrollments_for_pref(&self, pref: &str) -> Result<Option<HashSet<String>>> {
178        self.get_data(|data| {
179            if let Some(a) = &data.gecko_pref_to_enrollment_slugs {
180                Ok(a.get(pref).cloned())
181            } else {
182                Ok(None)
183            }
184        })?
185    }
186}