nimbus/stateful/dbcache.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use crate::{
    enrollment::{
        map_features_by_feature_id, EnrolledFeature, EnrolledFeatureConfig, ExperimentEnrollment,
    },
    error::{warn, NimbusError, Result},
    stateful::{
        enrollment::get_enrollments,
        persistence::{Database, StoreId, Writer},
    },
    EnrolledExperiment, Experiment,
};
use std::collections::{HashMap, HashSet};
use std::sync::RwLock;

// This module manages an in-memory cache of the database, so that some
// functions exposed by nimbus can return results without blocking on any
// IO. Consumers are expected to call our public `commit_and_update()`
// function whenever the database might have changed.

// This struct is the cached data. This is never mutated, but instead
// recreated every time the cache is updated.
struct CachedData {
    pub experiments: Vec<Experiment>,
    pub enrollments: Vec<ExperimentEnrollment>,
    pub experiments_by_slug: HashMap<String, EnrolledExperiment>,
    pub features_by_feature_id: HashMap<String, EnrolledFeatureConfig>,
}

// This is the public cache API. Each NimbusClient can create one of these and
// it lives as long as the client - it encapsulates the synchronization needed
// to allow the cache to work correctly.
#[derive(Default)]
pub struct DatabaseCache {
    data: RwLock<Option<CachedData>>,
}

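// A rough usage sketch, for illustration only: the owning client (e.g. a
// `NimbusClient`) creates one cache, refreshes it after each batch of writes,
// and then serves reads from memory. The `db`, `writer` and `coenrolling_ids`
// values below are assumed to be supplied by the surrounding client code.
//
//     let cache = DatabaseCache::default();
//     // ... perform enrollment writes using `writer` ...
//     cache.commit_and_update(&db, writer, &coenrolling_ids)?;
//     // Reads after this point never block on IO:
//     let branch = cache.get_experiment_branch("my-experiment")?;
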
impl DatabaseCache {
    // Call this function whenever it's possible that anything cached by this
    // struct (e.g., our enrollments) might have changed.
    //
    // This function must be passed a `&Database` and a `Writer`, which it
    // will commit before updating the in-memory cache. This is a slightly weird
    // API but it helps enforce two important properties:
    //
    //  * By requiring a `Writer`, we ensure mutual exclusion of other db writers
    //    and thus prevent the possibility of caching stale data.
    //  * By taking ownership of the `Writer`, we ensure that the calling code
    //    updates the cache after all of its writes have been performed.
    pub fn commit_and_update(
        &self,
        db: &Database,
        writer: Writer,
        coenrolling_ids: &HashSet<&str>,
    ) -> Result<()> {
        // By passing in the active `writer` we read the state of enrollments
        // as written by the calling code, before it's committed to the db.
        let enrollments = get_enrollments(db, &writer)?;

        // Build a lookup table for experiments by experiment slug.
        // This will be used for get_experiment_branch() and get_active_experiments().
        let mut experiments_by_slug = HashMap::with_capacity(enrollments.len());
        for e in enrollments {
            experiments_by_slug.insert(e.slug.clone(), e);
        }

        let enrollments: Vec<ExperimentEnrollment> =
            db.get_store(StoreId::Enrollments).collect_all(&writer)?;
        let experiments: Vec<Experiment> =
            db.get_store(StoreId::Experiments).collect_all(&writer)?;

        let features_by_feature_id =
            map_features_by_feature_id(&enrollments, &experiments, coenrolling_ids);

        // This is where testing tools would override (i.e. replace) experimental feature
        // configurations: they would cause custom feature configs to be stored in a Store.
        // Here, we get those overrides out of the store and merge them with this map.

        // This is where rollouts (promoted experiments on a given feature) will be merged
        // into the feature variables.

        let data = CachedData {
            experiments,
            enrollments,
            experiments_by_slug,
            features_by_feature_id,
        };

        // Try to commit the change to disk and update the cache as close
        // together in time as possible. This leaves a small window where another
        // thread could read new data from disk but see old data in the cache,
        // but that seems benign in practice given the way we use the cache.
        // The alternative would be to lock the cache while we commit to disk,
        // and we don't want to risk blocking the main thread.
        writer.commit()?;
        let mut cached = self.data.write().unwrap();
        cached.replace(data);
        Ok(())
    }
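
    // Calling pattern (sketch only): the step that writes enrollments, and the
    // `db.write()` call used to open the `Writer`, are assumptions about the
    // surrounding persistence layer rather than part of this file. Because
    // `commit_and_update` consumes the `Writer`, the caller cannot keep writing
    // after the cache has been refreshed.
    //
    //     let mut writer = db.write()?;
    //     // ... write updated enrollments using `writer` ...
    //     database_cache.commit_and_update(&db, writer, &coenrolling_ids)?;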

    // Abstracts safely referencing our cached data.
    //
    // WARNING: because this manages locking, the callers of this need to be
    // careful regarding deadlocks - if the callback takes other locks then
    // there's a risk of locks being taken in an inconsistent order. However,
    // there's nothing this code specifically can do about that.
    fn get_data<T, F>(&self, func: F) -> Result<T>
    where
        F: FnOnce(&CachedData) -> T,
    {
        match *self.data.read().unwrap() {
            None => {
                warn!("DatabaseCache attempting to read data before initialization is completed");
                Err(NimbusError::DatabaseNotReady)
            }
            Some(ref data) => Ok(func(data)),
        }
    }

    pub fn get_experiment_branch(&self, id: &str) -> Result<Option<String>> {
        self.get_data(|data| -> Option<String> {
            data.experiments_by_slug
                .get(id)
                .map(|experiment| experiment.branch_slug.clone())
        })
    }

    // This gives access to the feature JSON. We pass it as a string because uniffi doesn't
    // support JSON yet.
    pub fn get_feature_config_variables(&self, feature_id: &str) -> Result<Option<String>> {
        self.get_data(|data| {
            let enrolled_feature = data.features_by_feature_id.get(feature_id)?;
            let string = serde_json::to_string(&enrolled_feature.feature.value).unwrap();
            Some(string)
        })
    }
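
    // Sketch of the consumer side of `get_feature_config_variables` above. The
    // feature id "onboarding" and the local names are hypothetical; the point is
    // only that the returned string is the JSON-serialized feature value, which
    // callers parse back into structured data (e.g. with serde_json in Rust):
    //
    //     if let Some(json) = cache.get_feature_config_variables("onboarding")? {
    //         let variables: serde_json::Value = serde_json::from_str(&json)?;
    //     }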

    pub fn get_enrollment_by_feature(&self, feature_id: &str) -> Result<Option<EnrolledFeature>> {
        self.get_data(|data| {
            data.features_by_feature_id
                .get(feature_id)
                .map(|feature| feature.into())
        })
    }

    pub fn get_active_experiments(&self) -> Result<Vec<EnrolledExperiment>> {
        self.get_data(|data| {
            data.experiments_by_slug
                .values()
                .map(|e| e.to_owned())
                .collect::<Vec<EnrolledExperiment>>()
        })
    }

    pub fn get_experiments(&self) -> Result<Vec<Experiment>> {
        self.get_data(|data| data.experiments.to_vec())
    }

    pub fn get_enrollments(&self) -> Result<Vec<ExperimentEnrollment>> {
        self.get_data(|data| data.enrollments.to_owned())
    }
}