nimbus/stateful/dbcache.rs
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};

use crate::enrollment::{
    EnrolledFeature, EnrolledFeatureConfig, ExperimentEnrollment, map_features_by_feature_id,
};
use crate::error::{NimbusError, Result, warn};
use crate::stateful::enrollment::get_enrollments;
use crate::stateful::gecko_prefs::GeckoPrefStore;
use crate::stateful::persistence::{Database, StoreId, Writer};
use crate::{EnrolledExperiment, Experiment};

// This module manages an in-memory cache of the database, so that some
// functions exposed by nimbus can return results without blocking on any
// IO. Consumers are expected to call our public `commit_and_update()` function
// whenever the database might have changed.
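//
// An illustrative sketch of the expected call pattern (the exact write sequence
// lives in the calling `NimbusClient` code; `db.write()` is assumed here only
// as a way of obtaining a `Writer` and may not match the real persistence API):
//
//     let mut writer = db.write()?;
//     // ... record enrollment changes through `writer` ...
//     cache.commit_and_update(&db, writer, &coenrolling_ids, gecko_pref_store)?;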

// This struct holds the cached data. It is never mutated, but instead
// recreated every time the cache is updated.
struct CachedData {
    pub experiments: Vec<Experiment>,
    pub enrollments: Vec<ExperimentEnrollment>,
    pub experiments_by_slug: HashMap<String, EnrolledExperiment>,
    pub features_by_feature_id: HashMap<String, EnrolledFeatureConfig>,
    pub gecko_pref_to_enrollment_slugs: Option<HashMap<String, HashSet<String>>>,
}

// This is the public cache API. Each NimbusClient can create one of these and
// it lives as long as the client - it encapsulates the synchronization needed
// to allow the cache to work correctly.
#[derive(Default)]
pub struct DatabaseCache {
    data: RwLock<Option<CachedData>>,
}

impl DatabaseCache {
    // Call this function whenever it's possible that anything cached by this
    // struct (e.g. our enrollments) might have changed.
    //
    // This function must be passed a `&Database` and a `Writer`, which it
    // will commit before updating the in-memory cache. This is a slightly weird
    // API but it helps enforce two important properties:
    //
    // * By requiring a `Writer`, we ensure mutual exclusion of other db writers
    //   and thus prevent the possibility of caching stale data.
    // * By taking ownership of the `Writer`, we ensure that the calling code
    //   updates the cache after all of its writes have been performed.
    pub fn commit_and_update(
        &self,
        db: &Database,
        writer: Writer,
        coenrolling_ids: &HashSet<&str>,
        gecko_pref_store: Option<Arc<GeckoPrefStore>>,
    ) -> Result<()> {
        // By passing in the active `writer` we read the state of enrollments
        // as written by the calling code, before it's committed to the db.
        let enrollments = get_enrollments(db, &writer)?;

        // Build a lookup table for experiments by experiment slug.
        // This will be used for get_experiment_branch() and get_active_experiments()
        let mut experiments_by_slug = HashMap::with_capacity(enrollments.len());
        for e in enrollments {
            experiments_by_slug.insert(e.slug.clone(), e);
        }

        let enrollments: Vec<ExperimentEnrollment> =
            db.get_store(StoreId::Enrollments).collect_all(&writer)?;
        let experiments: Vec<Experiment> =
            db.get_store(StoreId::Experiments).collect_all(&writer)?;

        let features_by_feature_id =
            map_features_by_feature_id(&enrollments, &experiments, coenrolling_ids);

        let gecko_pref_to_enrollment_slugs = gecko_pref_store.map(|store| {
            store.map_gecko_prefs_to_enrollment_slugs_and_update_store(
                &experiments,
                &enrollments,
                &experiments_by_slug,
            )
        });

        // This is where testing tools would override, i.e. replace, experimental
        // feature configurations: testing tools would cause custom feature configs
        // to be stored in a Store. Here, we get those overrides out of the store
        // and merge them into this map.

        // This is where rollouts (promoted experiments on a given feature) will be
        // merged into the feature variables.

        let data = CachedData {
            experiments,
            enrollments,
            experiments_by_slug,
            features_by_feature_id,
            gecko_pref_to_enrollment_slugs,
        };

        // Try to commit the change to disk and update the cache as close
        // together in time as possible. This leaves a small window where another
        // thread could read new data from disk but see old data in the cache,
        // but that seems benign in practice given the way we use the cache.
        // The alternative would be to lock the cache while we commit to disk,
        // and we don't want to risk blocking the main thread.
        writer.commit()?;
        let mut cached = self.data.write().unwrap();
        cached.replace(data);
        Ok(())
    }

    // Abstracts safely referencing our cached data.
    //
    // WARNING: because this manages locking, the callers of this need to be
    // careful regarding deadlocks - if the callback takes other locks of its
    // own then there's a risk of locks being taken in an inconsistent order.
    // However, there's nothing this code specifically can do about that.
    fn get_data<T, F>(&self, func: F) -> Result<T>
    where
        F: FnOnce(&CachedData) -> T,
    {
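        // The read lock is held only while `func` runs; `unwrap()` can only
        // panic if another thread panicked while holding the write lock
        // (lock poisoning).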
        match *self.data.read().unwrap() {
            None => {
                warn!("DatabaseCache attempting to read data before initialization is completed");
                Err(NimbusError::DatabaseNotReady)
            }
            Some(ref data) => Ok(func(data)),
        }
    }

    pub fn get_experiment_branch(&self, id: &str) -> Result<Option<String>> {
        self.get_data(|data| -> Option<String> {
            data.experiments_by_slug
                .get(id)
                .map(|experiment| experiment.branch_slug.clone())
        })
    }

    // This gives access to the feature JSON. We pass it as a string because uniffi doesn't
    // support JSON yet.
    pub fn get_feature_config_variables(&self, feature_id: &str) -> Result<Option<String>> {
        self.get_data(|data| {
            let enrolled_feature = data.features_by_feature_id.get(feature_id)?;
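            // Serializing an in-memory `serde_json::Value` back out to a string
            // is not expected to fail, so `unwrap()` is acceptable here.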
            let string = serde_json::to_string(&enrolled_feature.feature.value).unwrap();
            Some(string)
        })
    }

    pub fn get_enrollment_by_feature(&self, feature_id: &str) -> Result<Option<EnrolledFeature>> {
        self.get_data(|data| {
            data.features_by_feature_id
                .get(feature_id)
                .map(|feature| feature.into())
        })
    }

    pub fn get_active_experiments(&self) -> Result<Vec<EnrolledExperiment>> {
        self.get_data(|data| {
            data.experiments_by_slug
                .values()
                .map(|e| e.to_owned())
                .collect::<Vec<EnrolledExperiment>>()
        })
    }

    pub fn get_experiments(&self) -> Result<Vec<Experiment>> {
        self.get_data(|data| data.experiments.to_vec())
    }

    pub fn get_enrollments(&self) -> Result<Vec<ExperimentEnrollment>> {
        self.get_data(|data| data.enrollments.to_owned())
    }

    pub fn get_enrollments_for_pref(&self, pref: &str) -> Result<Option<HashSet<String>>> {
        self.get_data(|data| {
            if let Some(map) = &data.gecko_pref_to_enrollment_slugs {
                Ok(map.get(pref).cloned())
            } else {
                Ok(None)
            }
        })?
    }
}
183}