quick_cache/options.rs
pub const DEFAULT_HOT_ALLOCATION: f64 = 0.97;
pub const DEFAULT_GHOST_ALLOCATION: f64 = 0.5;

/// Cache options. Built with [OptionsBuilder].
#[derive(Debug, Clone)]
pub struct Options {
    pub(crate) shards: usize,
    pub(crate) hot_allocation: f64,
    pub(crate) ghost_allocation: f64,
    pub(crate) estimated_items_capacity: usize,
    pub(crate) weight_capacity: u64,
}

/// Builder for [Options].
///
/// # Example
///
/// ```rust
/// use quick_cache::{sync::{Cache, DefaultLifecycle}, OptionsBuilder, UnitWeighter, DefaultHashBuilder};
///
/// Cache::<String, String>::with_options(
///     OptionsBuilder::new()
///         .estimated_items_capacity(10000)
///         .weight_capacity(10000)
///         .build()
///         .unwrap(),
///     UnitWeighter,
///     DefaultHashBuilder::default(),
///     DefaultLifecycle::default(),
/// );
/// ```
#[derive(Debug, Clone, Default)]
pub struct OptionsBuilder {
    shards: Option<usize>,
    hot_allocation: Option<f64>,
    ghost_allocation: Option<f64>,
    estimated_items_capacity: Option<usize>,
    weight_capacity: Option<u64>,
}

#[derive(Debug, Clone)]
pub struct Error(&'static str);

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}

impl std::error::Error for Error {}

impl OptionsBuilder {
    #[inline]
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the number of internal shards for the sync cache. Each shard has
    /// independent synchronization and capacity, so the cache can be used from
    /// multiple threads with little contention, but the capacity of each shard
    /// is only a portion of the total.
    ///
    /// Defaults to: `number of detected cores * 4`
    ///
    /// Note that this number isn't enforced: it is adjusted internally to the
    /// next power of two, and if the estimated capacity would make individual
    /// shards too small the actual shard count may be reduced.
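    ///
    /// # Example
    ///
    /// A minimal sketch of setting an explicit shard count alongside the
    /// required capacities (the values here are illustrative, not
    /// recommendations):
    ///
    /// ```rust
    /// use quick_cache::OptionsBuilder;
    ///
    /// let options = OptionsBuilder::new()
    ///     .shards(8)
    ///     .estimated_items_capacity(10_000)
    ///     .weight_capacity(10_000)
    ///     .build()
    ///     .unwrap();
    /// ```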
    #[inline]
    pub fn shards(&mut self, shards: usize) -> &mut Self {
        self.shards = Some(shards);
        self
    }

    /// The estimated number of items the cache is expected to hold,
    /// roughly equivalent to `weight_capacity / average item weight`.
    /// An estimate within one or even two orders of magnitude of the real value
    /// is often good enough.
    ///
    /// This is used to estimate the maximum number of shards (to avoid shards
    /// that are too small) and to estimate the space required to track items
    /// recently evicted from the cache.
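    ///
    /// # Example
    ///
    /// An illustrative back-of-the-envelope estimate, assuming a weighter that
    /// reports item weight in bytes: a 64 MiB weight budget with items
    /// averaging roughly 1 KiB suggests an estimate of about 65,536 items.
    ///
    /// ```rust
    /// use quick_cache::OptionsBuilder;
    ///
    /// let weight_capacity: u64 = 64 * 1024 * 1024; // assumed ~64 MiB budget
    /// let avg_item_weight: u64 = 1024; // assumed ~1 KiB per item
    /// let estimated_items = (weight_capacity / avg_item_weight) as usize;
    ///
    /// let options = OptionsBuilder::new()
    ///     .estimated_items_capacity(estimated_items)
    ///     .weight_capacity(weight_capacity)
    ///     .build()
    ///     .unwrap();
    /// ```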
    #[inline]
    pub fn estimated_items_capacity(&mut self, estimated_items_capacity: usize) -> &mut Self {
        self.estimated_items_capacity = Some(estimated_items_capacity);
        self
    }

    /// The max weight that the cache can hold.
    #[inline]
    pub fn weight_capacity(&mut self, weight_capacity: u64) -> &mut Self {
        self.weight_capacity = Some(weight_capacity);
        self
    }

    /// What percentage `[0..=1.0]` of the cache space to reserve for "hot" items.
    /// If your workload exhibits a heavy bias towards recency instead of frequency,
    /// try lowering this setting. In practice the useful range is between 50% and 99%
    /// (usually on the higher side).
    ///
    /// Defaults to: `0.97` (97%).
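    ///
    /// # Example
    ///
    /// A sketch of lowering the hot allocation for a recency-biased workload;
    /// `0.9` is only an illustrative value:
    ///
    /// ```rust
    /// use quick_cache::OptionsBuilder;
    ///
    /// let options = OptionsBuilder::new()
    ///     .estimated_items_capacity(10_000)
    ///     .weight_capacity(10_000)
    ///     .hot_allocation(0.9)
    ///     .build()
    ///     .unwrap();
    /// ```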
    #[inline]
    pub fn hot_allocation(&mut self, hot_allocation: f64) -> &mut Self {
        assert!(
            hot_allocation.clamp(0.0, 1.0) == hot_allocation,
            "hot_allocation must be within [0, 1]"
        );
        self.hot_allocation = Some(hot_allocation);
        self
    }

    /// The cache optimistically tracks recently seen keys that are not resident
    /// in the cache. These keys are called ghost keys. If a ghost key is seen
    /// again, the corresponding item will be admitted as "hot".
    /// The ghost allocation percentage defines how much space to allocate for
    /// ghost keys relative to the `estimated_items_capacity`.
    ///
    /// Defaults to: `0.5` (50%).
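    ///
    /// # Example
    ///
    /// A sketch of tuning the ghost allocation; `0.25` is only an illustrative
    /// value:
    ///
    /// ```rust
    /// use quick_cache::OptionsBuilder;
    ///
    /// let options = OptionsBuilder::new()
    ///     .estimated_items_capacity(10_000)
    ///     .weight_capacity(10_000)
    ///     .ghost_allocation(0.25)
    ///     .build()
    ///     .unwrap();
    /// ```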
    #[inline]
    pub fn ghost_allocation(&mut self, ghost_allocation: f64) -> &mut Self {
        assert!(
            ghost_allocation.clamp(0.0, 1.0) == ghost_allocation,
            "ghost_allocation must be within [0, 1]"
        );
        self.ghost_allocation = Some(ghost_allocation);
        self
    }

    /// Builds an [Options] struct, which can be used in the `Cache::with_options` constructor.
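    ///
    /// # Example
    ///
    /// A minimal sketch: `build` fails if the required fields are missing and
    /// succeeds once `estimated_items_capacity` and `weight_capacity` are set.
    ///
    /// ```rust
    /// use quick_cache::OptionsBuilder;
    ///
    /// // Neither weight_capacity nor estimated_items_capacity is set.
    /// assert!(OptionsBuilder::new().build().is_err());
    ///
    /// let options = OptionsBuilder::new()
    ///     .estimated_items_capacity(10_000)
    ///     .weight_capacity(10_000)
    ///     .build();
    /// assert!(options.is_ok());
    /// ```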
    #[inline]
    pub fn build(&self) -> Result<Options, Error> {
        let shards = self.shards.unwrap_or_else(|| available_parallelism() * 4);
        let hot_allocation = self.hot_allocation.unwrap_or(DEFAULT_HOT_ALLOCATION);
        let ghost_allocation = self.ghost_allocation.unwrap_or(DEFAULT_GHOST_ALLOCATION);
        let weight_capacity = self
            .weight_capacity
            .ok_or(Error("weight_capacity is not set"))?;
        let estimated_items_capacity = self
            .estimated_items_capacity
            .ok_or(Error("estimated_items_capacity is not set"))?;
        Ok(Options {
            shards,
            hot_allocation,
            ghost_allocation,
            estimated_items_capacity,
            weight_capacity,
        })
    }
}

/// Memoized wrapper for `std::thread::available_parallelism`, which can be incredibly slow.
fn available_parallelism() -> usize {
    use std::sync::atomic::{AtomicUsize, Ordering};
    static AVAILABLE_PARALLELISM: AtomicUsize = AtomicUsize::new(0);
    let mut ap = AVAILABLE_PARALLELISM.load(Ordering::Relaxed);
    if ap == 0 {
        ap = std::thread::available_parallelism()
            .map(|n| n.get())
            .unwrap_or(1);
        AVAILABLE_PARALLELISM.store(ap, Ordering::Relaxed);
    }
    ap
}