// quick_cache/lib.rs

//! Lightweight, high performance concurrent cache. It allows very fast access to the cached items
//! with little overhead compared to a plain concurrent hash table. No allocations are ever performed
//! unless the cache internal state table needs growing (which will eventually stabilize).
//!
//! # Eviction policy
//!
//! The current eviction policy is a modified version of the Clock-PRO algorithm, very similar to the
//! later published S3-FIFO algorithm. It's "scan resistant" and provides high hit rates,
//! significantly better than a LRU eviction policy and comparable to other state-of-the-art algorithms
//! like W-TinyLFU.
//!
//! # Thread safety and Concurrency
//!
//! Both `sync` (thread-safe) and `unsync` (non thread-safe) implementations are provided. The latter
//! offers slightly better performance when thread safety is not required.
//!
//! # Equivalent keys
//!
//! The cache uses the [`Equivalent`](https://docs.rs/equivalent/1.0.1/equivalent/trait.Equivalent.html) trait
//! for gets/removals. It can help work around the `Borrow` limitations.
//! For example, if the cache key is a tuple `(K, Q)`, you wouldn't be able to access such keys without
//! building a `&(K, Q)` and thus potentially cloning `K` and/or `Q`.
//!
//! # User defined weight
//!
//! By implementing the [Weighter] trait the user can define different weights for each cache entry.
//!
//! # Atomic operations
//!
//! By using the `get_or_insert` or `get_value_or_guard` family of functions (both sync and async variants
//! are available, and they can be mixed and matched) the user can coordinate the insertion of entries, so only
//! one value is "computed" and inserted after a cache miss.
//!
//! The `entry` family of functions provide a closure-based API for atomically
//! inspecting and acting on existing entries (keep, remove, or replace) while also coordinating
//! insertion on cache misses.
//!
//! # Lifecycle hooks
//!
//! A user can optionally provide a custom [Lifecycle] implementation to hook into the lifecycle of cache entries.
//!
//! Example use cases:
//! * item pinning, so that an item still occupies weight but isn't allowed to be evicted;
//! * sending evicted items to a channel, achieving the equivalent of an eviction listener feature;
//! * zeroing out item weights so they are left in the cache instead of evicted.
//!
//! # Approximate memory usage
//!
//! The memory overhead per entry is `21` bytes.
//!
//! The memory usage of the cache data structures can be estimated as:
//! `(size_of::<K>() + size_of::<V>() + 21) * (length * 1.5).next_power_of_two()`
//!
//! Actual memory usage may vary depending on the cache options and the key and value types, which can have external
//! allocations (e.g. `String`, `Vec`, etc.). The above formula only accounts for the cache's data structures.
//!
//! The `1.5` value in the formula above results from `1 + G`, where `G` is the configured ghost allocation specified
//! in [`OptionsBuilder::ghost_allocation`], which is `0.5` by default.
//!
//! # Hasher
//!
//! By default the crate uses [ahash](https://crates.io/crates/ahash), which is enabled (by default) via
//! a crate feature with the same name. If the `ahash` feature is disabled the crate defaults to the std lib
//! implementation instead (currently SipHash-1-3). Note that a custom hasher can also be provided if desirable.
//!
//! # Synchronization primitives
//!
//! By default the crate uses [parking_lot](https://crates.io/crates/parking_lot), which is enabled (by default) via
//! a crate feature with the same name. If `parking_lot` is disabled and `sharded-lock` is enabled, the crate uses
//! [`crossbeam_utils::sync::ShardedLock`](https://docs.rs/crossbeam-utils/latest/crossbeam_utils/sync/struct.ShardedLock.html)
//! from [crossbeam-utils](https://crates.io/crates/crossbeam-utils) instead. If both are disabled the crate defaults
//! to the std lib implementation. The `parking_lot` and `sharded-lock` features are mutually exclusive.
//!
//! # Cargo Features
//!
//! | Feature | Default | Description |
//! |---------|---------|-------------|
//! | `ahash` | ✓ | Use [ahash](https://crates.io/crates/ahash) as the default hasher. When disabled, falls back to std lib's `RandomState` (currently SipHash-1-3). |
//! | `parking_lot` | ✓ | Use [parking_lot](https://crates.io/crates/parking_lot) for synchronization primitives. Mutually exclusive with `sharded-lock`. |
//! | `sharded-lock` | | Use [`crossbeam_utils::sync::ShardedLock`](https://docs.rs/crossbeam-utils/latest/crossbeam_utils/sync/struct.ShardedLock.html) for synchronization primitives. Mutually exclusive with `parking_lot`. |
//! | `shuttle` | | Enable [shuttle](https://crates.io/crates/shuttle) testing support for concurrency testing. |
//! | `stats` | | Enable cache statistics tracking via the `hits()` and `misses()` methods. |
#![allow(clippy::type_complexity)]
#![cfg_attr(docsrs, feature(doc_cfg))]

// The two lock-provider features can't be enabled at the same time; fail the
// build early with a clear message instead of a confusing downstream error.
#[cfg(all(feature = "parking_lot", feature = "sharded-lock"))]
compile_error!("features `parking_lot` and `sharded-lock` are mutually exclusive");

// `linked_slab` is public under cfg(fuzzing), presumably so fuzz targets can
// reach the internals directly — TODO confirm against the fuzz harness.
#[cfg(not(fuzzing))]
mod linked_slab;
#[cfg(fuzzing)]
pub mod linked_slab;
mod options;
// The custom rw_lock is only built without `shuttle` (shuttle supplies its own shims).
#[cfg(not(feature = "shuttle"))]
mod rw_lock;
mod shard;
mod shim;
/// Concurrent cache variants that can be used from multiple threads.
pub mod sync;
mod sync_placeholder;
/// Non-concurrent cache variants.
pub mod unsync;
pub use equivalent::Equivalent;

#[cfg(all(test, feature = "shuttle"))]
mod shuttle_tests;

pub use options::{Options, OptionsBuilder};

// Default hasher: ahash when the `ahash` feature is on, std's RandomState
// (SipHash) otherwise.
#[cfg(feature = "ahash")]
pub type DefaultHashBuilder = ahash::RandomState;
#[cfg(not(feature = "ahash"))]
pub type DefaultHashBuilder = std::collections::hash_map::RandomState;
114
/// Defines the weight of a cache entry.
///
/// # Example
///
/// ```
/// use quick_cache::{sync::Cache, Weighter};
///
/// #[derive(Clone)]
/// struct StringWeighter;
///
/// impl Weighter<u64, String> for StringWeighter {
///     fn weight(&self, _key: &u64, val: &String) -> u64 {
///         // Be cautious about zero weights!
///         val.len() as u64
///     }
/// }
///
/// let cache = Cache::with_weighter(100, 100_000, StringWeighter);
/// cache.insert(1, "1".to_string());
/// ```
pub trait Weighter<Key, Val> {
    /// Returns the weight of the cache item.
    ///
    /// For performance reasons, this function should be trivially cheap as
    /// it's called during the cache eviction routine.
    /// If weight is expensive to calculate, consider caching it alongside the value.
    ///
    /// Zero (0) weight items are allowed and will be ignored when looking for eviction
    /// candidates. Such items can only be manually removed or overwritten.
    ///
    /// Note that it's undefined behavior for a cache item to change its weight.
    /// The only exception to this is when [`Lifecycle::before_evict`] is called.
    ///
    /// It's also undefined behavior in release mode if the summing of weights overflows,
    /// although this is unlikely to be a problem in practice.
    fn weight(&self, key: &Key, val: &Val) -> u64;
}
152
153/// Each cache entry weights exactly `1` unit of weight.
154#[derive(Debug, Clone, Default)]
155pub struct UnitWeighter;
156
157impl<Key, Val> Weighter<Key, Val> for UnitWeighter {
158    #[inline]
159    fn weight(&self, _key: &Key, _val: &Val) -> u64 {
160        1
161    }
162}
163
/// Hooks into the lifetime of the cache items.
///
/// The functions should be small and very fast, otherwise the cache performance might be negatively affected.
pub trait Lifecycle<Key, Val> {
    /// Per-request state created by `begin_request` and threaded through the
    /// callbacks until `end_request` (or returned by the `_with_lifecycle` APIs).
    type RequestState;

    /// Returns whether the item is pinned. Items that are pinned can't be evicted.
    /// Note that a pinned item can still be replaced with get_mut, insert, replace and similar APIs.
    ///
    /// Compared to zero (0) weight items, pinned items still consume (non-zero) weight even if they can't
    /// be evicted. Furthermore, zero (0) weight items are separated from the other entries, which allows
    /// having a large number of them without impacting performance, but moving them in/out of the evictable
    /// section has a small cost. Pinning on the other hand doesn't separate entries, so during eviction
    /// the cache may visit pinned entries but will ignore them.
    #[allow(unused_variables)]
    #[inline]
    fn is_pinned(&self, key: &Key, val: &Val) -> bool {
        // Default: nothing is pinned.
        false
    }

    /// Called before the insert request starts, e.g.: insert, replace.
    fn begin_request(&self) -> Self::RequestState;

    /// Called when a cache item is about to be evicted.
    /// Note that value replacement (e.g. insertions for the same key) won't call this method.
    ///
    /// This is the only time the item can change its weight. If the item weight becomes zero (0) it
    /// will be left in the cache, otherwise it'll still be removed. Zero (0) weight items aren't evictable
    /// and are kept separated from the other items so it's possible to have a large number of them without
    /// negatively affecting eviction performance.
    #[allow(unused_variables)]
    #[inline]
    fn before_evict(&self, state: &mut Self::RequestState, key: &Key, val: &mut Val) {}

    /// Called when an item is evicted.
    fn on_evict(&self, state: &mut Self::RequestState, key: Key, val: Val);

    /// Called after a request finishes, e.g.: insert, replace.
    ///
    /// Notes:
    /// This will _not_ be called when using `_with_lifecycle` apis, which will return the RequestState instead.
    /// This will _not_ be called if the request errored (e.g. a replace didn't find a value to replace).
    /// If needed, Drop for RequestState can be used to detect these cases.
    #[allow(unused_variables)]
    #[inline]
    fn end_request(&self, state: Self::RequestState) {}
}
211
/// The memory used by the cache.
///
/// This struct exposes some implementation details and may change in the future.
#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
pub struct MemoryUsed {
    /// Memory used by the entries storage.
    pub entries: usize,
    /// Memory used by the index map.
    pub map: usize,
}

impl MemoryUsed {
    /// Total memory used by the cache data structures (`entries + map`).
    #[must_use]
    pub fn total(&self) -> usize {
        // Saturate instead of overflow-panicking in debug builds; the sum of
        // two real allocation sizes can't meaningfully exceed usize::MAX.
        self.entries.saturating_add(self.map)
    }
}
227
228#[cfg(test)]
229mod tests {
230    use std::{
231        hash::Hash,
232        sync::{atomic::AtomicUsize, Arc},
233        time::Duration,
234    };
235
236    use super::*;
237    #[derive(Clone)]
238    struct StringWeighter;
239
240    impl Weighter<u64, String> for StringWeighter {
241        fn weight(&self, _key: &u64, val: &String) -> u64 {
242            val.len() as u64
243        }
244    }
245
246    #[test]
247    fn test_new() {
248        sync::Cache::<(u64, u64), u64>::new(0);
249        sync::Cache::<(u64, u64), u64>::new(1);
250        sync::Cache::<(u64, u64), u64>::new(2);
251        sync::Cache::<(u64, u64), u64>::new(3);
252        sync::Cache::<(u64, u64), u64>::new(usize::MAX);
253        sync::Cache::<u64, u64>::new(0);
254        sync::Cache::<u64, u64>::new(1);
255        sync::Cache::<u64, u64>::new(2);
256        sync::Cache::<u64, u64>::new(3);
257        sync::Cache::<u64, u64>::new(usize::MAX);
258    }
259
260    #[test]
261    fn test_capacity_one() {
262        // Sync cache with capacity 1 should be able to hold one item
263        let cache = sync::Cache::<u64, u64>::new(1);
264        cache.insert(1, 10);
265        assert_eq!(cache.get(&1), Some(10));
266        // Inserting a second key should evict the first
267        cache.insert(2, 20);
268        assert_eq!(cache.get(&2), Some(20));
269        assert_eq!(cache.get(&1), None);
270
271        // Unsync cache with capacity 1 should also work
272        let mut cache = unsync::Cache::<u64, u64>::new(1);
273        cache.insert(1, 10);
274        assert_eq!(cache.get(&1), Some(&10));
275        cache.insert(2, 20);
276        assert_eq!(cache.get(&2), Some(&20));
277        assert_eq!(cache.get(&1), None);
278
279        // Capacity 0 should store nothing
280        let cache = sync::Cache::<u64, u64>::new(0);
281        cache.insert(1, 10);
282        assert_eq!(cache.get(&1), None);
283    }
284
285    #[test]
286    fn test_custom_cost() {
287        let cache = sync::Cache::with_weighter(100, 100_000, StringWeighter);
288        cache.insert(1, "1".to_string());
289        cache.insert(54, "54".to_string());
290        cache.insert(1000, "1000".to_string());
291        assert_eq!(cache.get(&1000).unwrap(), "1000");
292    }
293
    /// Mutating a value through `get_mut` changes its weight; the cache's total
    /// weight must reflect the new value afterwards.
    #[test]
    fn test_change_get_mut_change_weight() {
        let mut cache = unsync::Cache::with_weighter(100, 100_000, StringWeighter);
        cache.insert(1, "1".to_string());
        assert_eq!(cache.get(&1).unwrap(), "1");
        assert_eq!(cache.weight(), 1);
        // The braces scope the `get_mut` guard so its mutable borrow of the
        // cache ends before the cache is used again.
        let _old = {
            cache
                .get_mut(&1)
                .map(|mut v| std::mem::replace(&mut *v, "11".to_string()))
        };
        let _old = {
            cache
                .get_mut(&1)
                .map(|mut v| std::mem::replace(&mut *v, "".to_string()))
        };
        // The value is now empty, so its weight (string length) is 0.
        assert_eq!(cache.get(&1).unwrap(), "");
        assert_eq!(cache.weight(), 0);
        // Check internal invariants.
        cache.validate(false);
    }
314
315    #[derive(Debug, Hash)]
316    pub struct Pair<A, B>(pub A, pub B);
317
318    impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
319    where
320        C: PartialEq<A>,
321        D: PartialEq<B>,
322    {
323        fn eq(&self, rhs: &(A, B)) -> bool {
324            self.0 == rhs.0 && self.1 == rhs.1
325        }
326    }
327
    // A `Pair<A, B>` can stand in for any key type `X` it compares equal to,
    // enabling lookups like `cache.get(&Pair("a", 1))` on `(String, i32)` keys.
    impl<A, B, X> Equivalent<X> for Pair<A, B>
    where
        Pair<A, B>: PartialEq<X>,
        A: Hash + Eq,
        B: Hash + Eq,
    {
        fn equivalent(&self, other: &X) -> bool {
            // Delegate to the `PartialEq<X>` implementation.
            *self == *other
        }
    }
338
339    #[test]
340    fn test_equivalent() {
341        let mut cache = unsync::Cache::new(5);
342        cache.insert(("square".to_string(), 2022), "blue".to_string());
343        cache.insert(("square".to_string(), 2023), "black".to_string());
344        assert_eq!(cache.get(&Pair("square", 2022)).unwrap(), "blue");
345    }
346
    /// Mostly a compile-time check: a `Pair` of borrowed types (`&[u8]`, `&str`)
    /// can query caches keyed by owned types without building owned keys.
    #[test]
    fn test_borrow_keys() {
        let cache = sync::Cache::<(Vec<u8>, Vec<u8>), u64>::new(0);
        cache.get(&Pair(&b""[..], &b""[..]));
        let cache = sync::Cache::<(String, String), u64>::new(0);
        cache.get(&Pair("", ""));
    }
354
    /// Stress test: many threads race `get_or_insert_with` on the same key.
    /// Closure invocations fail (`Err`) until the randomly chosen `solve_at`-th
    /// one succeeds; afterwards no further invocations should happen.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_get_or_insert() {
        use rand::prelude::*;
        for _i in 0..2000 {
            dbg!(_i);
            let mut entered = AtomicUsize::default();
            let cache = sync::Cache::<(u64, u64), u64>::new(100);
            const THREADS: usize = 100;
            let wg = std::sync::Barrier::new(THREADS);
            // Pick at random which closure invocation will succeed.
            let solve_at = rand::rng().random_range(0..THREADS);
            std::thread::scope(|s| {
                for _ in 0..THREADS {
                    s.spawn(|| {
                        // Line all threads up so they hit the cache together.
                        wg.wait();
                        let result = cache.get_or_insert_with(&(1, 1), || {
                            let before = entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                            if before == solve_at {
                                Ok(1)
                            } else {
                                Err(())
                            }
                        });
                        assert!(matches!(result, Ok(1) | Err(())));
                    });
                }
            });
            // The closure ran exactly `solve_at + 1` times: `solve_at` failures
            // followed by the single success that filled the cache.
            assert_eq!(*entered.get_mut(), solve_at + 1);
        }
    }
385
    /// Exercises the unsync miss/guard APIs: a miss yields a guard, inserting
    /// through it populates the cache, and `get_or_insert_with` then returns
    /// the cached value without invoking the closure.
    #[test]
    fn test_get_or_insert_unsync() {
        let mut cache = unsync::Cache::<u64, u64>::new(100);
        // Miss via the by-ref API: Err carries the insertion guard.
        let guard = cache.get_ref_or_guard(&0).unwrap_err();
        guard.insert(0);
        assert_eq!(cache.get_ref_or_guard(&0).ok().copied(), Some(0));
        // Same through the by-mut API.
        let guard = cache.get_mut_or_guard(&1).err().unwrap();
        guard.insert(1);
        let v = *cache.get_mut_or_guard(&1).ok().unwrap().unwrap();
        assert_eq!(v, 1);
        // Hits: the closure must not run (hence the panics).
        let result = cache.get_or_insert_with::<_, ()>(&0, || panic!());
        assert_eq!(result, Ok(Some(&0)));
        let result = cache.get_or_insert_with::<_, ()>(&1, || panic!());
        assert_eq!(result, Ok(Some(&1)));
        // Miss: the closure supplies the value.
        let result = cache.get_or_insert_with::<_, ()>(&3, || Ok(3));
        assert_eq!(result, Ok(Some(&3)));
        // A failing closure propagates its error.
        let result = cache.get_or_insert_with::<_, ()>(&4, || Err(()));
        assert_eq!(result, Err(()));
    }
405
    /// Exercises the sync guard APIs (`get_value_or_guard`, blocking and async)
    /// plus the `get_or_insert_with` / `get_or_insert_async` front-ends.
    #[tokio::test]
    async fn test_get_or_insert_sync() {
        use crate::sync::*;
        let cache = sync::Cache::<u64, u64>::new(100);
        // Miss returns a guard; inserting through it fills the entry.
        let GuardResult::Guard(guard) = cache.get_value_or_guard(&0, None) else {
            panic!();
        };
        guard.insert(0).unwrap();
        let GuardResult::Value(v) = cache.get_value_or_guard(&0, None) else {
            panic!();
        };
        assert_eq!(v, 0);
        // Same dance through the async variant (Err carries the guard).
        let Err(guard) = cache.get_value_or_guard_async(&1).await else {
            panic!();
        };
        guard.insert(1).unwrap();
        let Ok(v) = cache.get_value_or_guard_async(&1).await else {
            panic!();
        };
        assert_eq!(v, 1);

        // Hit: the closure must not run (hence the panic).
        let result = cache.get_or_insert_with::<_, ()>(&0, || panic!());
        assert_eq!(result, Ok(0));
        // Miss: the closure supplies the value; errors propagate.
        let result = cache.get_or_insert_with::<_, ()>(&3, || Ok(3));
        assert_eq!(result, Ok(3));
        let result = cache.get_or_insert_with::<_, ()>(&4, || Err(()));
        assert_eq!(result, Err(()));
        // Async variants: a hit skips the future's body, a miss awaits it.
        let result = cache
            .get_or_insert_async::<_, ()>(&0, async { panic!() })
            .await;
        assert_eq!(result, Ok(0));
        let result = cache
            .get_or_insert_async::<_, ()>(&4, async { Err(()) })
            .await;
        assert_eq!(result, Err(()));
        let result = cache
            .get_or_insert_async::<_, ()>(&4, async { Ok(4) })
            .await;
        assert_eq!(result, Ok(4));
    }
446
447    #[test]
448    fn test_retain_unsync() {
449        let mut cache = unsync::Cache::<u64, u64>::new(100);
450        let ranges = 0..10;
451        for i in ranges.clone() {
452            let guard = cache.get_ref_or_guard(&i).unwrap_err();
453            guard.insert(i);
454            assert_eq!(cache.get_ref_or_guard(&i).ok().copied(), Some(i));
455        }
456        let small = 3;
457        cache.retain(|&key, &val| val > small && key > small);
458        for i in ranges.clone() {
459            let actual = cache.get(&i);
460            if i > small {
461                assert!(actual.is_some());
462                assert_eq!(*actual.unwrap(), i);
463            } else {
464                assert!(actual.is_none());
465            }
466        }
467        let big = 7;
468        cache.retain(|&key, &val| val < big && key < big);
469        for i in ranges {
470            let actual = cache.get(&i);
471            if i > small && i < big {
472                assert!(actual.is_some());
473                assert_eq!(*actual.unwrap(), i);
474            } else {
475                assert!(actual.is_none());
476            }
477        }
478    }
479
480    #[tokio::test]
481    async fn test_retain_sync() {
482        use crate::sync::*;
483        let cache = Cache::<u64, u64>::new(100);
484        let ranges = 0..10;
485        for i in ranges.clone() {
486            let GuardResult::Guard(guard) = cache.get_value_or_guard(&i, None) else {
487                panic!();
488            };
489            guard.insert(i).unwrap();
490            let GuardResult::Value(v) = cache.get_value_or_guard(&i, None) else {
491                panic!();
492            };
493            assert_eq!(v, i);
494        }
495        let small = 4;
496        cache.retain(|&key, &val| val > small && key > small);
497        for i in ranges.clone() {
498            let actual = cache.get(&i);
499            if i > small {
500                assert!(actual.is_some());
501                assert_eq!(actual.unwrap(), i);
502            } else {
503                assert!(actual.is_none());
504            }
505        }
506        let big = 8;
507        cache.retain(|&key, &val| val < big && key < big);
508        for i in ranges {
509            let actual = cache.get(&i);
510            if i > small && i < big {
511                assert!(actual.is_some());
512                assert_eq!(actual.unwrap(), i);
513            } else {
514                assert!(actual.is_none());
515            }
516        }
517    }
518
    /// Stress test mirroring `test_get_or_insert` but through the guard API:
    /// threads spin on `get_value_or_guard` until the randomly chosen winner
    /// inserts through its guard; everyone else must eventually see the value.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_value_or_guard() {
        use crate::sync::*;
        use rand::prelude::*;
        for _i in 0..2000 {
            dbg!(_i);
            let mut entered = AtomicUsize::default();
            let cache = sync::Cache::<(u64, u64), u64>::new(100);
            const THREADS: usize = 100;
            let wg = std::sync::Barrier::new(THREADS);
            // Only the `solve_at`-th guard holder actually inserts.
            let solve_at = rand::rng().random_range(0..THREADS);
            std::thread::scope(|s| {
                for _ in 0..THREADS {
                    s.spawn(|| {
                        wg.wait();
                        loop {
                            match cache.get_value_or_guard(&(1, 1), Some(Duration::from_millis(1)))
                            {
                                // Cached value observed: done.
                                GuardResult::Value(v) => assert_eq!(v, 1),
                                // Got the guard; insert only if we're the winner.
                                GuardResult::Guard(g) => {
                                    let before =
                                        entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                                    if before == solve_at {
                                        g.insert(1).unwrap();
                                    }
                                }
                                // Timed out waiting; retry.
                                GuardResult::Timeout => continue,
                            }
                            break;
                        }
                    });
                }
            });
            // Exactly `solve_at + 1` guards were handed out before the insert.
            assert_eq!(*entered.get_mut(), solve_at + 1);
        }
    }
556
    /// Tokio version of `test_get_or_insert`: tasks race `get_or_insert_async`
    /// on the same key; only the randomly chosen winner's init future resolves
    /// to `Ok`, and its value must end up cached.
    #[tokio::test(flavor = "multi_thread")]
    #[cfg_attr(miri, ignore)]
    async fn test_get_or_insert_async() {
        use rand::prelude::*;
        for _i in 0..5000 {
            dbg!(_i);
            let entered = Arc::new(AtomicUsize::default());
            let cache = Arc::new(sync::Cache::<(u64, u64), u64>::new(100));
            const TASKS: usize = 100;
            let wg = Arc::new(tokio::sync::Barrier::new(TASKS));
            // Pick at random which init future invocation will succeed.
            let solve_at = rand::rng().random_range(0..TASKS);
            let mut tasks = Vec::new();
            for _ in 0..TASKS {
                let cache = cache.clone();
                let wg = wg.clone();
                let entered = entered.clone();
                let task = tokio::spawn(async move {
                    // Line all tasks up before they hit the cache.
                    wg.wait().await;
                    let result = cache
                        .get_or_insert_async(&(1, 1), async {
                            let before = entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                            if before == solve_at {
                                Ok(1)
                            } else {
                                Err(())
                            }
                        })
                        .await;
                    assert!(matches!(result, Ok(1) | Err(())));
                });
                tasks.push(task);
            }
            for task in tasks {
                task.await.unwrap();
            }
            // Winner's value is cached, and the init future ran exactly
            // `solve_at + 1` times.
            assert_eq!(cache.get(&(1, 1)), Some(1));
            assert_eq!(
                entered.load(std::sync::atomic::Ordering::Relaxed),
                solve_at + 1
            );
        }
    }
599
    /// Tokio version of `test_value_or_guard`: tasks race
    /// `get_value_or_guard_async` (bounded by a short timeout) until the
    /// randomly chosen winner inserts through its guard.
    #[tokio::test(flavor = "multi_thread")]
    #[cfg_attr(miri, ignore)]
    async fn test_value_or_guard_async() {
        use rand::prelude::*;
        for _i in 0..5000 {
            dbg!(_i);
            let entered = Arc::new(AtomicUsize::default());
            let cache = Arc::new(sync::Cache::<(u64, u64), u64>::new(100));
            const TASKS: usize = 100;
            let wg = Arc::new(tokio::sync::Barrier::new(TASKS));
            // Only the `solve_at`-th guard holder actually inserts.
            let solve_at = rand::rng().random_range(0..TASKS);
            let mut tasks = Vec::new();
            for _ in 0..TASKS {
                let cache = cache.clone();
                let wg = wg.clone();
                let entered = entered.clone();
                let task = tokio::spawn(async move {
                    wg.wait().await;
                    loop {
                        match tokio::time::timeout(
                            Duration::from_millis(1),
                            cache.get_value_or_guard_async(&(1, 1)),
                        )
                        .await
                        {
                            // Cached value observed: done.
                            Ok(Ok(r)) => assert_eq!(r, 1),
                            // Got the guard; insert only if we're the winner.
                            Ok(Err(g)) => {
                                let before =
                                    entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                                if before == solve_at {
                                    g.insert(1).unwrap();
                                }
                            }
                            // Timed out; retry.
                            Err(_) => continue,
                        }
                        break;
                    }
                });
                tasks.push(task);
            }
            for task in tasks {
                task.await.unwrap();
            }
            // Winner's value is cached; guards were handed out `solve_at + 1` times.
            assert_eq!(cache.get(&(1, 1)), Some(1));
            assert_eq!(
                entered.load(std::sync::atomic::Ordering::Relaxed),
                solve_at + 1
            );
        }
    }
650}