1#![allow(clippy::type_complexity)]
84#![cfg_attr(docsrs, feature(doc_cfg))]
85
86#[cfg(all(feature = "parking_lot", feature = "sharded-lock"))]
87compile_error!("features `parking_lot` and `sharded-lock` are mutually exclusive");
88
89#[cfg(not(fuzzing))]
90mod linked_slab;
91#[cfg(fuzzing)]
92pub mod linked_slab;
93mod options;
94#[cfg(not(feature = "shuttle"))]
95mod rw_lock;
96mod shard;
97mod shim;
98pub mod sync;
100mod sync_placeholder;
101pub mod unsync;
103pub use equivalent::Equivalent;
104
105#[cfg(all(test, feature = "shuttle"))]
106mod shuttle_tests;
107
108pub use options::{Options, OptionsBuilder};
109
/// Default hash builder for the caches when the `ahash` feature is
/// enabled: the faster, non-SipHash `ahash::RandomState`.
#[cfg(feature = "ahash")]
pub type DefaultHashBuilder = ahash::RandomState;
/// Default hash builder for the caches when the `ahash` feature is
/// disabled: the std SipHash-based `RandomState`.
#[cfg(not(feature = "ahash"))]
pub type DefaultHashBuilder = std::collections::hash_map::RandomState;
114
/// Assigns a weight (cost) to each cache entry.
///
/// The cache's capacity is expressed in the same units as these
/// weights, so the weighter determines how many entries fit.
/// Implementations should be cheap and deterministic for a given
/// key/value pair.
pub trait Weighter<Key, Val> {
    /// Returns the weight of the entry `(key, val)`.
    fn weight(&self, key: &Key, val: &Val) -> u64;
}
152
/// A [`Weighter`] that charges every entry a weight of 1, which makes
/// the cache capacity a plain entry count.
#[derive(Debug, Clone, Default)]
pub struct UnitWeighter;

impl<Key, Val> Weighter<Key, Val> for UnitWeighter {
    #[inline]
    fn weight(&self, _key: &Key, _val: &Val) -> u64 {
        // Every entry costs exactly one unit regardless of key/value.
        1
    }
}
163
/// Hooks invoked by the cache at key points of an entry's life
/// (pin checks and eviction), threaded through per-request state.
pub trait Lifecycle<Key, Val> {
    /// Per-request scratch state, created by [`Lifecycle::begin_request`]
    /// and consumed by [`Lifecycle::end_request`].
    type RequestState;

    /// Whether the entry is pinned; presumably pinned entries are
    /// protected from eviction — confirm against the cache internals.
    /// The default pins nothing.
    #[allow(unused_variables)]
    #[inline]
    fn is_pinned(&self, key: &Key, val: &Val) -> bool {
        false
    }

    /// Called at the start of a cache request; the returned state is
    /// passed to the other callbacks for the duration of that request.
    fn begin_request(&self) -> Self::RequestState;

    /// Called just before `(key, val)` is evicted; receives `val` by
    /// `&mut`, so the value can still be modified. Default: no-op.
    #[allow(unused_variables)]
    #[inline]
    fn before_evict(&self, state: &mut Self::RequestState, key: &Key, val: &mut Val) {}

    /// Called when an entry is evicted; takes ownership of key and value.
    fn on_evict(&self, state: &mut Self::RequestState, key: Key, val: Val);

    /// Called at the end of a request, consuming the state created by
    /// [`Lifecycle::begin_request`]. Default: no-op.
    #[allow(unused_variables)]
    #[inline]
    fn end_request(&self, state: Self::RequestState) {}
}
211
/// Breakdown of a cache's memory usage, in bytes.
///
/// Marked `#[non_exhaustive]` so additional components can be reported
/// in future versions without a breaking change.
#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
pub struct MemoryUsed {
    /// Bytes attributed to the stored entries.
    pub entries: usize,
    /// Bytes attributed to the internal map structure.
    pub map: usize,
}

impl MemoryUsed {
    /// Total memory used, in bytes.
    ///
    /// Saturates at `usize::MAX` instead of overflowing — the previous
    /// unchecked `+` would panic in debug builds if the components'
    /// sum exceeded `usize::MAX`.
    pub fn total(&self) -> usize {
        self.entries.saturating_add(self.map)
    }
}
227
228#[cfg(test)]
229mod tests {
230 use std::{
231 hash::Hash,
232 sync::{atomic::AtomicUsize, Arc},
233 time::Duration,
234 };
235
236 use super::*;
    // Test weighter: the weight of a String value is its length in
    // bytes, so entry cost scales with the stored data.
    #[derive(Clone)]
    struct StringWeighter;

    impl Weighter<u64, String> for StringWeighter {
        fn weight(&self, _key: &u64, val: &String) -> u64 {
            val.len() as u64
        }
    }
245
    // Construction smoke test: caches with corner-case capacities
    // (0, tiny, usize::MAX) and both tuple and scalar keys must build
    // without panicking.
    #[test]
    fn test_new() {
        sync::Cache::<(u64, u64), u64>::new(0);
        sync::Cache::<(u64, u64), u64>::new(1);
        sync::Cache::<(u64, u64), u64>::new(2);
        sync::Cache::<(u64, u64), u64>::new(3);
        sync::Cache::<(u64, u64), u64>::new(usize::MAX);
        sync::Cache::<u64, u64>::new(0);
        sync::Cache::<u64, u64>::new(1);
        sync::Cache::<u64, u64>::new(2);
        sync::Cache::<u64, u64>::new(3);
        sync::Cache::<u64, u64>::new(usize::MAX);
    }
259
260 #[test]
261 fn test_capacity_one() {
262 let cache = sync::Cache::<u64, u64>::new(1);
264 cache.insert(1, 10);
265 assert_eq!(cache.get(&1), Some(10));
266 cache.insert(2, 20);
268 assert_eq!(cache.get(&2), Some(20));
269 assert_eq!(cache.get(&1), None);
270
271 let mut cache = unsync::Cache::<u64, u64>::new(1);
273 cache.insert(1, 10);
274 assert_eq!(cache.get(&1), Some(&10));
275 cache.insert(2, 20);
276 assert_eq!(cache.get(&2), Some(&20));
277 assert_eq!(cache.get(&1), None);
278
279 let cache = sync::Cache::<u64, u64>::new(0);
281 cache.insert(1, 10);
282 assert_eq!(cache.get(&1), None);
283 }
284
285 #[test]
286 fn test_custom_cost() {
287 let cache = sync::Cache::with_weighter(100, 100_000, StringWeighter);
288 cache.insert(1, "1".to_string());
289 cache.insert(54, "54".to_string());
290 cache.insert(1000, "1000".to_string());
291 assert_eq!(cache.get(&1000).unwrap(), "1000");
292 }
293
    // Mutating a value through `get_mut` must re-weigh the entry:
    // the tracked weight follows the value's length down to zero.
    #[test]
    fn test_change_get_mut_change_weight() {
        let mut cache = unsync::Cache::with_weighter(100, 100_000, StringWeighter);
        cache.insert(1, "1".to_string());
        assert_eq!(cache.get(&1).unwrap(), "1");
        assert_eq!(cache.weight(), 1);
        // Replace the value in place; the cache should pick up each new weight.
        let _old = {
            cache
                .get_mut(&1)
                .map(|mut v| std::mem::replace(&mut *v, "11".to_string()))
        };
        let _old = {
            cache
                .get_mut(&1)
                .map(|mut v| std::mem::replace(&mut *v, "".to_string()))
        };
        assert_eq!(cache.get(&1).unwrap(), "");
        assert_eq!(cache.weight(), 0);
        // Internal consistency check (see unsync::Cache::validate).
        cache.validate(false);
    }
314
    // Borrowed composite key for `Equivalent` lookups: a `Pair<A, B>`
    // compares against any `(C, D)` tuple whose components compare,
    // letting tests look up owned tuple keys without allocating them.
    #[derive(Debug, Hash)]
    pub struct Pair<A, B>(pub A, pub B);

    impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
    where
        C: PartialEq<A>,
        D: PartialEq<B>,
    {
        fn eq(&self, rhs: &(A, B)) -> bool {
            self.0 == rhs.0 && self.1 == rhs.1
        }
    }

    impl<A, B, X> Equivalent<X> for Pair<A, B>
    where
        Pair<A, B>: PartialEq<X>,
        A: Hash + Eq,
        B: Hash + Eq,
    {
        fn equivalent(&self, other: &X) -> bool {
            *self == *other
        }
    }
338
    // Look up owned (String, u64) keys with a borrowed Pair<&str, u64>
    // via the `Equivalent` trait — no key allocation needed.
    #[test]
    fn test_equivalent() {
        let mut cache = unsync::Cache::new(5);
        cache.insert(("square".to_string(), 2022), "blue".to_string());
        cache.insert(("square".to_string(), 2023), "black".to_string());
        assert_eq!(cache.get(&Pair("square", 2022)).unwrap(), "blue");
    }
346
    // Mostly a compile-time check: `get` accepts borrowed composite
    // keys (Pair of byte slices / &str) for caches keyed by owned tuples.
    #[test]
    fn test_borrow_keys() {
        let cache = sync::Cache::<(Vec<u8>, Vec<u8>), u64>::new(0);
        cache.get(&Pair(&b""[..], &b""[..]));
        let cache = sync::Cache::<(String, String), u64>::new(0);
        cache.get(&Pair("", ""));
    }
354
    // Stress test: 100 threads race on `get_or_insert_with` for the
    // same key. The closure fails (Err) until the randomly chosen
    // `solve_at`-th invocation, which inserts the value.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_get_or_insert() {
        use rand::prelude::*;
        for _i in 0..2000 {
            dbg!(_i);
            let mut entered = AtomicUsize::default();
            let cache = sync::Cache::<(u64, u64), u64>::new(100);
            const THREADS: usize = 100;
            let wg = std::sync::Barrier::new(THREADS);
            let solve_at = rand::rng().random_range(0..THREADS);
            std::thread::scope(|s| {
                for _ in 0..THREADS {
                    s.spawn(|| {
                        // Line all threads up so they contend at once.
                        wg.wait();
                        let result = cache.get_or_insert_with(&(1, 1), || {
                            let before = entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                            if before == solve_at {
                                Ok(1)
                            } else {
                                Err(())
                            }
                        });
                        assert!(matches!(result, Ok(1) | Err(())));
                    });
                }
            });
            // Exactly solve_at failed attempts ran before the single
            // success; later callers got the cached value without
            // re-entering the closure.
            assert_eq!(*entered.get_mut(), solve_at + 1);
        }
    }
385
    // Guard-based insertion on the unsync cache: a miss yields a guard
    // that inserts, and hits never invoke the init closure (hence the
    // `panic!` closure bodies).
    #[test]
    fn test_get_or_insert_unsync() {
        let mut cache = unsync::Cache::<u64, u64>::new(100);
        // Miss -> Err(guard); inserting through the guard fills the slot.
        let guard = cache.get_ref_or_guard(&0).unwrap_err();
        guard.insert(0);
        assert_eq!(cache.get_ref_or_guard(&0).ok().copied(), Some(0));
        let guard = cache.get_mut_or_guard(&1).err().unwrap();
        guard.insert(1);
        let v = *cache.get_mut_or_guard(&1).ok().unwrap().unwrap();
        assert_eq!(v, 1);
        // Hits must not run the closure.
        let result = cache.get_or_insert_with::<_, ()>(&0, || panic!());
        assert_eq!(result, Ok(Some(&0)));
        let result = cache.get_or_insert_with::<_, ()>(&1, || panic!());
        assert_eq!(result, Ok(Some(&1)));
        // Misses run the closure: Ok inserts, Err propagates.
        let result = cache.get_or_insert_with::<_, ()>(&3, || Ok(3));
        assert_eq!(result, Ok(Some(&3)));
        let result = cache.get_or_insert_with::<_, ()>(&4, || Err(()));
        assert_eq!(result, Err(()));
    }
405
    // Guard and get-or-insert coverage for the sync cache, including
    // the async guard variants.
    #[tokio::test]
    async fn test_get_or_insert_sync() {
        use crate::sync::*;
        let cache = sync::Cache::<u64, u64>::new(100);
        // Miss returns a guard; inserting resolves subsequent lookups.
        let GuardResult::Guard(guard) = cache.get_value_or_guard(&0, None) else {
            panic!();
        };
        guard.insert(0).unwrap();
        let GuardResult::Value(v) = cache.get_value_or_guard(&0, None) else {
            panic!();
        };
        assert_eq!(v, 0);
        // Async flavor: Err(guard) on miss, Ok(value) on hit.
        let Err(guard) = cache.get_value_or_guard_async(&1).await else {
            panic!();
        };
        guard.insert(1).unwrap();
        let Ok(v) = cache.get_value_or_guard_async(&1).await else {
            panic!();
        };
        assert_eq!(v, 1);

        // Hits must not run the closure/future (hence panic! bodies).
        let result = cache.get_or_insert_with::<_, ()>(&0, || panic!());
        assert_eq!(result, Ok(0));
        let result = cache.get_or_insert_with::<_, ()>(&3, || Ok(3));
        assert_eq!(result, Ok(3));
        let result = cache.get_or_insert_with::<_, ()>(&4, || Err(()));
        assert_eq!(result, Err(()));
        let result = cache
            .get_or_insert_async::<_, ()>(&0, async { panic!() })
            .await;
        assert_eq!(result, Ok(0));
        // A failed init propagates Err and leaves the slot empty, so a
        // later attempt with the same key can still fill it.
        let result = cache
            .get_or_insert_async::<_, ()>(&4, async { Err(()) })
            .await;
        assert_eq!(result, Err(()));
        let result = cache
            .get_or_insert_async::<_, ()>(&4, async { Ok(4) })
            .await;
        assert_eq!(result, Ok(4));
    }
446
447 #[test]
448 fn test_retain_unsync() {
449 let mut cache = unsync::Cache::<u64, u64>::new(100);
450 let ranges = 0..10;
451 for i in ranges.clone() {
452 let guard = cache.get_ref_or_guard(&i).unwrap_err();
453 guard.insert(i);
454 assert_eq!(cache.get_ref_or_guard(&i).ok().copied(), Some(i));
455 }
456 let small = 3;
457 cache.retain(|&key, &val| val > small && key > small);
458 for i in ranges.clone() {
459 let actual = cache.get(&i);
460 if i > small {
461 assert!(actual.is_some());
462 assert_eq!(*actual.unwrap(), i);
463 } else {
464 assert!(actual.is_none());
465 }
466 }
467 let big = 7;
468 cache.retain(|&key, &val| val < big && key < big);
469 for i in ranges {
470 let actual = cache.get(&i);
471 if i > small && i < big {
472 assert!(actual.is_some());
473 assert_eq!(*actual.unwrap(), i);
474 } else {
475 assert!(actual.is_none());
476 }
477 }
478 }
479
    // `retain` on the sync cache: two successive predicates leave only
    // the keys passing both (the open interval (small, big)).
    #[tokio::test]
    async fn test_retain_sync() {
        use crate::sync::*;
        let cache = Cache::<u64, u64>::new(100);
        let ranges = 0..10;
        // Populate keys 0..10 through the guard API, value == key.
        for i in ranges.clone() {
            let GuardResult::Guard(guard) = cache.get_value_or_guard(&i, None) else {
                panic!();
            };
            guard.insert(i).unwrap();
            let GuardResult::Value(v) = cache.get_value_or_guard(&i, None) else {
                panic!();
            };
            assert_eq!(v, i);
        }
        // Keep only keys/values strictly above `small`.
        let small = 4;
        cache.retain(|&key, &val| val > small && key > small);
        for i in ranges.clone() {
            let actual = cache.get(&i);
            if i > small {
                assert!(actual.is_some());
                assert_eq!(actual.unwrap(), i);
            } else {
                assert!(actual.is_none());
            }
        }
        // Then keep only keys/values strictly below `big`.
        let big = 8;
        cache.retain(|&key, &val| val < big && key < big);
        for i in ranges {
            let actual = cache.get(&i);
            if i > small && i < big {
                assert!(actual.is_some());
                assert_eq!(actual.unwrap(), i);
            } else {
                assert!(actual.is_none());
            }
        }
    }
518
    // Stress test: threads race on `get_value_or_guard` for a single
    // key. Guard holders that are not the chosen `solve_at`-th simply
    // drop the guard; eventually one holder inserts and the rest read.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_value_or_guard() {
        use crate::sync::*;
        use rand::prelude::*;
        for _i in 0..2000 {
            dbg!(_i);
            let mut entered = AtomicUsize::default();
            let cache = sync::Cache::<(u64, u64), u64>::new(100);
            const THREADS: usize = 100;
            let wg = std::sync::Barrier::new(THREADS);
            let solve_at = rand::rng().random_range(0..THREADS);
            std::thread::scope(|s| {
                for _ in 0..THREADS {
                    s.spawn(|| {
                        // Line all threads up so they contend at once.
                        wg.wait();
                        loop {
                            match cache.get_value_or_guard(&(1, 1), Some(Duration::from_millis(1)))
                            {
                                GuardResult::Value(v) => assert_eq!(v, 1),
                                GuardResult::Guard(g) => {
                                    let before =
                                        entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                                    if before == solve_at {
                                        g.insert(1).unwrap();
                                    }
                                    // else: guard dropped without inserting.
                                }
                                // Timed out waiting for the guard: retry.
                                GuardResult::Timeout => continue,
                            }
                            break;
                        }
                    });
                }
            });
            // Exactly solve_at guards were acquired and dropped before
            // the single successful insert.
            assert_eq!(*entered.get_mut(), solve_at + 1);
        }
    }
556
    // Async counterpart of test_get_or_insert: tasks race on
    // `get_or_insert_async`; the init future errs until the randomly
    // chosen `solve_at`-th attempt, which inserts the value.
    #[tokio::test(flavor = "multi_thread")]
    #[cfg_attr(miri, ignore)]
    async fn test_get_or_insert_async() {
        use rand::prelude::*;
        for _i in 0..5000 {
            dbg!(_i);
            let entered = Arc::new(AtomicUsize::default());
            let cache = Arc::new(sync::Cache::<(u64, u64), u64>::new(100));
            const TASKS: usize = 100;
            let wg = Arc::new(tokio::sync::Barrier::new(TASKS));
            let solve_at = rand::rng().random_range(0..TASKS);
            let mut tasks = Vec::new();
            for _ in 0..TASKS {
                let cache = cache.clone();
                let wg = wg.clone();
                let entered = entered.clone();
                let task = tokio::spawn(async move {
                    // Line all tasks up so they contend at once.
                    wg.wait().await;
                    let result = cache
                        .get_or_insert_async(&(1, 1), async {
                            let before = entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                            if before == solve_at {
                                Ok(1)
                            } else {
                                Err(())
                            }
                        })
                        .await;
                    assert!(matches!(result, Ok(1) | Err(())));
                });
                tasks.push(task);
            }
            for task in tasks {
                task.await.unwrap();
            }
            // The value made it in, and the init future ran once per
            // failed attempt plus the single success.
            assert_eq!(cache.get(&(1, 1)), Some(1));
            assert_eq!(
                entered.load(std::sync::atomic::Ordering::Relaxed),
                solve_at + 1
            );
        }
    }
599
    // Async counterpart of test_value_or_guard: tasks race on
    // `get_value_or_guard_async` under a 1ms timeout; non-solving
    // guard holders drop the guard until the `solve_at`-th inserts.
    #[tokio::test(flavor = "multi_thread")]
    #[cfg_attr(miri, ignore)]
    async fn test_value_or_guard_async() {
        use rand::prelude::*;
        for _i in 0..5000 {
            dbg!(_i);
            let entered = Arc::new(AtomicUsize::default());
            let cache = Arc::new(sync::Cache::<(u64, u64), u64>::new(100));
            const TASKS: usize = 100;
            let wg = Arc::new(tokio::sync::Barrier::new(TASKS));
            let solve_at = rand::rng().random_range(0..TASKS);
            let mut tasks = Vec::new();
            for _ in 0..TASKS {
                let cache = cache.clone();
                let wg = wg.clone();
                let entered = entered.clone();
                let task = tokio::spawn(async move {
                    // Line all tasks up so they contend at once.
                    wg.wait().await;
                    loop {
                        match tokio::time::timeout(
                            Duration::from_millis(1),
                            cache.get_value_or_guard_async(&(1, 1)),
                        )
                        .await
                        {
                            Ok(Ok(r)) => assert_eq!(r, 1),
                            Ok(Err(g)) => {
                                let before =
                                    entered.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
                                if before == solve_at {
                                    g.insert(1).unwrap();
                                }
                                // else: guard dropped without inserting.
                            }
                            // Timed out waiting for the guard: retry.
                            Err(_) => continue,
                        }
                        break;
                    }
                });
                tasks.push(task);
            }
            for task in tasks {
                task.await.unwrap();
            }
            // The value made it in after exactly solve_at dropped guards.
            assert_eq!(cache.get(&(1, 1)), Some(1));
            assert_eq!(
                entered.load(std::sync::atomic::Ordering::Relaxed),
                solve_at + 1
            );
        }
    }
650}