// webrender/lru_cache.rs

/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use crate::freelist::{FreeList, FreeListHandle, WeakFreeListHandle};
use std::{mem, num};

/*
  This module implements a least recently used cache structure, which is
  used by the texture cache to manage the lifetime of items inside the
  texture cache. It has a few special pieces of functionality that the
  texture cache requires, but should be usable as a general LRU cache
  type if useful in other areas.

  The cache is implemented with two types of backing freelists. These allow
  random access to the underlying data, while being efficient in both
  memory access and allocation patterns.

  The "entries" freelist stores the elements being cached (for example, the
  CacheEntry structure for the texture cache). These elements are stored
  in arbitrary order, reusing empty slots in the freelist where possible.

  The "lru_index" freelists store the LRU tracking information. Although the
  tracking elements are stored in arbitrary order inside a freelist for
  efficiency, they use next/prev links to represent a doubly-linked list,
  kept sorted in order of recent use. The next link is also used to store
  the current freelist within the array when the element is not occupied.

  The LRU cache allows having multiple LRU "partitions". Every entry is tracked
  by exactly one partition at any time; all partitions refer to entries in the
  shared freelist. Entries can move between partitions, if replace_or_insert is
  called with a new partition index for an existing handle.
  The partitioning is used by the texture cache so that, for example, allocating
  more glyph entries does not cause eviction of image entries (which go into
  a different shared texture). If an existing handle's entry is reallocated with
  a new size, it might need to move from a shared texture to a standalone
  texture; in this case the handle will move to a different LRU partition.
 */

/// Stores the data supplied by the user to be cached, and an index
/// into the LRU tracking freelist for this element.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
struct LRUCacheEntry<T> {
    /// The LRU partition that currently tracks this entry.
    partition_index: u8,

    /// The location of the LRU tracking element for this cache entry in the
    /// right LRU partition.
    lru_index: ItemIndex,

    /// The cached data provided by the caller for this element.
    value: T,
}

/// The main public interface to the LRU cache
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct LRUCache<T, M> {
    /// A free list of cache entries, and indices into the LRU tracking list
    entries: FreeList<LRUCacheEntry<T>, M>,
    /// The LRU tracking lists (one per partition), allowing O(1) access to
    /// the oldest element in each partition
    lru: Vec<LRUTracker<FreeListHandle<M>>>,
}

impl<T, M> LRUCache<T, M> {
    /// Construct a new LRU cache with `lru_partition_count` partitions.
    /// Partition indices are stored in a `u8`, so at most 256 partitions
    /// are supported (hence the assert).
    pub fn new(lru_partition_count: usize) -> Self {
        assert!(lru_partition_count <= u8::MAX as usize + 1);
        LRUCache {
            entries: FreeList::new(),
            lru: (0..lru_partition_count).map(|_| LRUTracker::new()).collect(),
        }
    }

    /// Insert a new element into the cache. Returns a weak handle for callers to
    /// access the data, since the lifetime is managed by the LRU algorithm and it
    /// may be evicted at any time.
    pub fn push_new(
        &mut self,
        partition_index: u8,
        value: T,
    ) -> WeakFreeListHandle<M> {
        // It's a slightly awkward process to insert an element, since we don't know
        // the index of the LRU tracking element until we've got a handle for the
        // underlying cached data.

        // Insert the data provided by the caller, using placeholder
        // partition / lru_index values that are patched up below once the
        // tracking node exists.
        let handle = self.entries.insert(LRUCacheEntry {
            partition_index: 0,
            lru_index: ItemIndex(num::NonZeroU32::new(1).unwrap()),
            value
        });

        // Get a weak handle to return to the caller
        let weak_handle = handle.weak();

        // Add an LRU tracking node that owns the strong handle, and store the location
        // of this inside the cache entry.
        let entry = self.entries.get_mut(&handle);
        let lru_index = self.lru[partition_index as usize].push_new(handle);
        entry.partition_index = partition_index;
        entry.lru_index = lru_index;

        weak_handle
    }

    /// Get immutable access to the data at a given slot. Since this takes a weak
    /// handle, it may have been evicted, so returns an Option.
    pub fn get_opt(
        &self,
        handle: &WeakFreeListHandle<M>,
    ) -> Option<&T> {
        self.entries
            .get_opt(handle)
            .map(|entry| {
                &entry.value
            })
    }

    /// Get mutable access to the data at a given slot. Since this takes a weak
    /// handle, it may have been evicted, so returns an Option.
    /// NOTE: this does not touch the LRU order; use `touch` for that.
    pub fn get_opt_mut(
        &mut self,
        handle: &WeakFreeListHandle<M>,
    ) -> Option<&mut T> {
        self.entries
            .get_opt_mut(handle)
            .map(|entry| {
                &mut entry.value
            })
    }

    /// Return a reference to the oldest item in the cache, keeping it in the cache.
    /// If the cache is empty, this will return None.
    /// Panics if `partition_index` is out of range.
    pub fn peek_oldest(&self, partition_index: u8) -> Option<&T> {
        self.lru[partition_index as usize]
            .peek_front()
            .map(|handle| {
                let entry = self.entries.get(handle);
                &entry.value
            })
    }

    /// Remove the oldest item from the cache. This is used to select elements to
    /// be evicted. If the cache is empty, this will return None.
    pub fn pop_oldest(
        &mut self,
        partition_index: u8,
    ) -> Option<T> {
        // The tracker yields the strong handle, which is then freed from the
        // entry freelist, transferring ownership of the value to the caller.
        self.lru[partition_index as usize]
            .pop_front()
            .map(|handle| {
                let entry = self.entries.free(handle);
                entry.value
            })
    }

    /// This is a special case of `push_new`, which is a requirement for the texture
    /// cache. Sometimes, we want to replace the content of an existing handle if it
    /// exists, or insert a new element if the handle is invalid (for example, if an
    /// image is resized and it moves to a new location in the texture atlas). This
    /// method returns the old cache entry if it existed, so it can be freed by the caller.
    #[must_use]
    pub fn replace_or_insert(
        &mut self,
        handle: &mut WeakFreeListHandle<M>,
        partition_index: u8,
        data: T,
    ) -> Option<T> {
        match self.entries.get_opt_mut(handle) {
            Some(entry) => {
                if entry.partition_index != partition_index {
                    // Move to a different partition: unlink the tracking node
                    // from the old partition's list and re-link it as the
                    // newest item of the new partition.
                    let strong_handle = self.lru[entry.partition_index as usize].remove(entry.lru_index);
                    let lru_index = self.lru[partition_index as usize].push_new(strong_handle);
                    entry.partition_index = partition_index;
                    entry.lru_index = lru_index;
                }
                Some(mem::replace(&mut entry.value, data))
            }
            None => {
                // Handle is invalid (evicted or never valid): insert fresh and
                // update the caller's handle in place.
                *handle = self.push_new(partition_index, data);
                None
            }
        }
    }

    /// Manually evict a specific item, returning its value if the handle was
    /// still valid.
    pub fn remove(&mut self, handle: &WeakFreeListHandle<M>) -> Option<T> {
        if let Some(entry) = self.entries.get_opt_mut(handle) {
            // Unlink from the LRU partition first; the returned strong handle
            // is then used to free the entry itself.
            let strong_handle = self.lru[entry.partition_index as usize].remove(entry.lru_index);
            return Some(self.entries.free(strong_handle).value);
        }

        None
    }

    /// This is used by the calling code to signal that the element that this handle
    /// references has been used on this frame. Internally, it updates the links in
    /// the LRU tracking element to move this item to the end of the LRU list. Returns
    /// the underlying data in case the client wants to mutate it.
    pub fn touch(
        &mut self,
        handle: &WeakFreeListHandle<M>,
    ) -> Option<&mut T> {
        // Borrow the `lru` field separately, so the closure can update the
        // tracker while `entries` is mutably borrowed.
        let lru = &mut self.lru;

        self.entries
            .get_opt_mut(handle)
            .map(|entry| {
                lru[entry.partition_index as usize].mark_used(entry.lru_index);
                &mut entry.value
            })
    }

    /// Try to validate that the state of the cache is consistent
    #[cfg(test)]
    fn validate(&self) {
        for lru in &self.lru {
            lru.validate();
        }
    }
}

/// Index of an LRU tracking element. `NonZeroU32` is valid because slot 0 of
/// the tracker's item array is a dummy entry that is never referenced, and it
/// lets `Option<ItemIndex>` occupy no extra space.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, MallocSizeOf)]
struct ItemIndex(num::NonZeroU32);

234impl ItemIndex {
235    fn as_usize(&self) -> usize {
236        self.0.get() as usize
237    }
238}
239
/// Stores a strong handle controlling the lifetime of the data in the LRU
/// cache, and a doubly-linked list node specifying where in the current LRU
/// order this element exists. These items are themselves backed by a freelist
/// to minimize heap allocations and improve cache access patterns.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, MallocSizeOf)]
struct Item<H> {
    /// Link to the previous (older) item; None when this is the head.
    prev: Option<ItemIndex>,
    /// Link to the next (newer) item; None when this is the tail. When the
    /// slot is unoccupied, this instead links the internal freelist.
    next: Option<ItemIndex>,
    /// The strong handle owning the cached entry; None while the slot is free.
    handle: Option<H>,
}

/// Internal implementation of the LRU tracking list
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
struct LRUTracker<H> {
    /// Current head of the list - this is the oldest item that will be evicted next.
    head: Option<ItemIndex>,
    /// Current tail of the list - this is the most recently used element.
    tail: Option<ItemIndex>,
    /// As tracking items are removed, they are stored in a freelist, to minimize heap allocations
    free_list_head: Option<ItemIndex>,
    /// The array backing all the LRU tracking items. Slot 0 is a dummy entry
    /// that is never used, so valid `ItemIndex` values are always non-zero.
    items: Vec<Item<H>>,
}

impl<H> LRUTracker<H> where H: std::fmt::Debug {
    /// Construct a new LRU tracker
    fn new() -> Self {
        // Push a dummy entry in the vec that is never used. This ensures the NonZeroU32
        // property is respected, and we never create an ItemIndex(0).
        let items = vec![
            Item {
                prev: None,
                next: None,
                handle: None,
            },
        ];

        LRUTracker {
            head: None,
            tail: None,
            free_list_head: None,
            items,
        }
    }

    /// Internal function that takes an item index, and links it to the
    /// end of the tracker list (makes it the newest item).
    fn link_as_new_tail(
        &mut self,
        item_index: ItemIndex,
    ) {
        match (self.head, self.tail) {
            (Some(..), Some(tail)) => {
                // Both a head and a tail: append after the current tail.
                self.items[item_index.as_usize()].prev = Some(tail);
                self.items[item_index.as_usize()].next = None;

                self.items[tail.as_usize()].next = Some(item_index);
                self.tail = Some(item_index);
            }
            (None, None) => {
                // No head/tail, currently empty list: this becomes the only
                // element, so it is both head and tail.
                self.items[item_index.as_usize()].prev = None;
                self.items[item_index.as_usize()].next = None;

                self.head = Some(item_index);
                self.tail = Some(item_index);
            }
            (Some(..), None) | (None, Some(..)) => {
                // Invalid state: head and tail must be both set or both unset.
                unreachable!();
            }
        }
    }

    /// Internal function that takes an LRU item index, and removes it from
    /// the current doubly linked list. Used during removal of items, and also
    /// when items are moved to the back of the list as they're touched.
    fn unlink(
        &mut self,
        item_index: ItemIndex,
    ) {
        // Snapshot the neighbor links before mutating anything.
        let (next, prev) = {
            let item = &self.items[item_index.as_usize()];
            (item.next, item.prev)
        };

        // Patch the successor's back-link, or the tail if this was the tail.
        match next {
            Some(next) => {
                self.items[next.as_usize()].prev = prev;
            }
            None => {
                debug_assert_eq!(self.tail, Some(item_index));
                self.tail = prev;
            }
        }

        // Patch the predecessor's forward-link, or the head if this was the head.
        match prev {
            Some(prev) => {
                self.items[prev.as_usize()].next = next;
            }
            None => {
                debug_assert_eq!(self.head, Some(item_index));
                self.head = next;
            }
        }
    }

    /// Push a new LRU tracking item on to the back of the list, marking
    /// it as the most recent item.
    fn push_new(
        &mut self,
        handle: H,
    ) -> ItemIndex {
        // See if there is a slot available in the current free list
        let item_index = match self.free_list_head {
            Some(index) => {
                // Reuse an existing slot
                let item = &mut self.items[index.as_usize()];

                // A free slot must not own a handle.
                assert!(item.handle.is_none());
                item.handle = Some(handle);

                // Pop this slot off the freelist (free slots chain via `next`).
                self.free_list_head = item.next;

                index
            }
            None => {
                // No free slots available, push to the end of the array.
                // len() is always >= 1 here because of the dummy slot 0, so
                // the NonZeroU32 constructor cannot fail.
                let index = ItemIndex(num::NonZeroU32::new(self.items.len() as u32).unwrap());

                self.items.push(Item {
                    prev: None,
                    next: None,
                    handle: Some(handle),
                });

                index
            }
        };

        // Now link this element into the LRU list
        self.link_as_new_tail(item_index);

        item_index
    }

    /// Returns a reference to the oldest element, or None if the list is empty.
    fn peek_front(&self) -> Option<&H> {
        self.head.map(|head| self.items[head.as_usize()].handle.as_ref().unwrap())
    }

    /// Remove the oldest element from the front of the LRU list. Returns None
    /// if the list is empty.
    fn pop_front(
        &mut self,
    ) -> Option<H> {
        let handle = match (self.head, self.tail) {
            (Some(head), Some(tail)) => {
                let item_index = head;

                // Head and tail are the same - removing the only element
                if head == tail {
                    self.head = None;
                    self.tail = None;
                } else {
                    // Update the head of the list, popping the first element off
                    let new_head = self.items[head.as_usize()].next.unwrap();
                    self.head = Some(new_head);
                    self.items[new_head.as_usize()].prev = None;
                }

                // Add this item to the freelist for later use
                self.items[item_index.as_usize()].next = self.free_list_head;
                self.free_list_head = Some(item_index);

                // Return the handle to the user (take() marks the slot free)
                Some(self.items[item_index.as_usize()].handle.take().unwrap())
            }
            (None, None) => {
                // List is empty
                None
            }
            (Some(..), None) | (None, Some(..)) => {
                // Invalid state
                unreachable!();
            }
        };

        handle
    }

    /// Manually remove an item from the LRU tracking list. This is used
    /// when an element switches from one LRU partition to a different one.
    /// Returns the strong handle that the tracking node owned.
    fn remove(
        &mut self,
        index: ItemIndex,
    ) -> H {
        // Remove from the LRU list
        self.unlink(index);

        let handle = self.items[index.as_usize()].handle.take().unwrap();

        // Add LRU item to the freelist for future use.
        self.items[index.as_usize()].next = self.free_list_head;
        self.free_list_head = Some(index);

        handle
    }

    /// Called to mark that an item was used on this frame. It unlinks the
    /// tracking item, and then re-links it to the back of the list.
    fn mark_used(
        &mut self,
        index: ItemIndex,
    ) {
        self.unlink(index);
        self.link_as_new_tail(index);
    }

    /// Try to validate that the state of the linked lists are consistent
    #[cfg(test)]
    fn validate(&self) {
        use std::collections::HashSet;

        // Must have a valid head/tail or be empty
        assert!((self.head.is_none() && self.tail.is_none()) || (self.head.is_some() && self.tail.is_some()));

        // If there is a head, the prev of the head must be none
        if let Some(head) = self.head {
            assert!(self.items[head.as_usize()].prev.is_none());
        }

        // If there is a tail, the next of the tail must be none
        if let Some(tail) = self.tail {
            assert!(self.items[tail.as_usize()].next.is_none());
        }

        // Collect all free and valid items, both in forwards and reverse order
        let mut free_items = Vec::new();
        let mut free_items_set = HashSet::new();
        let mut valid_items_front = Vec::new();
        let mut valid_items_front_set = HashSet::new();
        let mut valid_items_reverse = Vec::new();
        let mut valid_items_reverse_set = HashSet::new();

        // Walk the freelist chain (via `next`), asserting no cycles.
        let mut current = self.free_list_head;
        while let Some(index) = current {
            let item = &self.items[index.as_usize()];
            free_items.push(index);
            assert!(free_items_set.insert(index));
            current = item.next;
        }

        // Walk the LRU list forwards from head.
        current = self.head;
        while let Some(index) = current {
            let item = &self.items[index.as_usize()];
            valid_items_front.push(index);
            assert!(valid_items_front_set.insert(index));
            current = item.next;
        }

        // Walk the LRU list backwards from tail.
        current = self.tail;
        while let Some(index) = current {
            let item = &self.items[index.as_usize()];
            valid_items_reverse.push(index);
            assert!(!valid_items_reverse_set.contains(&index));
            valid_items_reverse_set.insert(index);
            current = item.prev;
        }

        // Ensure set lengths match the vec lengths (should be enforced by the assert check during insert anyway)
        assert_eq!(valid_items_front.len(), valid_items_front_set.len());
        assert_eq!(valid_items_reverse.len(), valid_items_reverse_set.len());

        // Length of the array should equal free + valid items count + 1 (dummy entry)
        assert_eq!(free_items.len() + valid_items_front.len() + 1, self.items.len());

        // Should be same number of items whether iterating forwards or reverse
        assert_eq!(valid_items_front.len(), valid_items_reverse.len());

        // Ensure there are no items considered in the free list that are also in the valid list
        assert!(free_items_set.intersection(&valid_items_reverse_set).collect::<HashSet<_>>().is_empty());
        assert!(free_items_set.intersection(&valid_items_front_set).collect::<HashSet<_>>().is_empty());

        // Should be the same number of items regardless of iteration direction
        assert_eq!(valid_items_front_set.len(), valid_items_reverse_set.len());

        // Ensure that the ordering is exactly the same, regardless of iteration direction
        for (i0, i1) in valid_items_front.iter().zip(valid_items_reverse.iter().rev()) {
            assert_eq!(i0, i1);
        }
    }
}

#[test]
fn test_lru_tracker_push_peek() {
    // Push elements, peek and ensure:
    // - peek_oldest returns None before first element pushed
    // - peek_oldest returns oldest element
    // - subsequent calls to peek_oldest return same element (nothing was removed)
    struct CacheMarker;
    const NUM_ELEMENTS: usize = 50;

    let mut cache: LRUCache<usize, CacheMarker> = LRUCache::new(1);
    cache.validate();

    // Empty cache: nothing to peek at.
    assert_eq!(cache.peek_oldest(0), None);

    for i in 0 .. NUM_ELEMENTS {
        cache.push_new(0, i);
    }
    cache.validate();

    // Peeking is non-destructive, so the oldest element stays the same.
    assert_eq!(cache.peek_oldest(0), Some(&0));
    assert_eq!(cache.peek_oldest(0), Some(&0));

    // After popping the oldest, peek advances to the next-oldest element.
    cache.pop_oldest(0);
    assert_eq!(cache.peek_oldest(0), Some(&1));
}

#[test]
fn test_lru_tracker_push_pop() {
    // Fill the cache, then drain it completely, checking that:
    // - elements come back in insertion (oldest-first) order
    // - pop_oldest yields None once the cache is empty
    struct CacheMarker;
    const NUM_ELEMENTS: usize = 50;

    let mut cache: LRUCache<usize, CacheMarker> = LRUCache::new(1);
    cache.validate();

    (0 .. NUM_ELEMENTS).for_each(|value| {
        cache.push_new(0, value);
    });
    cache.validate();

    for expected in 0 .. NUM_ELEMENTS {
        assert_eq!(cache.pop_oldest(0), Some(expected));
    }
    cache.validate();

    assert_eq!(cache.pop_oldest(0), None);
}

#[test]
fn test_lru_tracker_push_touch_pop() {
    // Push elements, touch even handles, pop them all off and ensure:
    // - Returned in correct order
    // - pop_oldest returns None after last element popped
    struct CacheMarker;
    const NUM_ELEMENTS: usize = 50;

    let mut cache: LRUCache<usize, CacheMarker> = LRUCache::new(1);
    let mut handles = Vec::new();
    cache.validate();

    for i in 0 .. NUM_ELEMENTS {
        handles.push(cache.push_new(0, i));
    }
    cache.validate();

    // Touch every even-valued element, moving it to the back of the LRU list.
    for i in 0 .. NUM_ELEMENTS/2 {
        cache.touch(&handles[i*2]);
    }
    cache.validate();

    // Untouched (odd) elements are now oldest, and pop off first...
    for i in 0 .. NUM_ELEMENTS/2 {
        assert_eq!(cache.pop_oldest(0), Some(i*2+1));
    }
    cache.validate();
    // ...followed by the touched (even) elements, still in relative order.
    for i in 0 .. NUM_ELEMENTS/2 {
        assert_eq!(cache.pop_oldest(0), Some(i*2));
    }
    cache.validate();

    assert_eq!(cache.pop_oldest(0), None);
}

#[test]
fn test_lru_tracker_push_get() {
    // Push elements, ensure:
    // - get access via weak handles works for every pushed element
    struct CacheMarker;
    const NUM_ELEMENTS: usize = 50;

    let mut cache: LRUCache<usize, CacheMarker> = LRUCache::new(1);
    let mut handles = Vec::new();
    cache.validate();

    for i in 0 .. NUM_ELEMENTS {
        handles.push(cache.push_new(0, i));
    }
    cache.validate();

    // Verify every handle (previously only the first half was checked),
    // using assert_eq! for a useful failure message.
    for i in 0 .. NUM_ELEMENTS {
        assert_eq!(cache.get_opt(&handles[i]), Some(&i));
    }
    cache.validate();
}

#[test]
fn test_lru_tracker_push_replace_get() {
    // Push elements, replace contents, ensure:
    // - each element was replaced with new data correctly
    // - replace_or_insert works for invalid handles
    struct CacheMarker;
    const NUM_ELEMENTS: usize = 50;

    let mut cache: LRUCache<usize, CacheMarker> = LRUCache::new(1);
    let mut handles = Vec::new();
    cache.validate();

    for i in 0 .. NUM_ELEMENTS {
        handles.push(cache.push_new(0, i));
    }
    cache.validate();

    // Replace every element's value, checking the old value is returned.
    for i in 0 .. NUM_ELEMENTS {
        assert_eq!(cache.replace_or_insert(&mut handles[i], 0, i * 2), Some(i));
    }
    cache.validate();

    // Verify every replaced element (previously only the first half was
    // checked), using assert_eq! for a useful failure message.
    for i in 0 .. NUM_ELEMENTS {
        assert_eq!(cache.get_opt(&handles[i]), Some(&(i * 2)));
    }
    cache.validate();

    // An invalid handle falls back to inserting a fresh element, and the
    // handle is updated in place to point at it.
    let mut empty_handle = WeakFreeListHandle::invalid();
    assert_eq!(cache.replace_or_insert(&mut empty_handle, 0, 100), None);
    assert_eq!(cache.get_opt(&empty_handle), Some(&100));
675}