1use api::{DebugFlags, DocumentId, PremultipliedColorF};
28#[cfg(test)]
29use api::IdNamespace;
30use api::units::*;
31use euclid::{HomogeneousVector, Box2D};
32use crate::internal_types::{FastHashMap, FastHashSet, FrameStamp, FrameId};
33use crate::profiler::{self, TransactionProfile};
34use crate::prim_store::VECS_PER_SEGMENT;
35use crate::renderer::MAX_VERTEX_TEXTURE_WIDTH;
36use crate::util::VecHelper;
37use std::{u16, u32};
38use std::num::NonZeroU32;
39use std::ops::Add;
40use std::time::{Duration, Instant};
41
42
/// Initial number of rows in the GPU cache texture.
pub const GPU_CACHE_INITIAL_HEIGHT: i32 = 20;
/// Number of rows added to the texture height each time it needs to grow.
const NEW_ROWS_PER_RESIZE: i32 = 10;

/// Number of frames an entry may go unused before it becomes eligible
/// for eviction (see `Texture::evict_old_blocks`).
const FRAMES_BEFORE_EVICTION: u64 = 10;

/// Utilization ratio below which the cache is considered for reclamation,
/// once the texture has grown past its initial height.
const RECLAIM_THRESHOLD: f32 = 0.2;

/// Seconds the utilization must remain below `RECLAIM_THRESHOLD` before
/// memory is actually reclaimed (see `GpuCache::should_reclaim_memory`).
const RECLAIM_DELAY_S: u64 = 5;
59
/// Generation counter used to detect stale cache handles: a
/// `GpuCacheHandle` is only valid while the epoch stored in its
/// `CacheLocation` matches the epoch of the block it points at.
#[derive(Debug, Copy, Clone, Eq, MallocSizeOf, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct Epoch(u32);

impl Epoch {
    /// Advance to the next epoch, wrapping around on overflow.
    fn next(&mut self) {
        *self = Epoch(self.0.wrapping_add(1));
    }
}
70
/// A location in the cache recorded inside a `GpuCacheHandle`: the block
/// index plus the epoch it was allocated in, so staleness can be detected.
#[derive(Debug, Copy, Clone, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct CacheLocation {
    /// Index of the block in `Texture::blocks`.
    block_index: BlockIndex,
    /// Epoch the block had when this location was handed out.
    epoch: Epoch,
}
78
/// A single texel-worth of data in the cache texture: four f32 components.
#[derive(Copy, Clone, Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GpuBlockData {
    data: [f32; 4],
}

impl GpuBlockData {
    /// An all-zero block.
    pub const EMPTY: Self = GpuBlockData { data: [0.0; 4] };
}
90
91impl From<PremultipliedColorF> for GpuBlockData {
93 fn from(c: PremultipliedColorF) -> Self {
94 GpuBlockData {
95 data: [c.r, c.g, c.b, c.a],
96 }
97 }
98}
99
100impl From<[f32; 4]> for GpuBlockData {
101 fn from(data: [f32; 4]) -> Self {
102 GpuBlockData { data }
103 }
104}
105
106impl<P> From<Box2D<f32, P>> for GpuBlockData {
107 fn from(r: Box2D<f32, P>) -> Self {
108 GpuBlockData {
109 data: [
110 r.min.x,
111 r.min.y,
112 r.max.x,
113 r.max.y,
114 ],
115 }
116 }
117}
118
119impl<P> From<HomogeneousVector<f32, P>> for GpuBlockData {
120 fn from(v: HomogeneousVector<f32, P>) -> Self {
121 GpuBlockData {
122 data: [
123 v.x,
124 v.y,
125 v.z,
126 v.w,
127 ],
128 }
129 }
130}
131
132impl From<TexelRect> for GpuBlockData {
133 fn from(tr: TexelRect) -> Self {
134 GpuBlockData {
135 data: [tr.uv0.x, tr.uv0.y, tr.uv1.x, tr.uv1.y],
136 }
137 }
138}
139
140
/// Public handle to a cache entry. Starts out unbound (`location: None`)
/// and is bound when a `GpuDataRequest` for it is dropped.
#[derive(Debug, Copy, Clone, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GpuCacheHandle {
    location: Option<CacheLocation>,
}
148
149impl GpuCacheHandle {
150 pub fn new() -> Self {
151 GpuCacheHandle { location: None }
152 }
153
154 pub fn as_int(self, gpu_cache: &GpuCache) -> i32 {
155 gpu_cache.get_address(&self).as_int()
156 }
157}
158
/// Texel coordinates of a block within the cache texture. `#[repr(C)]`
/// because the layout is shared with the GPU side.
#[repr(C)]
#[derive(Copy, Debug, Clone, MallocSizeOf, Eq, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct GpuCacheAddress {
    /// Column (block offset within the row).
    pub u: u16,
    /// Row index.
    pub v: u16,
}
170
171impl GpuCacheAddress {
172 fn new(u: usize, v: usize) -> Self {
173 GpuCacheAddress {
174 u: u as u16,
175 v: v as u16,
176 }
177 }
178
179 pub const INVALID: GpuCacheAddress = GpuCacheAddress {
180 u: u16::MAX,
181 v: u16::MAX,
182 };
183
184 pub fn as_int(self) -> i32 {
185 self.v as i32 * MAX_VERTEX_TEXTURE_WIDTH as i32 + self.u as i32
189 }
190}
191
192impl Add<usize> for GpuCacheAddress {
193 type Output = GpuCacheAddress;
194
195 fn add(self, other: usize) -> GpuCacheAddress {
196 GpuCacheAddress {
197 u: self.u + other as u16,
198 v: self.v,
199 }
200 }
201}
202
/// Metadata for one allocation slot in the cache texture. Blocks live on
/// either a free list (`FreeBlockLists`) or a per-document occupied list,
/// linked through `next`.
#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct Block {
    /// Texel location of this block in the texture.
    address: GpuCacheAddress,
    /// Current generation; bumped to invalidate outstanding handles.
    epoch: Epoch,
    /// Next block in whichever list (free or occupied) this block is on.
    next: Option<BlockIndex>,
    /// Frame this block was last requested/allocated in; drives eviction.
    last_access_time: FrameId,
}
219
220impl Block {
221 fn new(
222 address: GpuCacheAddress,
223 next: Option<BlockIndex>,
224 frame_id: FrameId,
225 epoch: Epoch,
226 ) -> Self {
227 Block {
228 address,
229 next,
230 last_access_time: frame_id,
231 epoch,
232 }
233 }
234
235 fn advance_epoch(&mut self, max_epoch: &mut Epoch) {
236 self.epoch.next();
237 if max_epoch.0 < self.epoch.0 {
238 max_epoch.0 = self.epoch.0;
239 }
240 }
241
242 pub const INVALID: Block = Block {
244 address: GpuCacheAddress { u: 0, v: 0 },
245 epoch: Epoch(0),
246 next: None,
247 last_access_time: FrameId::INVALID,
248 };
249}
250
251#[derive(Debug, Copy, Clone, MallocSizeOf)]
257#[cfg_attr(feature = "capture", derive(Serialize))]
258#[cfg_attr(feature = "replay", derive(Deserialize))]
259struct BlockIndex(NonZeroU32);
260
261impl BlockIndex {
262 fn new(idx: usize) -> Self {
263 debug_assert!(idx <= u32::MAX as usize);
264 BlockIndex(NonZeroU32::new(idx as u32).expect("Index zero forbidden"))
265 }
266
267 fn get(&self) -> usize {
268 self.0.get() as usize
269 }
270}
271
/// Metadata for one row of the texture. Every item in a row has the same
/// (size-class) block count, so the row only needs to record that count.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
struct Row {
    /// Number of blocks each item in this row occupies.
    block_count_per_item: usize,
}

impl Row {
    fn new(block_count_per_item: usize) -> Self {
        Row {
            block_count_per_item,
        }
    }
}
291
/// An update operation for the renderer to apply to the GPU copy of the
/// cache texture.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub enum GpuCacheUpdate {
    /// Copy `block_count` staged blocks, starting at `block_index` in the
    /// update list's `blocks` array, to `address` in the texture.
    Copy {
        block_index: usize,
        block_count: usize,
        address: GpuCacheAddress,
    },
}
306
/// Commands recorded for the debug display when `GPU_CACHE_DBG` is set.
#[derive(MallocSizeOf)]
pub enum GpuCacheDebugCmd {
    /// A chunk of the cache was allocated.
    Alloc(GpuCacheDebugChunk),
    /// The chunk at this address was freed.
    Free(GpuCacheAddress),
}
316
/// Describes one allocated chunk for the debug display: where it starts
/// and how many blocks it spans.
#[derive(Clone, MallocSizeOf)]
pub struct GpuCacheDebugChunk {
    pub address: GpuCacheAddress,
    pub size: usize,
}
322
/// A batch of work extracted each frame for the renderer to apply to its
/// GPU copy of the cache (see `GpuCache::extract_updates`).
#[must_use]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct GpuCacheUpdateList {
    /// The frame this update list was produced in.
    pub frame_id: FrameId,
    /// True if the GPU-side contents should be discarded first
    /// (set after a `GpuCache::clear`).
    pub clear: bool,
    /// Current height (in rows) the GPU texture must accommodate.
    pub height: i32,
    /// Copy operations to perform.
    pub updates: Vec<GpuCacheUpdate>,
    /// Staged block data that the `updates` copy from.
    pub blocks: Vec<GpuBlockData>,
    /// Debug-display commands (not serialized in capture/replay).
    #[cfg_attr(feature = "serde", serde(skip))]
    pub debug_commands: Vec<GpuCacheDebugCmd>,
}
345
/// Heads of the free-block lists, one per allocation size class. Requests
/// are rounded up to the nearest class by
/// `get_actual_block_count_and_free_list`.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
struct FreeBlockLists {
    free_list_1: Option<BlockIndex>,
    free_list_2: Option<BlockIndex>,
    free_list_4: Option<BlockIndex>,
    free_list_8: Option<BlockIndex>,
    free_list_16: Option<BlockIndex>,
    free_list_32: Option<BlockIndex>,
    free_list_64: Option<BlockIndex>,
    free_list_128: Option<BlockIndex>,
    free_list_256: Option<BlockIndex>,
    // Non-power-of-two class; see note in
    // `get_actual_block_count_and_free_list`.
    free_list_341: Option<BlockIndex>,
    free_list_512: Option<BlockIndex>,
    free_list_1024: Option<BlockIndex>,
}
365
impl FreeBlockLists {
    /// All lists start empty; rows are carved out lazily on first demand.
    fn new() -> Self {
        FreeBlockLists {
            free_list_1: None,
            free_list_2: None,
            free_list_4: None,
            free_list_8: None,
            free_list_16: None,
            free_list_32: None,
            free_list_64: None,
            free_list_128: None,
            free_list_256: None,
            free_list_341: None,
            free_list_512: None,
            free_list_1024: None,
        }
    }

    /// Round `block_count` up to its size class and return that class's
    /// actual block count together with the matching free-list head.
    ///
    /// Classes are powers of two, plus a 341 class (341 * 3 = 1023, so
    /// presumably three such items pack into one 1024-wide row — TODO
    /// confirm the intended use case).
    ///
    /// Panics on a zero-sized request or one larger than a row.
    fn get_actual_block_count_and_free_list(
        &mut self,
        block_count: usize,
    ) -> (usize, &mut Option<BlockIndex>) {
        // The bucket boundaries below assume a 1024-block row width.
        debug_assert_eq!(MAX_VERTEX_TEXTURE_WIDTH, 1024, "Need to update bucketing");
        match block_count {
            0 => panic!("Can't allocate zero sized blocks!"),
            1 => (1, &mut self.free_list_1),
            2 => (2, &mut self.free_list_2),
            3..=4 => (4, &mut self.free_list_4),
            5..=8 => (8, &mut self.free_list_8),
            9..=16 => (16, &mut self.free_list_16),
            17..=32 => (32, &mut self.free_list_32),
            33..=64 => (64, &mut self.free_list_64),
            65..=128 => (128, &mut self.free_list_128),
            129..=256 => (256, &mut self.free_list_256),
            257..=341 => (341, &mut self.free_list_341),
            342..=512 => (512, &mut self.free_list_512),
            513..=1024 => (1024, &mut self.free_list_1024),
            _ => panic!("Can't allocate > MAX_VERTEX_TEXTURE_WIDTH per resource!"),
        }
    }
}
414
/// CPU-side representation of the cache texture: per-block metadata, free
/// lists, and the staged data/updates destined for the GPU copy.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
struct Texture {
    /// Logical height (rows); grown by `NEW_ROWS_PER_RESIZE` as needed.
    height: i32,
    /// Metadata for every block; slot 0 holds `Block::INVALID` so that
    /// real indices are non-zero.
    blocks: Vec<Block>,
    /// One entry per allocated row, recording its item size class.
    rows: Vec<Row>,
    /// Epoch assigned to newly created blocks.
    base_epoch: Epoch,
    /// Highest epoch any block has reached; the next base epoch after a
    /// clear is derived from this so old handles can never match.
    max_epoch: Epoch,
    /// Free-list heads, by size class.
    free_lists: FreeBlockLists,
    /// Head of the live (occupied) block list, per document.
    occupied_list_heads: FastHashMap<DocumentId, BlockIndex>,
    /// Data staged on the CPU, awaiting upload via `updates`.
    pending_blocks: Vec<GpuBlockData>,
    /// Copy operations accumulated for the next update list.
    updates: Vec<GpuCacheUpdate>,
    /// Total blocks currently allocated (counted in size-class units).
    allocated_block_count: usize,
    /// When utilization first dropped below `RECLAIM_THRESHOLD`;
    /// `None` while utilization is healthy. Not serialized.
    #[cfg_attr(feature = "serde", serde(skip))]
    reached_reclaim_threshold: Option<Instant>,
    /// Debug-display commands recorded when `GPU_CACHE_DBG` is enabled.
    #[cfg_attr(feature = "serde", serde(skip))]
    debug_commands: Vec<GpuCacheDebugCmd>,
    debug_flags: DebugFlags,
}
458
impl Texture {
    /// Create an empty texture. Slot 0 of `blocks` is filled with a
    /// placeholder so that every real block gets a non-zero index
    /// (required by `BlockIndex`'s `NonZeroU32`).
    fn new(base_epoch: Epoch, debug_flags: DebugFlags) -> Self {
        let blocks = vec![Block::INVALID];

        Texture {
            height: GPU_CACHE_INITIAL_HEIGHT,
            blocks,
            rows: Vec::new(),
            base_epoch,
            max_epoch: base_epoch,
            free_lists: FreeBlockLists::new(),
            pending_blocks: Vec::new(),
            updates: Vec::new(),
            occupied_list_heads: FastHashMap::default(),
            allocated_block_count: 0,
            reached_reclaim_threshold: None,
            debug_commands: Vec::new(),
            debug_flags,
        }
    }

    /// Allocate `block_count` blocks and return their cache location.
    ///
    /// If `pending_block_index` is `Some`, a GPU copy from the staged
    /// `pending_blocks` at that index is queued; with `None` the data
    /// upload is deferred (see `push_deferred_per_frame_blocks`).
    fn push_data(
        &mut self,
        pending_block_index: Option<usize>,
        block_count: usize,
        frame_stamp: FrameStamp
    ) -> CacheLocation {
        debug_assert!(frame_stamp.is_valid());
        // Round up to a size class and grab that class's free list.
        let (alloc_size, free_list) = self.free_lists
            .get_actual_block_count_and_free_list(block_count);

        // No free slot of this class: carve out a whole new row and thread
        // all of its slots onto the free list.
        if free_list.is_none() {
            if self.rows.len() as i32 == self.height {
                self.height += NEW_ROWS_PER_RESIZE;
            }

            let items_per_row = MAX_VERTEX_TEXTURE_WIDTH / alloc_size;
            let row_index = self.rows.len();
            self.rows.push(Row::new(alloc_size));

            // Link each new slot to the previously pushed one; the last
            // slot ends up as the free-list head.
            let mut prev_block_index = None;
            for i in 0 .. items_per_row {
                let address = GpuCacheAddress::new(i * alloc_size, row_index);
                let block_index = BlockIndex::new(self.blocks.len());
                let block = Block::new(address, prev_block_index, frame_stamp.frame_id(), self.base_epoch);
                self.blocks.push(block);
                prev_block_index = Some(block_index);
            }

            *free_list = prev_block_index;
        }

        // Pop a slot off the free list...
        let free_block_index = free_list.take().unwrap();
        let block = &mut self.blocks[free_block_index.get()];
        *free_list = block.next;

        // ...and push it onto the front of this document's occupied list.
        block.next = self.occupied_list_heads.get(&frame_stamp.document_id()).cloned();
        block.last_access_time = frame_stamp.frame_id();
        self.occupied_list_heads.insert(frame_stamp.document_id(), free_block_index);
        self.allocated_block_count += alloc_size;

        if let Some(pending_block_index) = pending_block_index {
            // Queue the GPU-side copy of the staged data. Note that only
            // `block_count` blocks are copied even though `alloc_size` were
            // reserved.
            self.updates.push(GpuCacheUpdate::Copy {
                block_index: pending_block_index,
                block_count,
                address: block.address,
            });
        }

        // Record the allocation for the debug display, if enabled.
        if self.debug_flags.contains(DebugFlags::GPU_CACHE_DBG) {
            self.debug_commands.push(GpuCacheDebugCmd::Alloc(GpuCacheDebugChunk {
                address: block.address,
                size: block_count,
            }));
        }

        CacheLocation {
            block_index: free_block_index,
            epoch: block.epoch,
        }
    }

    /// Return blocks unused for more than `FRAMES_BEFORE_EVICTION` frames
    /// to their free lists, for the given document.
    fn evict_old_blocks(&mut self, frame_stamp: FrameStamp) {
        debug_assert!(frame_stamp.is_valid());
        // Walk the singly-linked occupied list, keeping a trailing pointer
        // (`prev_block`) so evicted nodes can be unlinked in place.
        let mut current_block = self.occupied_list_heads.get(&frame_stamp.document_id()).map(|x| *x);
        let mut prev_block: Option<BlockIndex> = None;

        while let Some(index) = current_block {
            let (next_block, should_unlink) = {
                let block = &mut self.blocks[index.get()];

                let next_block = block.next;
                let mut should_unlink = false;

                if block.last_access_time + FRAMES_BEFORE_EVICTION < frame_stamp.frame_id() {
                    should_unlink = true;

                    // The row records the size class, which selects the
                    // free list this block goes back onto.
                    let row = &mut self.rows[block.address.v as usize];

                    let (_, free_list) = self.free_lists
                        .get_actual_block_count_and_free_list(row.block_count_per_item);

                    // Bumping the epoch invalidates any handle that still
                    // points at this block.
                    block.advance_epoch(&mut self.max_epoch);
                    block.next = *free_list;
                    *free_list = Some(index);

                    self.allocated_block_count -= row.block_count_per_item;

                    if self.debug_flags.contains(DebugFlags::GPU_CACHE_DBG) {
                        let cmd = GpuCacheDebugCmd::Free(block.address);
                        self.debug_commands.push(cmd);
                    }
                };

                (next_block, should_unlink)
            };

            // Unlink the evicted node from the occupied list: patch either
            // the predecessor's `next` or the per-document list head.
            if should_unlink {
                match prev_block {
                    Some(prev_block) => {
                        self.blocks[prev_block.get()].next = next_block;
                    }
                    None => {
                        match next_block {
                            Some(next_block) => {
                                self.occupied_list_heads.insert(frame_stamp.document_id(), next_block);
                            }
                            None => {
                                self.occupied_list_heads.remove(&frame_stamp.document_id());
                            }
                        }
                    }
                }
            } else {
                prev_block = current_block;
            }

            current_block = next_block;
        }
    }

    /// Fraction of the rows' total block capacity currently allocated.
    /// Only meaningful once at least one row exists (the empty case is
    /// guarded only by a debug assert).
    fn utilization(&self) -> f32 {
        let total_blocks = self.rows.len() * MAX_VERTEX_TEXTURE_WIDTH;
        debug_assert!(total_blocks > 0);
        let ratio = self.allocated_block_count as f32 / total_blocks as f32;
        debug_assert!(0.0 <= ratio && ratio <= 1.0, "Bad ratio: {}", ratio);
        ratio
    }
}
645
646
/// In-progress write of one cache item. Blocks are accumulated via `push`
/// and committed when the request is dropped (see the `Drop` impl below),
/// which is why this is `#[must_use]`.
#[must_use]
pub struct GpuDataRequest<'a> {
    /// Handle to bind once the data is committed on drop.
    #[allow(dead_code)]
    handle: &'a mut GpuCacheHandle,
    frame_stamp: FrameStamp,
    /// Offset into `texture.pending_blocks` where this request's data starts.
    start_index: usize,
    /// Upper bound on blocks this request may push (checked on drop).
    max_block_count: usize,
    texture: &'a mut Texture,
}
660
impl<'a> GpuDataRequest<'a> {
    /// Stage one block of data for this request.
    pub fn push<B>(&mut self, block: B)
    where
        B: Into<GpuBlockData>,
    {
        self.texture.pending_blocks.push(block.into());
    }

    /// Push a segment's worth of data: its local rect plus one block of
    /// extra data.
    pub fn write_segment(
        &mut self,
        local_rect: LayoutRect,
        extra_data: [f32; 4],
    ) {
        // Value deliberately discarded — this keeps the VECS_PER_SEGMENT
        // import referenced; presumably it documents that a segment is
        // this many blocks (TODO confirm it matches the two pushes below).
        let _ = VECS_PER_SEGMENT;
        self.push(local_rect);
        self.push(extra_data);
    }

    /// Number of blocks pushed so far by this request.
    pub fn current_used_block_num(&self) -> usize {
        self.texture.pending_blocks.len() - self.start_index
    }
}
684
impl<'a> Drop for GpuDataRequest<'a> {
    /// Commit the staged blocks to the cache and bind the handle to the
    /// allocated location.
    fn drop(&mut self) {
        let block_count = self.current_used_block_num();
        debug_assert!(block_count <= self.max_block_count);

        let location = self.texture
            .push_data(Some(self.start_index), block_count, self.frame_stamp);
        self.handle.location = Some(location);
    }
}
696
697
/// The main CPU-side GPU cache interface: owns the backing `Texture` and
/// the per-frame / per-frame-group bookkeeping.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct GpuCache {
    /// Stamp of the frame currently being built.
    now: FrameStamp,
    /// The CPU-side cache storage.
    texture: Texture,
    /// Blocks re-used this frame without re-uploading (profiling counter).
    saved_block_count: usize,
    /// Current debug flags, mirrored into `texture.debug_flags`.
    debug_flags: DebugFlags,
    /// True if a `clear` happened and the next update list must tell the
    /// renderer to discard its texture contents.
    pending_clear: bool,
    /// Set by `prepare_for_frames`, cleared by `bookkeep_after_frames`.
    prepared_for_frames: bool,
    /// True while cached data is invalid and frames must be rebuilt.
    requires_frame_build: bool,
    /// Documents still expected to build a frame in this frame group.
    document_frames_to_build: FastHashSet<DocumentId>,
}
725
impl GpuCache {
    /// Create an empty cache with debug flags off.
    pub fn new() -> Self {
        let debug_flags = DebugFlags::empty();
        GpuCache {
            now: FrameStamp::INVALID,
            texture: Texture::new(Epoch(0), debug_flags),
            saved_block_count: 0,
            debug_flags,
            pending_clear: false,
            prepared_for_frames: false,
            requires_frame_build: false,
            document_frames_to_build: FastHashSet::default(),
        }
    }

    /// Test-only constructor that fakes the frame-group bookkeeping so
    /// requests can be issued without driving a full frame cycle.
    #[cfg(test)]
    pub fn new_for_testing() -> Self {
        let mut cache = Self::new();
        let mut now = FrameStamp::first(DocumentId::new(IdNamespace(1), 1));
        now.advance();
        cache.prepared_for_frames = true;
        cache.begin_frame(now);
        cache
    }

    /// Drop all cached data and start over with a fresh texture. The new
    /// base epoch is one past the old maximum, so every outstanding handle
    /// becomes stale. Must not be called with un-extracted updates.
    pub fn clear(&mut self) {
        assert!(self.texture.updates.is_empty(), "Clearing with pending updates");
        let mut next_base_epoch = self.texture.max_epoch;
        next_base_epoch.next();
        self.texture = Texture::new(next_base_epoch, self.debug_flags);
        self.saved_block_count = 0;
        self.pending_clear = true;
        self.requires_frame_build = true;
    }

    /// True while a `clear` has invalidated cached data and frames must be
    /// rebuilt before rendering.
    pub fn requires_frame_build(&self) -> bool {
        self.requires_frame_build
    }

    /// Called once before a group of frames is built; reclaims memory if
    /// utilization has been low for long enough.
    pub fn prepare_for_frames(&mut self) {
        self.prepared_for_frames = true;
        if self.should_reclaim_memory() {
            self.clear();
            debug_assert!(self.document_frames_to_build.is_empty());
            // NOTE(review): `clear()` above has already replaced
            // `self.texture`, so this loop iterates the *new* texture's
            // (empty) occupied list and inserts nothing. It was presumably
            // meant to snapshot the old texture's document ids before
            // clearing — verify against upstream before changing.
            for &document_id in self.texture.occupied_list_heads.keys() {
                self.document_frames_to_build.insert(document_id);
            }
        }
    }

    /// Called once after the frame group completes; resets the group flags.
    /// Panics if some documents never built their frame.
    pub fn bookkeep_after_frames(&mut self) {
        assert!(self.document_frames_to_build.is_empty());
        assert!(self.prepared_for_frames);
        self.requires_frame_build = false;
        self.prepared_for_frames = false;
    }

    /// Begin building a frame: record the stamp and evict stale blocks for
    /// its document. Requires a prior `prepare_for_frames`.
    pub fn begin_frame(&mut self, stamp: FrameStamp) {
        debug_assert!(self.texture.pending_blocks.is_empty());
        assert!(self.prepared_for_frames);
        profile_scope!("begin_frame");
        self.now = stamp;
        self.texture.evict_old_blocks(self.now);
        self.saved_block_count = 0;
    }

    /// Force the data behind `handle` to be re-uploaded on its next
    /// request, by advancing the block's epoch past the handle's.
    pub fn invalidate(&mut self, handle: &GpuCacheHandle) {
        if let Some(ref location) = handle.location {
            // Only invalidate if the handle is still current; a stale
            // handle's block may have been reallocated to someone else.
            if let Some(block) = self.texture.blocks.get_mut(location.block_index.get()) {
                if block.epoch == location.epoch {
                    block.advance_epoch(&mut self.texture.max_epoch);
                }
            }
        }
    }

    /// Request the data behind `handle`. Returns `None` if the cached copy
    /// is still valid (and touches it so it won't be evicted); otherwise
    /// returns a `GpuDataRequest` the caller must fill with new data.
    pub fn request<'a>(&'a mut self, handle: &'a mut GpuCacheHandle) -> Option<GpuDataRequest<'a>> {
        let mut max_block_count = MAX_VERTEX_TEXTURE_WIDTH;
        // Fast path: handle still points at a live block of the same epoch.
        if let Some(ref location) = handle.location {
            if let Some(block) = self.texture.blocks.get_mut(location.block_index.get()) {
                if block.epoch == location.epoch {
                    max_block_count = self.texture.rows[block.address.v as usize].block_count_per_item;
                    if block.last_access_time != self.now.frame_id() {
                        // Touch the block so eviction sees it as live, and
                        // count the blocks we avoided re-uploading.
                        block.last_access_time = self.now.frame_id();
                        self.saved_block_count += max_block_count;
                    }
                    return None;
                }
            }
        }

        debug_assert!(self.now.is_valid());
        Some(GpuDataRequest {
            handle,
            frame_stamp: self.now,
            start_index: self.texture.pending_blocks.len(),
            texture: &mut self.texture,
            max_block_count,
        })
    }

    /// Allocate and stage `blocks` immediately, returning a bound handle.
    pub fn push_per_frame_blocks(&mut self, blocks: &[GpuBlockData]) -> GpuCacheHandle {
        let start_index = self.texture.pending_blocks.len();
        self.texture.pending_blocks.extend_from_slice(blocks);
        let location = self.texture
            .push_data(Some(start_index), blocks.len(), self.now);
        GpuCacheHandle {
            location: Some(location),
        }
    }

    /// Reserve `block_count` blocks without staging data yet (no GPU copy
    /// is queued), returning a bound handle.
    pub fn push_deferred_per_frame_blocks(&mut self, block_count: usize) -> GpuCacheHandle {
        let location = self.texture.push_data(None, block_count, self.now);
        GpuCacheHandle {
            location: Some(location),
        }
    }

    /// Finish the frame: report profiler counters, track when utilization
    /// first fell below the reclaim threshold, and mark this document's
    /// frame as built. Returns the frame stamp.
    pub fn end_frame(
        &mut self,
        profile: &mut TransactionProfile,
    ) -> FrameStamp {
        profile_scope!("end_frame");
        profile.set(profiler::GPU_CACHE_ROWS_TOTAL, self.texture.rows.len());
        profile.set(profiler::GPU_CACHE_BLOCKS_TOTAL, self.texture.allocated_block_count);
        profile.set(profiler::GPU_CACHE_BLOCKS_SAVED, self.saved_block_count);

        // Only start the reclaim timer once the texture has actually grown
        // past its initial size; reset it whenever utilization recovers.
        let reached_threshold =
            self.texture.rows.len() > (GPU_CACHE_INITIAL_HEIGHT as usize) &&
            self.texture.utilization() < RECLAIM_THRESHOLD;
        if reached_threshold {
            self.texture.reached_reclaim_threshold.get_or_insert_with(Instant::now);
        } else {
            self.texture.reached_reclaim_threshold = None;
        }

        self.document_frames_to_build.remove(&self.now.document_id());
        self.now
    }

    /// True once utilization has stayed below the reclaim threshold for
    /// longer than `RECLAIM_DELAY_S`.
    pub fn should_reclaim_memory(&self) -> bool {
        self.texture.reached_reclaim_threshold
            .map_or(false, |t| t.elapsed() > Duration::from_secs(RECLAIM_DELAY_S))
    }

    /// Drain the accumulated updates/staged data into an update list for
    /// the renderer, resetting the `pending_clear` flag.
    pub fn extract_updates(&mut self) -> GpuCacheUpdateList {
        let clear = self.pending_clear;
        self.pending_clear = false;
        GpuCacheUpdateList {
            frame_id: self.now.frame_id(),
            clear,
            height: self.texture.height,
            debug_commands: self.texture.debug_commands.take_and_preallocate(),
            updates: self.texture.updates.take_and_preallocate(),
            blocks: self.texture.pending_blocks.take_and_preallocate(),
        }
    }

    /// Update debug flags, mirroring them into the texture.
    pub fn set_debug_flags(&mut self, flags: DebugFlags) {
        self.debug_flags = flags;
        self.texture.debug_flags = flags;
    }

    /// Resolve a handle to its texture address. Panics if the handle was
    /// never bound; debug-asserts that it is current and was requested
    /// this frame.
    pub fn get_address(&self, id: &GpuCacheHandle) -> GpuCacheAddress {
        let location = id.location.expect("handle not requested or allocated!");
        let block = &self.texture.blocks[location.block_index.get()];
        debug_assert_eq!(block.epoch, location.epoch);
        debug_assert_eq!(block.last_access_time, self.now.frame_id());
        block.address
    }
}
930
#[test]
#[cfg(target_pointer_width = "64")]
fn test_struct_sizes() {
    use std::mem;
    // `Block` is the per-slot bulk metadata of the cache; guard its size
    // so layout regressions are caught.
    assert_eq!(mem::size_of::<Block>(), 24, "Block size changed");
}