mod entry;
mod extract;

pub mod raw_entry_v1;

use alloc::vec::{self, Vec};
use core::mem;
use core::ops::RangeBounds;
use hashbrown::hash_table;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue, TryReserveError};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;

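/// Core of the map that does not depend on the hasher: a hash table of
/// indices into `entries`, plus the entries themselves stored densely in
/// insertion order.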
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    indices: Indices,
    entries: Entries<K, V>,
}

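/// Mutable references to both halves of an `IndexMapCore`, so internal
/// helpers can update `indices` and `entries` together without borrow
/// conflicts.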
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}

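/// Builds the hasher closure used by the index table: given a stored index,
/// return that entry's cached hash.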
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

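/// Builds the equality closure used by the index table: given a stored index,
/// compare the lookup key against that entry's key.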
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

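/// Remove the hash-table slot that maps `hash` to `index`.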
#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

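/// Rewrite the hash-table slot that currently stores `old` so it stores `new`.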
#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

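/// Append an index mapping for each entry, without growing the table and
/// without checking for duplicate keys.
///
/// ***Panics*** if the table does not already have enough spare capacity.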
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        // The hasher closure is never called because the table does not grow.
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> IndexMapCore<K, V> {
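    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX` bytes.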
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

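    /// Split off the entries from `at..` into a new map, keeping `..at` in `self`.
    ///
    /// ***Panics*** if `at > self.len()`.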
    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

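    /// Split `self` in three: keep the entries before `range`, return a new map
    /// holding the entries after it, plus an iterator over the drained `range`.
    ///
    /// ***Panics*** if `range` is out of bounds.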
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

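    /// Append from another map without checking whether items already exist.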
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary; reserve_entries rounds capacity up,
        // so there may already be room.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, as above.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

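    /// Try to reserve entries capacity, rounded up to match the indices capacity.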
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

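    /// Return the index in `entries` where an equivalent key can be found.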
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

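    /// Append a key-value pair to `entries`,
    /// *without* checking whether an equivalent key already exists.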
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

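    /// Insert the key-value pair, replacing the value of an existing equivalent key,
    /// and return its index along with the previous value, if any.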
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

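    /// Like `insert_full`, but on a match it also replaces the stored key,
    /// returning the previous key-value pair.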
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

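    /// Replace the key at `index` with `key` (which must hash to `hash`),
    /// *without* checking whether an equivalent key already exists,
    /// and return the old key.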
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        self.borrow_mut().replace_index_unique(index, hash, key).0
    }

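    /// Remove an entry by shifting all entries that follow it.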
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

    #[inline]
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

    #[inline]
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

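    /// Remove an entry by swapping it with the last entry.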
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

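    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`.
    ///
    /// All of these items should still be at their original location in `entries`;
    /// callers such as `truncate` and `drain` remove them from `entries` afterwards.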
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies.
        if erased == 0 {
            // Degenerate case, nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices.
            self.indices.clear();

            // Reinsert stable indices, then shifted indices.
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust.

            // Find erased indices.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices.
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments.
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices: each index simply maps to its
        // mirrored position in the reversed `entries`.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}

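/// Reserve entries capacity, rounded up to match the indices capacity
/// (via `try_capacity`) when possible, and otherwise exactly `additional`.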
fn reserve_entries<K, V>(entries: &mut Entries<K, V>, additional: usize, try_capacity: usize) {
    let try_capacity = try_capacity.min(IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY);
    let try_add = try_capacity - entries.len();
    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
        return;
    }
    entries.reserve_exact(additional);
}

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        reserve_entries(self.entries, additional, self.indices.capacity());
    }

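    /// Insert a key-value pair in `entries`,
    /// *without* checking whether an equivalent key already exists.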
    fn insert_unique(self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        if self.entries.len() == self.entries.capacity() {
            // We can't call `indices.capacity()` while `entry` has it borrowed,
            // so amortize growth on our own by doubling.
            reserve_entries(self.entries, 1, 2 * self.entries.capacity());
        }
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

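    /// Replace the key at `index` with `key` (which must hash to `hash`),
    /// *without* checking whether an equivalent key already exists,
    /// returning the old key and an occupied entry for the updated slot.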
    #[track_caller]
    fn replace_index_unique(
        self,
        index: usize,
        hash: HashValue,
        key: K,
    ) -> (K, OccupiedEntry<'a, K, V>) {
        erase_index(self.indices, self.entries[index].hash, index);
        let table_entry = self
            .indices
            .insert_unique(hash.get(), index, get_hash(&self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        let old_key = mem::replace(&mut entry.key, key);
        (old_key, OccupiedEntry::new(self.entries, table_entry))
    }

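    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether an equivalent key already exists.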
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // The new `index` mapping is the one being inserted, so it never shows up here.
            debug_assert_ne!(i, index);
            // Entries haven't been shifted yet, so an already-incremented index `i`
            // still refers to the entry stored at `i - 1`.
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

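    /// Finish removing an entry by shifting all entries that follow it.
    ///
    /// The index should already be removed from `self.indices`.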
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct the indices that point to the entries following the removed one.
        self.decrement_indices(index + 1, self.entries.len());

        // `Vec::remove` shifts the remaining entries down to fill the gap.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

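    /// Finish removing an entry by swapping it with the last entry.
    ///
    /// The index should already be removed from `self.indices`.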
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // `swap_remove` moves the last entry into `index`.
        let entry = self.entries.swap_remove(index);

        // If an entry was moved in from the end (i.e. the removed entry was not
        // the last one), re-point its index from `last` to `index`.
        if let Some(entry) = self.entries.get(index) {
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

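    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`,
    /// and all entries should still be in their original positions.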
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

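    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`,
    /// and all entries should still be in their original positions.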
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse order
            // so we never have duplicated indices in the table.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

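    /// Move the entry at `from` to `to`, shifting everything in between.
    ///
    /// ***Panics*** if either index is out of bounds.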
    #[track_caller]
    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

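    /// Swap the entries (and their index mappings) at positions `a` and `b`.
    ///
    /// ***Panics*** if either index is out of bounds.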
    #[track_caller]
    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        // (Out-of-bounds values still panic on the entry lookups below.)
        if a == b && a < self.entries.len() {
            return;
        }

        // Look up both hash-table slots at once, telling them apart by
        // which entry index each one is expected to hold.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}