// indexmap/map/core.rs

//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out, so the compiler can check that we are
//! not using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod entry;
mod extract;

pub mod raw_entry_v1;

use alloc::vec::{self, Vec};
use core::mem;
use core::ops::RangeBounds;
use hashbrown::hash_table;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue, TryReserveError};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;

/// Core of the map that does not depend on S
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec maintaining entry order.
    entries: Entries<K, V>,
}

/// Mutable references to the parts of an `IndexMapCore`.
///
/// `HashTable::find_entry` takes hold of `&mut indices`, so we have to borrow our
/// `&mut entries` separately, and there's no way to go back to a `&mut IndexMapCore`.
/// So this type is used to implement methods on the split references, and
/// `IndexMapCore` can also call those to avoid duplication.
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}
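
// Illustrative sketch, not part of the original source: a minimal demonstration
// of why the split `RefMut` borrows exist. `HashTable::find_entry` keeps the
// table mutably borrowed for as long as the returned entry is alive, so the
// entries vector has to be reachable through a separate borrow, mirroring the
// two fields of `RefMut`. Only the `hash_table` API imported above is used;
// the hash value `0` is a dummy chosen for the sketch.
#[test]
fn refmut_split_borrow_sketch() {
    let mut indices: Indices = Indices::new();
    let mut entries: Vec<&str> = Vec::new();

    // Store one entry at index 0 under the dummy hash.
    indices.insert_unique(0, 0, |_| 0);
    entries.push("payload");

    // While `entry` borrows `indices`, `entries` is still usable because it is
    // a separate binding; a single `&mut` over both parts would not allow this.
    if let Ok(entry) = indices.find_entry(0, |&i| i == 0) {
        assert_eq!(entries[*entry.get()], "payload");
        entry.remove();
    }
    assert_eq!(indices.len(), 0);
}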

#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Inserts many entries into the indices table without reallocating,
/// and without regard for duplication.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}
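
// Illustrative sketch, not part of the original source: exercises
// `insert_bulk_no_grow` with capacity reserved up front, so its
// `unreachable!` hasher is never needed for a resize. The fixed hashes and
// the direct `HashValue`/`Bucket` construction are test-only shortcuts,
// assuming the crate-root `HashValue` newtype is constructible from a
// `usize` here.
#[test]
fn insert_bulk_no_grow_sketch() {
    let entries: Entries<u32, u32> = (0..4u32)
        .map(|i| Bucket {
            hash: HashValue(i as usize),
            key: i,
            value: i * 10,
        })
        .collect();

    let mut indices = Indices::with_capacity(entries.len());
    insert_bulk_no_grow(&mut indices, &entries);
    assert_eq!(indices.len(), entries.len());

    // Every entry can be found again under its recorded hash.
    for (i, entry) in entries.iter().enumerate() {
        assert_eq!(indices.find(entry.hash.get(), |&j| j == i), Some(&i));
    }
}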

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}
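
// Illustrative sketch, not part of the original source: `clone` and
// `clone_from` go through the impl above, and the copy must preserve
// insertion order as well as the key-value data. Uses the public `IndexMap`
// API, assuming the default `std` feature so `IndexMap::new` is available.
#[cfg(feature = "std")]
#[test]
fn clone_preserves_order_sketch() {
    let mut map = crate::IndexMap::new();
    for &key in &[9, 3, 7, 1] {
        map.insert(key, key * 2);
    }

    let mut copy = crate::IndexMap::new();
    copy.clone_from(&map);

    // Same pairs, in the same insertion order.
    assert!(map.iter().eq(copy.iter()));
    assert!(copy.keys().copied().eq([9, 3, 7, 1]));
}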

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Replaces the key at the given index,
    /// *without* checking whether it already exists.
    #[track_caller]
    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
        self.borrow_mut().replace_index_unique(index, hash, key).0
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

    #[inline]
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

    #[inline]
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by swapping it with the last
    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}
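
// Illustrative sketch, not part of the original source: the observable
// contract of the insertion and removal methods above, seen through the
// public `IndexMap` API, assuming the default `std` feature so
// `IndexMap::new` is available.
#[cfg(feature = "std")]
#[test]
fn insert_and_remove_order_sketch() {
    let mut map = crate::IndexMap::new();

    // `insert_full` reports the entry's index and any replaced value.
    assert_eq!(map.insert_full("a", 1), (0, None));
    assert_eq!(map.insert_full("b", 2), (1, None));
    assert_eq!(map.insert_full("c", 3), (2, None));
    assert_eq!(map.insert_full("a", 10), (0, Some(1)));

    // `shift_remove` closes the gap and preserves the order of the rest.
    let mut shifted = map.clone();
    assert_eq!(shifted.shift_remove_full("a"), Some((0, "a", 10)));
    assert!(shifted.keys().copied().eq(["b", "c"]));

    // `swap_remove` moves the last entry into the hole instead.
    let mut swapped = map.clone();
    assert_eq!(swapped.swap_remove_full("a"), Some((0, "a", 10)));
    assert!(swapped.keys().copied().eq(["c", "b"]));

    // `pop` removes the last entry, and `reverse` flips the stored order.
    assert_eq!(map.pop(), Some(("c", 3)));
    map.reverse();
    assert!(map.keys().copied().eq(["b", "a"]));
}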

/// Reserve entries capacity, rounded up to match the indices (via `try_capacity`).
fn reserve_entries<K, V>(entries: &mut Entries<K, V>, additional: usize, try_capacity: usize) {
    // Use a soft-limit on the maximum capacity, but if the caller explicitly
    // requested more, do it and let them have the resulting panic.
    let try_capacity = try_capacity.min(IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY);
    let try_add = try_capacity - entries.len();
    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
        return;
    }
    entries.reserve_exact(additional);
}
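
// Illustrative sketch, not part of the original source: the reservation
// helpers above only promise that `capacity()` (the minimum of the indices
// and entries capacities) covers the requested headroom; the exact rounding
// is an internal detail. Assumes the default `std` feature so
// `IndexMap::with_capacity` is available.
#[cfg(feature = "std")]
#[test]
fn reserve_capacity_sketch() {
    let mut map = crate::IndexMap::with_capacity(10);
    assert!(map.capacity() >= 10);

    map.insert("a", 1);
    map.reserve(100);
    assert!(map.capacity() - map.len() >= 100);

    // `try_reserve` makes the same guarantee, reporting allocation failure
    // instead of panicking.
    map.try_reserve(50).unwrap();
    assert!(map.capacity() - map.len() >= 50);
}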

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

    /// Reserve entries capacity, rounded up to match the indices
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        reserve_entries(self.entries, additional, self.indices.capacity());
    }

    /// Insert a key-value pair in `entries`,
    /// *without* checking whether it already exists.
    fn insert_unique(self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        if self.entries.len() == self.entries.capacity() {
            // We can't call `indices.capacity()` while this `entry` has borrowed it, so we'll have
            // to amortize growth on our own. It's still an improvement over the basic `Vec::push`
            // doubling though, since we also consider `MAX_ENTRIES_CAPACITY`.
            reserve_entries(self.entries, 1, 2 * self.entries.capacity());
        }
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

    /// Replaces the key at the given index,
    /// *without* checking whether it already exists.
    #[track_caller]
    fn replace_index_unique(
        self,
        index: usize,
        hash: HashValue,
        key: K,
    ) -> (K, OccupiedEntry<'a, K, V>) {
        // NB: This removal and insertion isn't "no grow" (with unreachable hasher)
        // because hashbrown's tombstones might force a resize anyway.
        erase_index(self.indices, self.entries[index].hash, index);
        let table_entry = self
            .indices
            .insert_unique(hash.get(), index, get_hash(self.entries));

        let entry = &mut self.entries[index];
        entry.hash = hash;
        let old_key = mem::replace(&mut entry.key, key);
        (old_key, OccupiedEntry::new(self.entries, table_entry))
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it
    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

    #[track_caller]
    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

    #[track_caller]
    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}
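
// Illustrative sketch, not part of the original source: the index
// manipulation methods above, observed through the public `IndexMap` API,
// assuming the default `std` feature so `IndexMap::new` is available.
#[cfg(feature = "std")]
#[test]
fn index_manipulation_sketch() {
    let mut map = crate::IndexMap::new();
    for &key in &["a", "b", "c", "d"] {
        map.insert(key, ());
    }

    // `shift_insert` places a new key at a chosen index, shifting the rest.
    map.shift_insert(1, "x", ());
    assert!(map.keys().copied().eq(["a", "x", "b", "c", "d"]));

    // `move_index` rotates the entries between the two positions.
    map.move_index(1, 3);
    assert!(map.keys().copied().eq(["a", "b", "c", "x", "d"]));

    // `swap_indices` swaps exactly two entries and leaves the rest alone.
    map.swap_indices(0, 4);
    assert!(map.keys().copied().eq(["d", "b", "c", "x", "a"]));
}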

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}
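
// Illustrative sketch, not part of the original source: `truncate`, `drain`,
// and `split_off` all funnel through `erase_indices` above, whichever
// internal strategy it picks. Uses the public `IndexMap` API, assuming the
// default `std` feature.
#[cfg(feature = "std")]
#[test]
fn erase_indices_paths_sketch() {
    let mut map: crate::IndexMap<i32, i32> = (0..10).map(|i| (i, i * i)).collect();

    // `split_off` keeps `..at` here and moves `at..` into the new map, in order.
    let tail = map.split_off(7);
    assert!(map.keys().copied().eq(0..7));
    assert!(tail.keys().copied().eq(7..10));

    // `drain` yields the removed range in order and shifts the rest down.
    let drained: Vec<i32> = map.drain(2..5).map(|(k, _)| k).collect();
    assert_eq!(drained, [2, 3, 4]);
    assert!(map.keys().copied().eq([0, 1, 5, 6]));

    // `truncate` simply drops the tail.
    map.truncate(2);
    assert!(map.keys().copied().eq([0, 1]));
}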