From e577bf2556a40cb85d3befceeead50ee77ae508d Mon Sep 17 00:00:00 2001 From: Josh Stone Date: Tue, 1 Oct 2024 08:11:01 -0700 Subject: [PATCH] Use `hashbrown::HashTable` instead of `RawTable` --- Cargo.toml | 3 +- src/lib.rs | 2 +- src/map/core.rs | 541 ++++++++++++++++++++--------------- src/map/core/entry.rs | 112 +++++--- src/map/core/raw.rs | 164 ----------- src/map/core/raw_entry_v1.rs | 78 ++--- src/map/tests.rs | 2 +- 7 files changed, 425 insertions(+), 477 deletions(-) delete mode 100644 src/map/core/raw.rs diff --git a/Cargo.toml b/Cargo.toml index 4590d455..52257db9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,9 +27,8 @@ rayon = { version = "1.5.3", optional = true } rustc-rayon = { package = "rustc-rayon", version = "0.5", optional = true } [dependencies.hashbrown] -version = "0.14.1" +version = "0.15.0" default-features = false -features = ["raw"] [dev-dependencies] itertools = "0.13" diff --git a/src/lib.rs b/src/lib.rs index 9ffcd420..3e16bc6e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,4 @@ -// We *mostly* avoid unsafe code, but `map::core::raw` allows it to use `RawTable` buckets. +// We *mostly* avoid unsafe code, but `Slice` allows it for DST casting. #![deny(unsafe_code)] #![warn(rust_2018_idioms)] #![no_std] diff --git a/src/map/core.rs b/src/map/core.rs index ef6d5727..f42cccbf 100644 --- a/src/map/core.rs +++ b/src/map/core.rs @@ -8,11 +8,10 @@ //! However, we should probably not let this show in the public API or docs. mod entry; -mod raw; pub mod raw_entry_v1; -use hashbrown::raw::RawTable; +use hashbrown::hash_table; use crate::vec::{self, Vec}; use crate::TryReserveError; @@ -20,16 +19,31 @@ use core::mem; use core::ops::RangeBounds; use crate::util::simplify_range; -use crate::{Bucket, Entries, Equivalent, HashValue}; +use crate::{Bucket, Equivalent, HashValue}; + +type Indices = hash_table::HashTable; +type Entries = Vec>; pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry}; /// Core of the map that does not depend on S +#[derive(Debug)] pub(crate) struct IndexMapCore { /// indices mapping from the entry hash to its index. - indices: RawTable, - /// entries is a dense vec of entries in their order. - entries: Vec>, + indices: Indices, + /// entries is a dense vec maintaining entry order. + entries: Entries, +} + +/// Mutable references to the parts of an `IndexMapCore`. +/// +/// When using `HashTable::find_entry`, that takes hold of `&mut indices`, so we have to borrow our +/// `&mut entries` separately, and there's no way to go back to a `&mut IndexMapCore`. So this type +/// is used to implement methods on the split references, and `IndexMapCore` can also call those to +/// avoid duplication. 
+struct RefMut<'a, K, V> { + indices: &'a mut Indices, + entries: &'a mut Entries, } #[inline(always)] @@ -46,19 +60,33 @@ fn equivalent<'a, K, V, Q: ?Sized + Equivalent>( } #[inline] -fn erase_index(table: &mut RawTable, hash: HashValue, index: usize) { - let erased = table.erase_entry(hash.get(), move |&i| i == index); - debug_assert!(erased); +fn erase_index(table: &mut Indices, hash: HashValue, index: usize) { + if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) { + entry.remove(); + } else if cfg!(debug_assertions) { + panic!("index not found"); + } } #[inline] -fn update_index(table: &mut RawTable, hash: HashValue, old: usize, new: usize) { +fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) { let index = table - .get_mut(hash.get(), move |&i| i == old) + .find_mut(hash.get(), move |&i| i == old) .expect("index not found"); *index = new; } +/// Inserts many entries into the indices table without reallocating, +/// and without regard for duplication. +/// +/// ***Panics*** if there is not sufficient capacity already. +fn insert_bulk_no_grow(indices: &mut Indices, entries: &[Bucket]) { + assert!(indices.capacity() - indices.len() >= entries.len()); + for entry in entries { + indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!()); + } +} + impl Clone for IndexMapCore where K: Clone, @@ -71,32 +99,17 @@ where } fn clone_from(&mut self, other: &Self) { - let hasher = get_hash(&other.entries); - self.indices.clone_from_with_hasher(&other.indices, hasher); + self.indices.clone_from(&other.indices); if self.entries.capacity() < other.entries.len() { // If we must resize, match the indices capacity. let additional = other.entries.len() - self.entries.len(); - self.reserve_entries(additional); + self.borrow_mut().reserve_entries(additional); } self.entries.clone_from(&other.entries); } } -#[cfg(feature = "test_debug")] -impl core::fmt::Debug for IndexMapCore -where - K: core::fmt::Debug, - V: core::fmt::Debug, -{ - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("IndexMapCore") - .field("indices", &raw::DebugIndices(&self.indices)) - .field("entries", &self.entries) - .finish() - } -} - -impl Entries for IndexMapCore { +impl crate::Entries for IndexMapCore { type Entry = Bucket; #[inline] @@ -130,15 +143,20 @@ impl IndexMapCore { #[inline] pub(crate) const fn new() -> Self { IndexMapCore { - indices: RawTable::new(), + indices: Indices::new(), entries: Vec::new(), } } + #[inline] + fn borrow_mut(&mut self) -> RefMut<'_, K, V> { + RefMut::new(&mut self.indices, &mut self.entries) + } + #[inline] pub(crate) fn with_capacity(n: usize) -> Self { IndexMapCore { - indices: RawTable::with_capacity(n), + indices: Indices::with_capacity(n), entries: Vec::with_capacity(n), } } @@ -192,8 +210,8 @@ impl IndexMapCore { self.erase_indices(at, self.entries.len()); let entries = self.entries.split_off(at); - let mut indices = RawTable::with_capacity(entries.len()); - raw::insert_bulk_no_grow(&mut indices, &entries); + let mut indices = Indices::with_capacity(entries.len()); + insert_bulk_no_grow(&mut indices, &entries); Self { indices, entries } } @@ -206,15 +224,15 @@ impl IndexMapCore { let entries = self.entries.split_off(range.end); let drained = self.entries.split_off(range.start); - let mut indices = RawTable::with_capacity(entries.len()); - raw::insert_bulk_no_grow(&mut indices, &entries); + let mut indices = Indices::with_capacity(entries.len()); + insert_bulk_no_grow(&mut indices, &entries); 
(Self { indices, entries }, drained.into_iter()) } /// Append from another map without checking whether items already exist. pub(crate) fn append_unchecked(&mut self, other: &mut Self) { self.reserve(other.len()); - raw::insert_bulk_no_grow(&mut self.indices, &other.entries); + insert_bulk_no_grow(&mut self.indices, &other.entries); self.entries.append(&mut other.entries); other.indices.clear(); } @@ -224,20 +242,8 @@ impl IndexMapCore { self.indices.reserve(additional, get_hash(&self.entries)); // Only grow entries if necessary, since we also round up capacity. if additional > self.entries.capacity() - self.entries.len() { - self.reserve_entries(additional); - } - } - - /// Reserve entries capacity, rounded up to match the indices - fn reserve_entries(&mut self, additional: usize) { - // Use a soft-limit on the maximum capacity, but if the caller explicitly - // requested more, do it and let them have the resulting panic. - let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY); - let try_add = new_capacity - self.entries.len(); - if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { - return; + self.borrow_mut().reserve_entries(additional); } - self.entries.reserve_exact(additional); } /// Reserve capacity for `additional` more key-value pairs, without over-allocating. @@ -301,45 +307,31 @@ impl IndexMapCore { } } - /// Append a key-value pair to `entries`, *without* checking whether it already exists. - fn push_entry(&mut self, hash: HashValue, key: K, value: V) { - if self.entries.len() == self.entries.capacity() { - // Reserve our own capacity synced to the indices, - // rather than letting `Vec::push` just double it. - self.reserve_entries(1); - } - self.entries.push(Bucket { hash, key, value }); - } - - /// Insert a key-value pair in `entries` at a particular index, - /// *without* checking whether it already exists. - fn insert_entry(&mut self, index: usize, hash: HashValue, key: K, value: V) { - if self.entries.len() == self.entries.capacity() { - // Reserve our own capacity synced to the indices, - // rather than letting `Vec::insert` just double it. 
- self.reserve_entries(1); - } - self.entries.insert(index, Bucket { hash, key, value }); - } - /// Return the index in `entries` where an equivalent key can be found pub(crate) fn get_index_of(&self, hash: HashValue, key: &Q) -> Option where Q: ?Sized + Equivalent, { let eq = equivalent(key, &self.entries); - self.indices.get(hash.get(), eq).copied() + self.indices.find(hash.get(), eq).copied() } pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option) where K: Eq, { - match self.find_or_insert(hash, &key) { - Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))), - Err(i) => { - debug_assert_eq!(i, self.entries.len()); - self.push_entry(hash, key, value); + let eq = equivalent(&key, &self.entries); + let hasher = get_hash(&self.entries); + match self.indices.entry(hash.get(), eq, hasher) { + hash_table::Entry::Occupied(entry) => { + let i = *entry.get(); + (i, Some(mem::replace(&mut self.entries[i].value, value))) + } + hash_table::Entry::Vacant(entry) => { + let i = self.entries.len(); + entry.insert(i); + self.borrow_mut().push_entry(hash, key, value); + debug_assert_eq!(self.indices.len(), self.entries.len()); (i, None) } } @@ -355,8 +347,11 @@ impl IndexMapCore { where K: Eq, { - match self.find_or_insert(hash, &key) { - Ok(i) => { + let eq = equivalent(&key, &self.entries); + let hasher = get_hash(&self.entries); + match self.indices.entry(hash.get(), eq, hasher) { + hash_table::Entry::Occupied(entry) => { + let i = *entry.get(); let entry = &mut self.entries[i]; let kv = ( mem::replace(&mut entry.key, key), @@ -364,17 +359,195 @@ impl IndexMapCore { ); (i, Some(kv)) } - Err(i) => { - debug_assert_eq!(i, self.entries.len()); - self.push_entry(hash, key, value); + hash_table::Entry::Vacant(entry) => { + let i = self.entries.len(); + entry.insert(i); + self.borrow_mut().push_entry(hash, key, value); + debug_assert_eq!(self.indices.len(), self.entries.len()); (i, None) } } } + /// Remove an entry by shifting all entries that follow it + pub(crate) fn shift_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Equivalent, + { + let eq = equivalent(key, &self.entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(entry) => { + let (index, _) = entry.remove(); + let (key, value) = self.borrow_mut().shift_remove_finish(index); + Some((index, key, value)) + } + Err(_) => None, + } + } + + /// Remove an entry by shifting all entries that follow it + #[inline] + pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.borrow_mut().shift_remove_index(index) + } + + #[inline] + pub(super) fn move_index(&mut self, from: usize, to: usize) { + self.borrow_mut().move_index(from, to); + } + + #[inline] + pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { + self.borrow_mut().swap_indices(a, b); + } + + /// Remove an entry by swapping it with the last + pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> + where + Q: ?Sized + Equivalent, + { + let eq = equivalent(key, &self.entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(entry) => { + let (index, _) = entry.remove(); + let (key, value) = self.borrow_mut().swap_remove_finish(index); + Some((index, key, value)) + } + Err(_) => None, + } + } + + /// Remove an entry by swapping it with the last + #[inline] + pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + self.borrow_mut().swap_remove_index(index) + } + + /// Erase `start..end` from 
`indices`, and shift `end..` indices down to `start..` + /// + /// All of these items should still be at their original location in `entries`. + /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. + fn erase_indices(&mut self, start: usize, end: usize) { + let (init, shifted_entries) = self.entries.split_at(end); + let (start_entries, erased_entries) = init.split_at(start); + + let erased = erased_entries.len(); + let shifted = shifted_entries.len(); + let half_capacity = self.indices.capacity() / 2; + + // Use a heuristic between different strategies + if erased == 0 { + // Degenerate case, nothing to do + } else if start + shifted < half_capacity && start < erased { + // Reinsert everything, as there are few kept indices + self.indices.clear(); + + // Reinsert stable indices, then shifted indices + insert_bulk_no_grow(&mut self.indices, start_entries); + insert_bulk_no_grow(&mut self.indices, shifted_entries); + } else if erased + shifted < half_capacity { + // Find each affected index, as there are few to adjust + + // Find erased indices + for (i, entry) in (start..).zip(erased_entries) { + erase_index(&mut self.indices, entry.hash, i); + } + + // Find shifted indices + for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { + update_index(&mut self.indices, entry.hash, old, new); + } + } else { + // Sweep the whole table for adjustments + let offset = end - start; + self.indices.retain(move |i| { + if *i >= end { + *i -= offset; + true + } else { + *i < start + } + }); + } + + debug_assert_eq!(self.indices.len(), start + shifted); + } + + pub(crate) fn retain_in_order(&mut self, mut keep: F) + where + F: FnMut(&mut K, &mut V) -> bool, + { + self.entries + .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); + if self.entries.len() < self.indices.len() { + self.rebuild_hash_table(); + } + } + + fn rebuild_hash_table(&mut self) { + self.indices.clear(); + insert_bulk_no_grow(&mut self.indices, &self.entries); + } + + pub(crate) fn reverse(&mut self) { + self.entries.reverse(); + + // No need to save hash indices, can easily calculate what they should + // be, given that this is an in-place reversal. + let len = self.entries.len(); + for i in &mut self.indices { + *i = len - *i - 1; + } + } +} + +impl<'a, K, V> RefMut<'a, K, V> { + #[inline] + fn new(indices: &'a mut Indices, entries: &'a mut Entries) -> Self { + Self { indices, entries } + } + + /// Reserve entries capacity, rounded up to match the indices + fn reserve_entries(&mut self, additional: usize) { + // Use a soft-limit on the maximum capacity, but if the caller explicitly + // requested more, do it and let them have the resulting panic. + let new_capacity = Ord::min( + self.indices.capacity(), + IndexMapCore::::MAX_ENTRIES_CAPACITY, + ); + let try_add = new_capacity - self.entries.len(); + if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() { + return; + } + self.entries.reserve_exact(additional); + } + + /// Append a key-value pair to `entries`, + /// *without* checking whether it already exists. + fn push_entry(&mut self, hash: HashValue, key: K, value: V) { + if self.entries.len() == self.entries.capacity() { + // Reserve our own capacity synced to the indices, + // rather than letting `Vec::push` just double it. + self.reserve_entries(1); + } + self.entries.push(Bucket { hash, key, value }); + } + + /// Insert a key-value pair in `entries` at a particular index, + /// *without* checking whether it already exists. 
+ fn insert_entry(&mut self, index: usize, hash: HashValue, key: K, value: V) { + if self.entries.len() == self.entries.capacity() { + // Reserve our own capacity synced to the indices, + // rather than letting `Vec::insert` just double it. + self.reserve_entries(1); + } + self.entries.insert(index, Bucket { hash, key, value }); + } + fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> usize { let i = self.indices.len(); - self.indices.insert(hash.get(), i, get_hash(&self.entries)); + self.indices + .insert_unique(hash.get(), i, get_hash(self.entries)); debug_assert_eq!(i, self.entries.len()); self.push_entry(hash, key, value); i @@ -386,7 +559,7 @@ impl IndexMapCore { // Increment others first so we don't have duplicate indices. self.increment_indices(index, end); let entries = &*self.entries; - self.indices.insert(hash.get(), index, move |&i| { + self.indices.insert_unique(hash.get(), index, move |&i| { // Adjust for the incremented indices to find hashes. debug_assert_ne!(i, index); let i = if i < index { i } else { i - 1 }; @@ -396,25 +569,10 @@ impl IndexMapCore { } /// Remove an entry by shifting all entries that follow it - pub(crate) fn shift_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> - where - Q: ?Sized + Equivalent, - { - let eq = equivalent(key, &self.entries); - match self.indices.remove_entry(hash.get(), eq) { - Some(index) => { - let (key, value) = self.shift_remove_finish(index); - Some((index, key, value)) - } - None => None, - } - } - - /// Remove an entry by shifting all entries that follow it - pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { + fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { match self.entries.get(index) { Some(entry) => { - erase_index(&mut self.indices, entry.hash, index); + erase_index(self.indices, entry.hash, index); Some(self.shift_remove_finish(index)) } None => None, @@ -433,6 +591,36 @@ impl IndexMapCore { (entry.key, entry.value) } + /// Remove an entry by swapping it with the last + fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { + match self.entries.get(index) { + Some(entry) => { + erase_index(self.indices, entry.hash, index); + Some(self.swap_remove_finish(index)) + } + None => None, + } + } + + /// Finish removing an entry by swapping it with the last + /// + /// The index should already be removed from `self.indices`. + fn swap_remove_finish(&mut self, index: usize) -> (K, V) { + // use swap_remove, but then we need to update the index that points + // to the other entry that has to move + let entry = self.entries.swap_remove(index); + + // correct index that points to the entry that had to swap places + if let Some(entry) = self.entries.get(index) { + // was not last element + // examine new element in `index` and find it in indices + let last = self.entries.len(); + update_index(self.indices, entry.hash, last, index); + } + + (entry.key, entry.value) + } + /// Decrement all indices in the range `start..end`. /// /// The index `start - 1` should not exist in `self.indices`. @@ -440,9 +628,9 @@ impl IndexMapCore { fn decrement_indices(&mut self, start: usize, end: usize) { // Use a heuristic between a full sweep vs. a `find()` for every shifted item. let shifted_entries = &self.entries[start..end]; - if shifted_entries.len() > self.indices.buckets() / 2 { + if shifted_entries.len() > self.indices.capacity() / 2 { // Shift all indices in range. 
- for i in self.indices_mut() { + for i in &mut *self.indices { if start <= *i && *i < end { *i -= 1; } @@ -450,7 +638,7 @@ impl IndexMapCore { } else { // Find each entry in range to shift its index. for (i, entry) in (start..end).zip(shifted_entries) { - update_index(&mut self.indices, entry.hash, i, i - 1); + update_index(self.indices, entry.hash, i, i - 1); } } } @@ -462,9 +650,9 @@ impl IndexMapCore { fn increment_indices(&mut self, start: usize, end: usize) { // Use a heuristic between a full sweep vs. a `find()` for every shifted item. let shifted_entries = &self.entries[start..end]; - if shifted_entries.len() > self.indices.buckets() / 2 { + if shifted_entries.len() > self.indices.capacity() / 2 { // Shift all indices in range. - for i in self.indices_mut() { + for i in &mut *self.indices { if start <= *i && *i < end { *i += 1; } @@ -473,17 +661,17 @@ impl IndexMapCore { // Find each entry in range to shift its index, updated in reverse so // we never have duplicated indices that might have a hash collision. for (i, entry) in (start..end).zip(shifted_entries).rev() { - update_index(&mut self.indices, entry.hash, i, i + 1); + update_index(self.indices, entry.hash, i, i + 1); } } } - pub(super) fn move_index(&mut self, from: usize, to: usize) { + fn move_index(&mut self, from: usize, to: usize) { let from_hash = self.entries[from].hash; let _ = self.entries[to]; // explicit bounds check if from != to { // Use a sentinel index so other indices don't collide. - update_index(&mut self.indices, from_hash, from, usize::MAX); + update_index(self.indices, from_hash, from, usize::MAX); // Update all other indices and rotate the entry positions. if from < to { @@ -495,141 +683,27 @@ impl IndexMapCore { } // Change the sentinel index to its final position. - update_index(&mut self.indices, from_hash, usize::MAX, to); + update_index(self.indices, from_hash, usize::MAX, to); } } - pub(crate) fn swap_indices(&mut self, a: usize, b: usize) { + fn swap_indices(&mut self, a: usize, b: usize) { // If they're equal and in-bounds, there's nothing to do. if a == b && a < self.entries.len() { return; } - // We'll get a "nice" bounds-check from indexing `self.entries`, + // We'll get a "nice" bounds-check from indexing `entries`, // and then we expect to find it in the table as well. 
- let [ref_a, ref_b] = self - .indices - .get_many_mut( - [self.entries[a].hash.get(), self.entries[b].hash.get()], - move |i, &x| if i == 0 { x == a } else { x == b }, - ) - .expect("indices not found"); - - mem::swap(ref_a, ref_b); - self.entries.swap(a, b); - } - - /// Remove an entry by swapping it with the last - pub(crate) fn swap_remove_full(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)> - where - Q: ?Sized + Equivalent, - { - let eq = equivalent(key, &self.entries); - match self.indices.remove_entry(hash.get(), eq) { - Some(index) => { - let (key, value) = self.swap_remove_finish(index); - Some((index, key, value)) + match self.indices.get_many_mut( + [self.entries[a].hash.get(), self.entries[b].hash.get()], + move |i, &x| if i == 0 { x == a } else { x == b }, + ) { + [Some(ref_a), Some(ref_b)] => { + mem::swap(ref_a, ref_b); + self.entries.swap(a, b); } - None => None, - } - } - - /// Remove an entry by swapping it with the last - pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { - match self.entries.get(index) { - Some(entry) => { - erase_index(&mut self.indices, entry.hash, index); - Some(self.swap_remove_finish(index)) - } - None => None, - } - } - - /// Finish removing an entry by swapping it with the last - /// - /// The index should already be removed from `self.indices`. - fn swap_remove_finish(&mut self, index: usize) -> (K, V) { - // use swap_remove, but then we need to update the index that points - // to the other entry that has to move - let entry = self.entries.swap_remove(index); - - // correct index that points to the entry that had to swap places - if let Some(entry) = self.entries.get(index) { - // was not last element - // examine new element in `index` and find it in indices - let last = self.entries.len(); - update_index(&mut self.indices, entry.hash, last, index); - } - - (entry.key, entry.value) - } - - /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..` - /// - /// All of these items should still be at their original location in `entries`. - /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`. 
- fn erase_indices(&mut self, start: usize, end: usize) { - let (init, shifted_entries) = self.entries.split_at(end); - let (start_entries, erased_entries) = init.split_at(start); - - let erased = erased_entries.len(); - let shifted = shifted_entries.len(); - let half_capacity = self.indices.buckets() / 2; - - // Use a heuristic between different strategies - if erased == 0 { - // Degenerate case, nothing to do - } else if start + shifted < half_capacity && start < erased { - // Reinsert everything, as there are few kept indices - self.indices.clear(); - - // Reinsert stable indices, then shifted indices - raw::insert_bulk_no_grow(&mut self.indices, start_entries); - raw::insert_bulk_no_grow(&mut self.indices, shifted_entries); - } else if erased + shifted < half_capacity { - // Find each affected index, as there are few to adjust - - // Find erased indices - for (i, entry) in (start..).zip(erased_entries) { - erase_index(&mut self.indices, entry.hash, i); - } - - // Find shifted indices - for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) { - update_index(&mut self.indices, entry.hash, old, new); - } - } else { - // Sweep the whole table for adjustments - self.erase_indices_sweep(start, end); - } - - debug_assert_eq!(self.indices.len(), start + shifted); - } - - pub(crate) fn retain_in_order(&mut self, mut keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - { - self.entries - .retain_mut(|entry| keep(&mut entry.key, &mut entry.value)); - if self.entries.len() < self.indices.len() { - self.rebuild_hash_table(); - } - } - - fn rebuild_hash_table(&mut self) { - self.indices.clear(); - raw::insert_bulk_no_grow(&mut self.indices, &self.entries); - } - - pub(crate) fn reverse(&mut self) { - self.entries.reverse(); - - // No need to save hash indices, can easily calculate what they should - // be, given that this is an in-place reversal. - let len = self.entries.len(); - for i in self.indices_mut() { - *i = len - *i - 1; + _ => panic!("indices not found"), } } } @@ -640,4 +714,5 @@ fn assert_send_sync() { assert_send_sync::>(); assert_send_sync::>(); assert_send_sync::>(); + assert_send_sync::>(); } diff --git a/src/map/core/entry.rs b/src/map/core/entry.rs index 5ac8c495..f8a81367 100644 --- a/src/map/core/entry.rs +++ b/src/map/core/entry.rs @@ -1,16 +1,22 @@ -use super::raw::RawTableEntry; -use super::IndexMapCore; +use super::{equivalent, Entries, IndexMapCore, RefMut}; use crate::HashValue; use core::{fmt, mem}; +use hashbrown::hash_table; impl IndexMapCore { pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V> where K: Eq, { - match self.raw_entry(hash, |k| *k == key) { - Ok(raw) => Entry::Occupied(OccupiedEntry { raw }), - Err(map) => Entry::Vacant(VacantEntry { map, hash, key }), + let entries = &mut self.entries; + let eq = equivalent(&key, entries); + match self.indices.find_entry(hash.get(), eq) { + Ok(index) => Entry::Occupied(OccupiedEntry { entries, index }), + Err(absent) => Entry::Vacant(VacantEntry { + map: RefMut::new(absent.into_table(), entries), + hash, + key, + }), } } } @@ -125,14 +131,20 @@ impl fmt::Debug for Entry<'_, K, V> { /// A view into an occupied entry in an [`IndexMap`][crate::IndexMap]. /// It is part of the [`Entry`] enum. 
pub struct OccupiedEntry<'a, K, V> { - raw: RawTableEntry<'a, K, V>, + entries: &'a mut Entries, + index: hash_table::OccupiedEntry<'a, usize>, } impl<'a, K, V> OccupiedEntry<'a, K, V> { /// Return the index of the key-value pair #[inline] pub fn index(&self) -> usize { - self.raw.index() + *self.index.get() + } + + #[inline] + fn into_ref_mut(self) -> RefMut<'a, K, V> { + RefMut::new(self.index.into_table(), self.entries) } /// Gets a reference to the entry's key in the map. @@ -141,16 +153,17 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key(&self) -> &K { - &self.raw.bucket().key + &self.entries[self.index()].key } pub(crate) fn key_mut(&mut self) -> &mut K { - &mut self.raw.bucket_mut().key + let index = self.index(); + &mut self.entries[index].key } /// Gets a reference to the entry's value in the map. pub fn get(&self) -> &V { - &self.raw.bucket().value + &self.entries[self.index()].value } /// Gets a mutable reference to the entry's value in the map. @@ -158,13 +171,15 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// If you need a reference which may outlive the destruction of the /// [`Entry`] value, see [`into_mut`][Self::into_mut]. pub fn get_mut(&mut self) -> &mut V { - &mut self.raw.bucket_mut().value + let index = self.index(); + &mut self.entries[index].value } /// Converts into a mutable reference to the entry's value in the map, /// with a lifetime bound to the map itself. pub fn into_mut(self) -> &'a mut V { - &mut self.raw.into_bucket().value + let index = self.index(); + &mut self.entries[index].value } /// Sets the value of the entry to `value`, and returns the entry's old value. @@ -226,8 +241,8 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(self) -> (K, V) { - let (map, index) = self.raw.remove_index(); - map.swap_remove_finish(index) + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).swap_remove_finish(index) } /// Remove and return the key, value pair stored in the map for this entry @@ -238,8 +253,8 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(self) -> (K, V) { - let (map, index) = self.raw.remove_index(); - map.shift_remove_finish(index) + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).shift_remove_finish(index) } /// Moves the position of the entry to a new index @@ -255,8 +270,8 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// Computes in **O(n)** time (average). pub fn move_index(self, to: usize) { - let (map, index) = self.raw.into_inner(); - map.move_index(index, to); + let index = self.index(); + self.into_ref_mut().move_index(index, to); } /// Swaps the position of entry with another. @@ -268,8 +283,8 @@ impl<'a, K, V> OccupiedEntry<'a, K, V> { /// /// Computes in **O(1)** time (average). 
pub fn swap_indices(self, other: usize) { - let (map, index) = self.raw.into_inner(); - map.swap_indices(index, other) + let index = self.index(); + self.into_ref_mut().swap_indices(index, other); } } @@ -283,11 +298,16 @@ impl fmt::Debug for OccupiedEntry<'_, K, V> { } impl<'a, K, V> From> for OccupiedEntry<'a, K, V> { - fn from(entry: IndexedEntry<'a, K, V>) -> Self { + fn from(other: IndexedEntry<'a, K, V>) -> Self { + let IndexedEntry { + map: RefMut { indices, entries }, + index, + } = other; + let hash = entries[index].hash; Self { - raw: entry - .map - .index_raw_entry(entry.index) + entries, + index: indices + .find_entry(hash.get(), move |&i| i == index) .expect("index not found"), } } @@ -296,7 +316,7 @@ impl<'a, K, V> From> for OccupiedEntry<'a, K, V> { /// A view into a vacant entry in an [`IndexMap`][crate::IndexMap]. /// It is part of the [`Entry`] enum. pub struct VacantEntry<'a, K, V> { - map: &'a mut IndexMapCore, + map: RefMut<'a, K, V>, hash: HashValue, key: K, } @@ -323,10 +343,9 @@ impl<'a, K, V> VacantEntry<'a, K, V> { /// Inserts the entry's key and the given value into the map, and returns a mutable reference /// to the value. - pub fn insert(self, value: V) -> &'a mut V { - let Self { map, hash, key } = self; - let i = map.insert_unique(hash, key, value); - &mut map.entries[i].value + pub fn insert(mut self, value: V) -> &'a mut V { + let i = self.map.insert_unique(self.hash, self.key, value); + &mut self.map.entries[i].value } /// Inserts the entry's key and the given value into the map at its ordered @@ -342,7 +361,7 @@ impl<'a, K, V> VacantEntry<'a, K, V> { where K: Ord, { - let slice = crate::map::Slice::from_slice(&self.map.entries); + let slice = crate::map::Slice::from_slice(self.map.entries); let i = slice.binary_search_keys(&self.key).unwrap_err(); (i, self.shift_insert(i, value)) } @@ -353,10 +372,10 @@ impl<'a, K, V> VacantEntry<'a, K, V> { /// ***Panics*** if `index` is out of bounds. /// /// Computes in **O(n)** time (average). - pub fn shift_insert(self, index: usize, value: V) -> &'a mut V { - let Self { map, hash, key } = self; - map.shift_insert_unique(index, hash, key, value); - &mut map.entries[index].value + pub fn shift_insert(mut self, index: usize, value: V) -> &'a mut V { + self.map + .shift_insert_unique(index, self.hash, self.key, value); + &mut self.map.entries[index].value } } @@ -370,7 +389,7 @@ impl fmt::Debug for VacantEntry<'_, K, V> { /// /// This `struct` is created from the [`get_index_entry`][crate::IndexMap::get_index_entry] method. pub struct IndexedEntry<'a, K, V> { - map: &'a mut IndexMapCore, + map: RefMut<'a, K, V>, // We have a mutable reference to the map, which keeps the index // valid and pointing to the correct entry. index: usize, @@ -378,7 +397,10 @@ pub struct IndexedEntry<'a, K, V> { impl<'a, K, V> IndexedEntry<'a, K, V> { pub(crate) fn new(map: &'a mut IndexMapCore, index: usize) -> Self { - Self { map, index } + Self { + map: map.borrow_mut(), + index, + } } /// Return the index of the key-value pair @@ -427,7 +449,7 @@ impl<'a, K, V> IndexedEntry<'a, K, V> { /// **This perturbs the position of what used to be the last element!** /// /// Computes in **O(1)** time (average). - pub fn swap_remove_entry(self) -> (K, V) { + pub fn swap_remove_entry(mut self) -> (K, V) { self.map.swap_remove_index(self.index).unwrap() } @@ -438,7 +460,7 @@ impl<'a, K, V> IndexedEntry<'a, K, V> { /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). 
- pub fn shift_remove_entry(self) -> (K, V) { + pub fn shift_remove_entry(mut self) -> (K, V) { self.map.shift_remove_index(self.index).unwrap() } @@ -476,7 +498,7 @@ impl<'a, K, V> IndexedEntry<'a, K, V> { /// ***Panics*** if `to` is out of bounds. /// /// Computes in **O(n)** time (average). - pub fn move_index(self, to: usize) { + pub fn move_index(mut self, to: usize) { self.map.move_index(self.index, to); } @@ -488,8 +510,8 @@ impl<'a, K, V> IndexedEntry<'a, K, V> { /// ***Panics*** if the `other` index is out of bounds. /// /// Computes in **O(1)** time (average). - pub fn swap_indices(self, other: usize) { - self.map.swap_indices(self.index, other) + pub fn swap_indices(mut self, other: usize) { + self.map.swap_indices(self.index, other); } } @@ -504,8 +526,10 @@ impl fmt::Debug for IndexedEntry<'_, K, V> { } impl<'a, K, V> From> for IndexedEntry<'a, K, V> { - fn from(entry: OccupiedEntry<'a, K, V>) -> Self { - let (map, index) = entry.raw.into_inner(); - Self { map, index } + fn from(other: OccupiedEntry<'a, K, V>) -> Self { + Self { + index: other.index(), + map: other.into_ref_mut(), + } } } diff --git a/src/map/core/raw.rs b/src/map/core/raw.rs deleted file mode 100644 index c6a7b696..00000000 --- a/src/map/core/raw.rs +++ /dev/null @@ -1,164 +0,0 @@ -#![allow(unsafe_code)] -//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`, -//! mostly in dealing with its bucket "pointers". - -use super::{equivalent, get_hash, Bucket, HashValue, IndexMapCore}; -use hashbrown::raw::RawTable; - -type RawBucket = hashbrown::raw::Bucket; - -/// Inserts many entries into a raw table without reallocating. -/// -/// ***Panics*** if there is not sufficient capacity already. -pub(super) fn insert_bulk_no_grow(indices: &mut RawTable, entries: &[Bucket]) { - assert!(indices.capacity() - indices.len() >= entries.len()); - for entry in entries { - // SAFETY: we asserted that sufficient capacity exists for all entries. - unsafe { - indices.insert_no_grow(entry.hash.get(), indices.len()); - } - } -} - -#[cfg(feature = "test_debug")] -pub(super) struct DebugIndices<'a>(pub &'a RawTable); - -#[cfg(feature = "test_debug")] -impl core::fmt::Debug for DebugIndices<'_> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - // SAFETY: we're not letting any of the buckets escape this function - let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) }; - f.debug_list().entries(indices).finish() - } -} - -impl IndexMapCore { - /// Sweep the whole table to erase indices start..end - pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) { - // SAFETY: we're not letting any of the buckets escape this function - unsafe { - let offset = end - start; - for bucket in self.indices.iter() { - let i = bucket.as_mut(); - if *i >= end { - *i -= offset; - } else if *i >= start { - self.indices.erase(bucket); - } - } - } - } - - /// Search for a key in the table and return `Ok(entry_index)` if found. - /// Otherwise, insert the key and return `Err(new_index)`. - /// - /// Note that hashbrown may resize the table to reserve space for insertion, - /// even before checking if it's already present, so this is somewhat biased - /// towards new items. - pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result - where - K: Eq, - { - let hash = hash.get(); - let eq = equivalent(key, &self.entries); - let hasher = get_hash(&self.entries); - // SAFETY: We're not mutating between find and read/insert. 
- unsafe { - match self.indices.find_or_find_insert_slot(hash, eq, hasher) { - Ok(raw_bucket) => Ok(*raw_bucket.as_ref()), - Err(slot) => { - let index = self.indices.len(); - self.indices.insert_in_slot(hash, slot, index); - Err(index) - } - } - } - } - - pub(super) fn raw_entry( - &mut self, - hash: HashValue, - mut is_match: impl FnMut(&K) -> bool, - ) -> Result, &mut Self> { - let entries = &*self.entries; - let eq = move |&i: &usize| is_match(&entries[i].key); - match self.indices.find(hash.get(), eq) { - // SAFETY: The bucket is valid because we *just* found it in this map. - Some(raw_bucket) => Ok(unsafe { RawTableEntry::new(self, raw_bucket) }), - None => Err(self), - } - } - - pub(super) fn index_raw_entry(&mut self, index: usize) -> Option> { - let hash = self.entries.get(index)?.hash; - let raw_bucket = self.indices.find(hash.get(), move |&i| i == index)?; - // SAFETY: The bucket is valid because we *just* found it in this map. - Some(unsafe { RawTableEntry::new(self, raw_bucket) }) - } - - pub(super) fn indices_mut(&mut self) -> impl Iterator { - // SAFETY: we're not letting any of the buckets escape this function, - // only the item references that are appropriately bound to `&mut self`. - unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) } - } -} - -/// A view into an occupied raw entry in an `IndexMap`. -// SAFETY: The lifetime of the map reference also constrains the raw bucket, -// which is essentially a raw pointer into the map indices. -pub(super) struct RawTableEntry<'a, K, V> { - map: &'a mut IndexMapCore, - raw_bucket: RawBucket, -} - -// `hashbrown::raw::Bucket` is only `Send`, not `Sync`. -// SAFETY: `&self` only accesses the bucket to read it. -unsafe impl Sync for RawTableEntry<'_, K, V> {} - -impl<'a, K, V> RawTableEntry<'a, K, V> { - /// The caller must ensure that the `raw_bucket` is valid in the given `map`, - /// and then we hold the `&mut` reference for exclusive access. - #[inline] - unsafe fn new(map: &'a mut IndexMapCore, raw_bucket: RawBucket) -> Self { - Self { map, raw_bucket } - } - - /// Return the index of the key-value pair - #[inline] - pub(super) fn index(&self) -> usize { - // SAFETY: we have `&mut map` keeping the bucket stable - unsafe { *self.raw_bucket.as_ref() } - } - - #[inline] - pub(super) fn bucket(&self) -> &Bucket { - &self.map.entries[self.index()] - } - - #[inline] - pub(super) fn bucket_mut(&mut self) -> &mut Bucket { - let index = self.index(); - &mut self.map.entries[index] - } - - #[inline] - pub(super) fn into_bucket(self) -> &'a mut Bucket { - let index = self.index(); - &mut self.map.entries[index] - } - - /// Remove the index from indices, leaving the actual entries to the caller. - pub(super) fn remove_index(self) -> (&'a mut IndexMapCore, usize) { - // SAFETY: This is safe because it can only happen once (self is consumed) - // and map.indices have not been modified since entry construction - let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) }; - (self.map, index) - } - - /// Take no action, just return the index and the original map reference. - #[inline] - pub(super) fn into_inner(self) -> (&'a mut IndexMapCore, usize) { - let index = self.index(); - (self.map, index) - } -} diff --git a/src/map/core/raw_entry_v1.rs b/src/map/core/raw_entry_v1.rs index 87e532d5..5d73469d 100644 --- a/src/map/core/raw_entry_v1.rs +++ b/src/map/core/raw_entry_v1.rs @@ -9,13 +9,13 @@ //! `hash_raw_entry` feature (or some replacement), matching *inherent* methods will be added to //! 
`IndexMap` without such an opt-in trait. -use super::raw::RawTableEntry; -use super::IndexMapCore; +use super::{Entries, RefMut}; use crate::{Equivalent, HashValue, IndexMap}; use core::fmt; use core::hash::{BuildHasher, Hash, Hasher}; use core::marker::PhantomData; use core::mem; +use hashbrown::hash_table; /// Opt-in access to the experimental raw entry API. /// @@ -245,7 +245,7 @@ impl<'a, K, V, S> RawEntryBuilder<'a, K, V, S> { let hash = HashValue(hash as usize); let entries = &*self.map.core.entries; let eq = move |&i: &usize| is_match(&entries[i].key); - self.map.core.indices.get(hash.get(), eq).copied() + self.map.core.indices.find(hash.get(), eq).copied() } } @@ -283,18 +283,20 @@ impl<'a, K, V, S> RawEntryBuilderMut<'a, K, V, S> { } /// Access an entry by hash. - pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S> + pub fn from_hash(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S> where F: FnMut(&K) -> bool, { - let hash = HashValue(hash as usize); - match self.map.core.raw_entry(hash, is_match) { - Ok(raw) => RawEntryMut::Occupied(RawOccupiedEntryMut { - raw, + let ref_entries = &*self.map.core.entries; + let eq = move |&i: &usize| is_match(&ref_entries[i].key); + match self.map.core.indices.find_entry(hash, eq) { + Ok(index) => RawEntryMut::Occupied(RawOccupiedEntryMut { + entries: &mut self.map.core.entries, + index, hash_builder: PhantomData, }), - Err(map) => RawEntryMut::Vacant(RawVacantEntryMut { - map, + Err(absent) => RawEntryMut::Vacant(RawVacantEntryMut { + map: RefMut::new(absent.into_table(), &mut self.map.core.entries), hash_builder: &self.map.hash_builder, }), } @@ -377,7 +379,8 @@ impl<'a, K, V, S> RawEntryMut<'a, K, V, S> { /// A raw view into an occupied entry in an [`IndexMap`]. /// It is part of the [`RawEntryMut`] enum. pub struct RawOccupiedEntryMut<'a, K, V, S> { - raw: RawTableEntry<'a, K, V>, + entries: &'a mut Entries, + index: hash_table::OccupiedEntry<'a, usize>, hash_builder: PhantomData<&'a S>, } @@ -394,7 +397,12 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// Return the index of the key-value pair #[inline] pub fn index(&self) -> usize { - self.raw.index() + *self.index.get() + } + + #[inline] + fn into_ref_mut(self) -> RefMut<'a, K, V> { + RefMut::new(self.index.into_table(), self.entries) } /// Gets a reference to the entry's key in the map. @@ -403,7 +411,7 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key(&self) -> &K { - &self.raw.bucket().key + &self.entries[self.index()].key } /// Gets a mutable reference to the entry's key in the map. @@ -412,7 +420,8 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. pub fn key_mut(&mut self) -> &mut K { - &mut self.raw.bucket_mut().key + let index = self.index(); + &mut self.entries[index].key } /// Converts into a mutable reference to the entry's key in the map, @@ -422,12 +431,13 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like /// extra fields or the memory address of an allocation. 
pub fn into_key(self) -> &'a mut K { - &mut self.raw.into_bucket().key + let index = self.index(); + &mut self.entries[index].key } /// Gets a reference to the entry's value in the map. pub fn get(&self) -> &V { - &self.raw.bucket().value + &self.entries[self.index()].value } /// Gets a mutable reference to the entry's value in the map. @@ -435,29 +445,33 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// If you need a reference which may outlive the destruction of the /// [`RawEntryMut`] value, see [`into_mut`][Self::into_mut]. pub fn get_mut(&mut self) -> &mut V { - &mut self.raw.bucket_mut().value + let index = self.index(); + &mut self.entries[index].value } /// Converts into a mutable reference to the entry's value in the map, /// with a lifetime bound to the map itself. pub fn into_mut(self) -> &'a mut V { - &mut self.raw.into_bucket().value + let index = self.index(); + &mut self.entries[index].value } /// Gets a reference to the entry's key and value in the map. pub fn get_key_value(&self) -> (&K, &V) { - self.raw.bucket().refs() + self.entries[self.index()].refs() } /// Gets a reference to the entry's key and value in the map. pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) { - self.raw.bucket_mut().muts() + let index = self.index(); + self.entries[index].muts() } /// Converts into a mutable reference to the entry's key and value in the map, /// with a lifetime bound to the map itself. pub fn into_key_value_mut(self) -> (&'a mut K, &'a mut V) { - self.raw.into_bucket().muts() + let index = self.index(); + self.entries[index].muts() } /// Sets the value of the entry, and returns the entry's old value. @@ -524,8 +538,8 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(self) -> (K, V) { - let (map, index) = self.raw.remove_index(); - map.swap_remove_finish(index) + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).swap_remove_finish(index) } /// Remove and return the key, value pair stored in the map for this entry @@ -536,8 +550,8 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(self) -> (K, V) { - let (map, index) = self.raw.remove_index(); - map.shift_remove_finish(index) + let (index, entry) = self.index.remove(); + RefMut::new(entry.into_table(), self.entries).shift_remove_finish(index) } /// Moves the position of the entry to a new index @@ -553,8 +567,8 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// /// Computes in **O(n)** time (average). pub fn move_index(self, to: usize) { - let (map, index) = self.raw.into_inner(); - map.move_index(index, to); + let index = self.index(); + self.into_ref_mut().move_index(index, to); } /// Swaps the position of entry with another. @@ -566,15 +580,15 @@ impl<'a, K, V, S> RawOccupiedEntryMut<'a, K, V, S> { /// /// Computes in **O(1)** time (average). pub fn swap_indices(self, other: usize) { - let (map, index) = self.raw.into_inner(); - map.swap_indices(index, other) + let index = self.index(); + self.into_ref_mut().swap_indices(index, other); } } /// A view into a vacant raw entry in an [`IndexMap`]. /// It is part of the [`RawEntryMut`] enum. 
pub struct RawVacantEntryMut<'a, K, V, S> { - map: &'a mut IndexMapCore, + map: RefMut<'a, K, V>, hash_builder: &'a S, } @@ -604,7 +618,7 @@ impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { /// Inserts the given key and value into the map with the provided hash, /// and returns mutable references to them. - pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) { + pub fn insert_hashed_nocheck(mut self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V) { let hash = HashValue(hash as usize); let i = self.map.insert_unique(hash, key, value); self.map.entries[i].muts() @@ -633,7 +647,7 @@ impl<'a, K, V, S> RawVacantEntryMut<'a, K, V, S> { /// /// Computes in **O(n)** time (average). pub fn shift_insert_hashed_nocheck( - self, + mut self, index: usize, hash: u64, key: K, diff --git a/src/map/tests.rs b/src/map/tests.rs index ca7e9d6c..9de9db1b 100644 --- a/src/map/tests.rs +++ b/src/map/tests.rs @@ -822,7 +822,7 @@ macro_rules! move_index_oob { let mut map: IndexMap = (0..10).map(|k| (k, ())).collect(); map.move_index($from, $to); } - } + }; } move_index_oob!(test_move_index_out_of_bounds_0_10, 0, 10); move_index_oob!(test_move_index_out_of_bounds_0_max, 0, usize::MAX);
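
A few standalone sketches of the hashbrown 0.15 `HashTable` API used above, for reviewers who haven't seen it yet. These are illustrative only, not part of the patch: they substitute `std`'s `RandomState` for the map's generic `S: BuildHasher` and recompute hashes that the real code caches in `Bucket`.

First, the `HashTable::entry` flow that `insert_full` now uses. The table stores only `usize` indices, so both the equality closure and the rehash closure read through the dense `entries` vector:

use hashbrown::hash_table::{Entry, HashTable};
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let s = RandomState::new();
    let mut entries: Vec<(&str, i32)> = Vec::new();
    let mut indices: HashTable<usize> = HashTable::new();

    for (key, value) in [("a", 1), ("b", 2), ("a", 3)] {
        match indices.entry(
            s.hash_one(key),
            |&i| entries[i].0 == key,      // equivalent-key check against `entries`
            |&i| s.hash_one(entries[i].0), // rehash when the table grows
        ) {
            Entry::Occupied(e) => entries[*e.get()].1 = value,
            Entry::Vacant(e) => {
                e.insert(entries.len());
                entries.push((key, value));
            }
        }
    }
    assert_eq!(entries, [("a", 3), ("b", 2)]); // insert order kept, value updated
}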
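
Why the new `RefMut` type exists: `find_entry` keeps the `&mut` borrow of the table alive inside the `OccupiedEntry` it returns, so the entries vector has to be borrowed alongside the table up front; there is no way back to a whole `&mut IndexMapCore`. A hedged sketch of the `swap_remove_full` / `swap_remove_finish` sequence over such split borrows (`swap_remove` is an illustrative free function, and hashes are recomputed rather than cached):

use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn swap_remove(
    s: &RandomState,
    indices: &mut HashTable<usize>,
    entries: &mut Vec<(String, i32)>,
    key: &str,
) -> Option<(usize, String, i32)> {
    // `find_entry` mutably borrows `indices`; `entries` is only read here.
    let entry = indices
        .find_entry(s.hash_one(key), |&i| entries[i].0 == key)
        .ok()?;
    let (index, _) = entry.remove(); // drops the table borrow immediately

    // Swap-remove from the dense Vec, then repoint the table slot of the
    // entry that moved into the hole (it used to be the last one).
    let (k, v) = entries.swap_remove(index);
    if let Some(moved) = entries.get(index) {
        let last = entries.len();
        let slot = indices
            .find_mut(s.hash_one(&moved.0), |&i| i == last)
            .expect("index not found");
        *slot = index;
    }
    Some((index, k, v))
}

The entry types above do the same split: `RefMut::new(absent.into_table(), entries)` reassembles the two halves after a failed lookup.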
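
`insert_unique` demands a rehash closure in case insertion grows the table, which is why `insert_bulk_no_grow` asserts capacity first and then passes `|_| unreachable!()` — growth cannot happen, so the closure is dead code. A sketch of the same trick used by `rebuild_hash_table` and `split_off`, assuming per-entry hashes are already available as they are in `Bucket`:

use hashbrown::HashTable;

// Rebuild an index table from cached hashes without ever resizing.
// Panics if capacity is short, mirroring the patch's assert.
fn rebuild(indices: &mut HashTable<usize>, hashes: &[u64]) {
    indices.clear(); // keeps capacity
    assert!(indices.capacity() - indices.len() >= hashes.len());
    for (i, &hash) in hashes.iter().enumerate() {
        indices.insert_unique(hash, i, |_| unreachable!());
    }
}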
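
`swap_indices` keeps using a multi-lookup, but `HashTable::get_many_mut` returns an array of `Option`s rather than a `Result`, hence the new `match`. The call shape, sketched with recomputed hashes (`swap_indices` here is a free-function stand-in):

use core::mem;
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn swap_indices(
    s: &RandomState,
    indices: &mut HashTable<usize>,
    entries: &mut [(String, i32)],
    a: usize,
    b: usize,
) {
    if a == b && a < entries.len() {
        return; // same early-out as the patch
    }
    // Two disjoint &mut slots in one probe; the closure's first argument
    // tells us which of the two hashes is currently being matched.
    match indices.get_many_mut(
        [s.hash_one(&entries[a].0), s.hash_one(&entries[b].0)],
        move |i, &x| if i == 0 { x == a } else { x == b },
    ) {
        [Some(ref_a), Some(ref_b)] => {
            mem::swap(ref_a, ref_b);
            entries.swap(a, b);
        }
        _ => panic!("indices not found"),
    }
}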
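
Finally, the unsafe `erase_indices_sweep` is gone: the sweep arm of `erase_indices` is now an ordinary `retain`, shifting surviving indices down while dropping the erased range. Standalone:

use hashbrown::HashTable;

// Drop indices in `start..end` and shift `end..` down by the gap,
// in a single pass over the table.
fn sweep(indices: &mut HashTable<usize>, start: usize, end: usize) {
    let offset = end - start;
    indices.retain(move |i| {
        if *i >= end {
            *i -= offset;
            true
        } else {
            *i < start // keep `..start`, drop `start..end`
        }
    });
}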