diff --git a/benches/bench.rs b/benches/bench.rs index c99badd8..d367fcd8 100644 --- a/benches/bench.rs +++ b/benches/bench.rs @@ -30,7 +30,7 @@ fn new_hashmap(b: &mut Bencher) { } #[bench] -fn new_orderedmap(b: &mut Bencher) { +fn new_indexmap(b: &mut Bencher) { b.iter(|| IndexMap::::new()); } @@ -40,7 +40,7 @@ fn with_capacity_10e5_hashmap(b: &mut Bencher) { } #[bench] -fn with_capacity_10e5_orderedmap(b: &mut Bencher) { +fn with_capacity_10e5_indexmap(b: &mut Bencher) { b.iter(|| IndexMap::::with_capacity(10_000)); } @@ -57,7 +57,7 @@ fn insert_hashmap_10_000(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_10_000(b: &mut Bencher) { +fn insert_indexmap_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -81,7 +81,7 @@ fn insert_hashmap_string_10_000(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_string_10_000(b: &mut Bencher) { +fn insert_indexmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -106,7 +106,7 @@ fn insert_hashmap_str_10_000(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_str_10_000(b: &mut Bencher) { +fn insert_indexmap_str_10_000(b: &mut Bencher) { let c = 10_000; let ss = Vec::from_iter((0..c).map(|x| x.to_string())); b.iter(|| { @@ -132,7 +132,7 @@ fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_int_bigvalue_10_000(b: &mut Bencher) { +fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) { let c = 10_000; let value = [0u64; 10]; b.iter(|| { @@ -157,7 +157,7 @@ fn insert_hashmap_100_000(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_100_000(b: &mut Bencher) { +fn insert_indexmap_100_000(b: &mut Bencher) { let c = 100_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -181,7 +181,7 @@ fn insert_hashmap_150(b: &mut Bencher) { } #[bench] -fn insert_orderedmap_150(b: &mut Bencher) { +fn insert_indexmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -205,7 +205,7 @@ fn entry_hashmap_150(b: &mut Bencher) { } #[bench] -fn entry_orderedmap_150(b: &mut Bencher) { +fn entry_indexmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -229,7 +229,7 @@ fn iter_sum_hashmap_10_000(b: &mut Bencher) { } #[bench] -fn iter_sum_orderedmap_10_000(b: &mut Bencher) { +fn iter_sum_indexmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c / 10; @@ -257,7 +257,7 @@ fn iter_black_box_hashmap_10_000(b: &mut Bencher) { } #[bench] -fn iter_black_box_orderedmap_10_000(b: &mut Bencher) { +fn iter_black_box_indexmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c / 10; @@ -317,7 +317,7 @@ fn lookup_hashmap_10_000_noexist(b: &mut Bencher) { } #[bench] -fn lookup_orderedmap_10_000_exist(b: &mut Bencher) { +fn lookup_indexmap_10_000_exist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); @@ -334,7 +334,7 @@ fn lookup_orderedmap_10_000_exist(b: &mut Bencher) { } #[bench] -fn lookup_orderedmap_10_000_noexist(b: &mut Bencher) { +fn lookup_indexmap_10_000_noexist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); @@ -357,7 +357,7 @@ const SORT_MAP_SIZE: usize = 10_000; // use lazy_static so that comparison benchmarks use the exact same inputs lazy_static! 
{ - static ref KEYS: Vec = { shuffled_keys(0..LOOKUP_MAP_SIZE) }; + static ref KEYS: Vec = shuffled_keys(0..LOOKUP_MAP_SIZE); } lazy_static! { @@ -373,7 +373,7 @@ lazy_static! { } lazy_static! { - static ref OMAP_100K: IndexMap = { + static ref IMAP_100K: IndexMap = { let c = LOOKUP_MAP_SIZE; let mut map = IndexMap::with_capacity(c as usize); let keys = &*KEYS; @@ -385,7 +385,7 @@ lazy_static! { } lazy_static! { - static ref OMAP_SORT_U32: IndexMap = { + static ref IMAP_SORT_U32: IndexMap = { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(key, key); @@ -394,7 +394,7 @@ lazy_static! { }; } lazy_static! { - static ref OMAP_SORT_S: IndexMap = { + static ref IMAP_SORT_S: IndexMap = { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(format!("{:^16x}", &key), String::new()); @@ -416,8 +416,8 @@ fn lookup_hashmap_100_000_multi(b: &mut Bencher) { } #[bench] -fn lookup_ordermap_100_000_multi(b: &mut Bencher) { - let map = &*OMAP_100K; +fn lookup_indexmap_100_000_multi(b: &mut Bencher) { + let map = &*IMAP_100K; b.iter(|| { let mut found = 0; for key in 0..LOOKUP_SAMPLE_SIZE { @@ -442,8 +442,8 @@ fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { } #[bench] -fn lookup_ordermap_100_000_inorder_multi(b: &mut Bencher) { - let map = &*OMAP_100K; +fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) { + let map = &*IMAP_100K; let keys = &*KEYS; b.iter(|| { let mut found = 0; @@ -465,8 +465,8 @@ fn lookup_hashmap_100_000_single(b: &mut Bencher) { } #[bench] -fn lookup_ordermap_100_000_single(b: &mut Bencher) { - let map = &*OMAP_100K; +fn lookup_indexmap_100_000_single(b: &mut Bencher) { + let map = &*IMAP_100K; let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); b.iter(|| { let key = iter.next().unwrap(); @@ -490,7 +490,7 @@ fn grow_fnv_hashmap_100_000(b: &mut Bencher) { } #[bench] -fn grow_fnv_ordermap_100_000(b: &mut Bencher) { +fn grow_fnv_indexmap_100_000(b: &mut Bencher) { b.iter(|| { let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); for x in 0..GROW_SIZE { @@ -529,7 +529,7 @@ fn hashmap_merge_shuffle(b: &mut Bencher) { } #[bench] -fn ordermap_merge_simple(b: &mut Bencher) { +fn indexmap_merge_simple(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); b.iter(|| { @@ -540,7 +540,7 @@ fn ordermap_merge_simple(b: &mut Bencher) { } #[bench] -fn ordermap_merge_shuffle(b: &mut Bencher) { +fn indexmap_merge_shuffle(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); let mut v = Vec::new(); @@ -556,8 +556,8 @@ fn ordermap_merge_shuffle(b: &mut Bencher) { } #[bench] -fn swap_remove_ordermap_100_000(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn swap_remove_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); let mut keys = Vec::from_iter(map.keys().cloned()); let mut rng = SmallRng::from_entropy(); keys.shuffle(&mut rng); @@ -573,8 +573,8 @@ fn swap_remove_ordermap_100_000(b: &mut Bencher) { } #[bench] -fn shift_remove_ordermap_100_000_few(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn shift_remove_indexmap_100_000_few(b: &mut Bencher) { + let map = IMAP_100K.clone(); let mut keys = Vec::from_iter(map.keys().cloned()); let mut rng = SmallRng::from_entropy(); keys.shuffle(&mut rng); @@ -585,13 +585,13 @@ fn 
shift_remove_ordermap_100_000_few(b: &mut Bencher) { for key in &keys { map.shift_remove(key); } - assert_eq!(map.len(), OMAP_100K.len() - keys.len()); + assert_eq!(map.len(), IMAP_100K.len() - keys.len()); map }); } #[bench] -fn shift_remove_ordermap_2_000_full(b: &mut Bencher) { +fn shift_remove_indexmap_2_000_full(b: &mut Bencher) { let mut keys = KEYS[..2_000].to_vec(); let mut map = IndexMap::with_capacity(keys.len()); for &key in &keys { @@ -611,8 +611,8 @@ fn shift_remove_ordermap_2_000_full(b: &mut Bencher) { } #[bench] -fn pop_ordermap_100_000(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn pop_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); @@ -625,8 +625,8 @@ fn pop_ordermap_100_000(b: &mut Bencher) { } #[bench] -fn few_retain_ordermap_100_000(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn few_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); @@ -647,8 +647,8 @@ fn few_retain_hashmap_100_000(b: &mut Bencher) { } #[bench] -fn half_retain_ordermap_100_000(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn half_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); @@ -669,8 +669,8 @@ fn half_retain_hashmap_100_000(b: &mut Bencher) { } #[bench] -fn many_retain_ordermap_100_000(b: &mut Bencher) { - let map = OMAP_100K.clone(); +fn many_retain_indexmap_100_000(b: &mut Bencher) { + let map = IMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); @@ -698,8 +698,8 @@ pub fn simple_sort(m: &mut IndexMap) { } #[bench] -fn ordermap_sort_s(b: &mut Bencher) { - let map = OMAP_SORT_S.clone(); +fn indexmap_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { @@ -710,8 +710,8 @@ fn ordermap_sort_s(b: &mut Bencher) { } #[bench] -fn ordermap_simple_sort_s(b: &mut Bencher) { - let map = OMAP_SORT_S.clone(); +fn indexmap_simple_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { @@ -722,8 +722,8 @@ fn ordermap_simple_sort_s(b: &mut Bencher) { } #[bench] -fn ordermap_sort_u32(b: &mut Bencher) { - let map = OMAP_SORT_U32.clone(); +fn indexmap_sort_u32(b: &mut Bencher) { + let map = IMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { @@ -734,8 +734,8 @@ fn ordermap_sort_u32(b: &mut Bencher) { } #[bench] -fn ordermap_simple_sort_u32(b: &mut Bencher) { - let map = OMAP_SORT_U32.clone(); +fn indexmap_simple_sort_u32(b: &mut Bencher) { + let map = IMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { @@ -747,15 +747,15 @@ fn ordermap_simple_sort_u32(b: &mut Bencher) { // measure the fixed overhead of cloning in sort benchmarks #[bench] -fn ordermap_clone_for_sort_s(b: &mut Bencher) { - let map = OMAP_SORT_S.clone(); +fn indexmap_clone_for_sort_s(b: &mut Bencher) { + let map = IMAP_SORT_S.clone(); b.iter(|| map.clone()); } #[bench] -fn ordermap_clone_for_sort_u32(b: &mut Bencher) { - let map = OMAP_SORT_U32.clone(); +fn indexmap_clone_for_sort_u32(b: &mut Bencher) { + let map = IMAP_SORT_U32.clone(); b.iter(|| map.clone()); } diff --git a/benches/faststring.rs b/benches/faststring.rs index 3adca676..2a9c3d3b 100644 --- a/benches/faststring.rs +++ b/benches/faststring.rs @@ -94,7 +94,7 @@ fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) 
{ } #[bench] -fn insert_orderedmap_string_10_000(b: &mut Bencher) { +fn insert_indexmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); @@ -144,7 +144,7 @@ fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { } #[bench] -fn lookup_ordermap_10_000_exist_string(b: &mut Bencher) { +fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); @@ -162,7 +162,7 @@ fn lookup_ordermap_10_000_exist_string(b: &mut Bencher) { } #[bench] -fn lookup_ordermap_10_000_exist_string_oneshot(b: &mut Bencher) { +fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); diff --git a/src/lib.rs b/src/lib.rs index e39d3f1e..b9758c90 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,6 +79,8 @@ mod mutable_keys; mod serde; mod util; +mod map_core; + pub mod map; pub mod set; @@ -95,7 +97,7 @@ pub use set::IndexSet; /// Hash value newtype. Not larger than usize, since anything larger /// isn't used for selecting position anyway. -#[derive(Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] struct HashValue(usize); impl HashValue { @@ -105,26 +107,33 @@ impl HashValue { } } -impl Clone for HashValue { - #[inline] - fn clone(&self) -> Self { - *self - } -} -impl PartialEq for HashValue { - #[inline] - fn eq(&self, rhs: &Self) -> bool { - self.0 == rhs.0 - } -} - -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Debug)] struct Bucket { hash: HashValue, key: K, value: V, } +impl Clone for Bucket +where + K: Clone, + V: Clone, +{ + fn clone(&self) -> Self { + Bucket { + hash: self.hash, + key: self.key.clone(), + value: self.value.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.hash = other.hash; + self.key.clone_from(&other.key); + self.value.clone_from(&other.value); + } +} + impl Bucket { // field accessors -- used for `f` instead of closures in `.map(f)` fn key_ref(&self) -> &K { diff --git a/src/macros.rs b/src/macros.rs index 67eecf52..8e785235 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -103,13 +103,14 @@ macro_rules! iterator_methods { } fn collect(self) -> C - where C: FromIterator + where + C: FromIterator, { // NB: forwarding this directly to standard iterators will // allow it to leverage unstable traits like `TrustedLen`. self.iter.map($map_elt).collect() } - } + }; } macro_rules! double_ended_iterator_methods { @@ -119,5 +120,5 @@ macro_rules! double_ended_iterator_methods { fn next_back(&mut self) -> Option { self.iter.next_back().map($map_elt) } - } + }; } diff --git a/src/map.rs b/src/map.rs index c711f5e5..1f026fd6 100644 --- a/src/map.rs +++ b/src/map.rs @@ -20,237 +20,22 @@ use std::ops::RangeFull; #[cfg(has_std)] use std::collections::hash_map::RandomState; -use std::cmp::{max, Ordering}; +use std::cmp::Ordering; use std::fmt; -use std::marker::PhantomData; -use std::mem::replace; use equivalent::Equivalent; -use util::{enumerate, ptrdistance, third}; +use map_core::IndexMapCore; +use util::third; use {Bucket, Entries, HashValue}; +pub use map_core::{Entry, OccupiedEntry, VacantEntry}; + fn hash_elem_using(build: &B, k: &K) -> HashValue { let mut h = build.build_hasher(); k.hash(&mut h); HashValue(h.finish() as usize) } -/// A possibly truncated hash value. 
-/// -#[derive(Debug)] -struct ShortHash(usize, PhantomData); - -impl ShortHash { - /// Pretend this is a full HashValue, which - /// is completely ok w.r.t determining bucket index - /// - /// - Sz = u32: 32-bit hash is enough to select bucket index - /// - Sz = u64: hash is not truncated - fn into_hash(self) -> HashValue { - HashValue(self.0) - } -} - -impl Copy for ShortHash {} -impl Clone for ShortHash { - #[inline] - fn clone(&self) -> Self { - *self - } -} - -impl PartialEq for ShortHash { - #[inline] - fn eq(&self, rhs: &Self) -> bool { - self.0 == rhs.0 - } -} - -// Compare ShortHash == HashValue by truncating appropriately -// if applicable before the comparison -impl PartialEq for ShortHash -where - Sz: Size, -{ - #[inline] - fn eq(&self, rhs: &HashValue) -> bool { - if Sz::is_64_bit() { - self.0 == rhs.0 - } else { - lo32(self.0 as u64) == lo32(rhs.0 as u64) - } - } -} -impl From> for HashValue { - fn from(x: ShortHash) -> Self { - HashValue(x.0) - } -} - -/// `Pos` is stored in the `indices` array and it points to the index of a -/// `Bucket` in self.core.entries. -/// -/// Pos can be interpreted either as a 64-bit index, or as a 32-bit index and -/// a 32-bit hash. -/// -/// Storing the truncated hash next to the index saves loading the hash from the -/// entry, increasing the cache efficiency. -/// -/// Note that the lower 32 bits of the hash is enough to compute desired -/// position and probe distance in a hash map with less than 2**32 buckets. -/// -/// The IndexMap will simply query its **current raw capacity** to see what its -/// current size class is, and dispatch to the 32-bit or 64-bit lookup code as -/// appropriate. Only the growth code needs some extra logic to handle the -/// transition from one class to another -#[derive(Copy)] -struct Pos { - index: u64, -} - -impl Clone for Pos { - #[inline(always)] - fn clone(&self) -> Self { - *self - } -} - -impl fmt::Debug for Pos { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.pos() { - Some(i) => write!(f, "Pos({} / {:x})", i, self.index), - None => write!(f, "Pos(None)"), - } - } -} - -impl Pos { - #[inline] - fn none() -> Self { - Pos { index: !0 } - } - - #[inline] - fn is_none(&self) -> bool { - self.index == !0 - } - - /// Return the index part of the Pos value inside `Some(_)` if the position - /// is not none, otherwise return `None`. - #[inline] - fn pos(&self) -> Option { - if self.index == !0 { - None - } else { - Some(lo32(self.index as u64)) - } - } - - /// Set the index part of the Pos value to `i` - #[inline] - fn set_pos(&mut self, i: usize) - where - Sz: Size, - { - debug_assert!(!self.is_none()); - if Sz::is_64_bit() { - self.index = i as u64; - } else { - self.index = i as u64 | ((self.index >> 32) << 32) - } - } - - #[inline] - fn with_hash(i: usize, hash: HashValue) -> Self - where - Sz: Size, - { - if Sz::is_64_bit() { - Pos { index: i as u64 } - } else { - Pos { - index: i as u64 | ((hash.0 as u64) << 32), - } - } - } - - /// “Resolve” the Pos into a combination of its index value and - /// a proxy value to the hash (whether it contains the hash or not - /// depends on the size class of the hash map). 
- #[inline] - fn resolve(&self) -> Option<(usize, ShortHashProxy)> - where - Sz: Size, - { - if Sz::is_64_bit() { - if !self.is_none() { - Some((self.index as usize, ShortHashProxy::new(0))) - } else { - None - } - } else { - if !self.is_none() { - let (i, hash) = split_lo_hi(self.index); - Some((i as usize, ShortHashProxy::new(hash as usize))) - } else { - None - } - } - } - - /// Like resolve, but the Pos **must** be non-none. Return its index. - #[inline] - fn resolve_existing_index(&self) -> usize - where - Sz: Size, - { - debug_assert!( - !self.is_none(), - "datastructure inconsistent: none where valid Pos expected" - ); - if Sz::is_64_bit() { - self.index as usize - } else { - let (i, _) = split_lo_hi(self.index); - i as usize - } - } -} - -#[inline] -fn lo32(x: u64) -> usize { - (x & 0xFFFF_FFFF) as usize -} - -// split into low, hi parts -#[inline] -fn split_lo_hi(x: u64) -> (u32, u32) { - (x as u32, (x >> 32) as u32) -} - -// Possibly contains the truncated hash value for an entry, depending on -// the size class. -struct ShortHashProxy(usize, PhantomData); - -impl ShortHashProxy -where - Sz: Size, -{ - fn new(x: usize) -> Self { - ShortHashProxy(x, PhantomData) - } - - /// Get the hash from either `self` or from a lookup into `entries`, - /// depending on `Sz`. - fn get_short_hash(&self, entries: &[Bucket], index: usize) -> ShortHash { - if Sz::is_64_bit() { - ShortHash(entries[index].hash.0, PhantomData) - } else { - ShortHash(self.0, PhantomData) - } - } -} - /// A hash table where the iteration order of the key-value pairs is independent /// of the hash values of the keys. /// @@ -292,65 +77,59 @@ where /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` -#[derive(Clone)] #[cfg(has_std)] pub struct IndexMap { - core: OrderMapCore, + core: IndexMapCore, hash_builder: S, } -#[derive(Clone)] #[cfg(not(has_std))] pub struct IndexMap { - core: OrderMapCore, + core: IndexMapCore, hash_builder: S, } -// core of the map that does not depend on S -#[derive(Clone)] -struct OrderMapCore { - pub(crate) mask: usize, - /// indices are the buckets. indices.len() == raw capacity - pub(crate) indices: Box<[Pos]>, - /// entries is a dense vec of entries in their order. 
entries.len() == len - pub(crate) entries: Vec>, -} +impl Clone for IndexMap +where + K: Clone, + V: Clone, + S: Clone, +{ + fn clone(&self) -> Self { + IndexMap { + core: self.core.clone(), + hash_builder: self.hash_builder.clone(), + } + } -#[inline(always)] -fn desired_pos(mask: usize, hash: HashValue) -> usize { - hash.0 & mask + fn clone_from(&mut self, other: &Self) { + self.core.clone_from(&other.core); + self.hash_builder.clone_from(&other.hash_builder); + } } impl Entries for IndexMap { type Entry = Bucket; fn into_entries(self) -> Vec { - self.core.entries + self.core.into_entries() } fn as_entries(&self) -> &[Self::Entry] { - &self.core.entries + self.core.as_entries() } fn as_entries_mut(&mut self) -> &mut [Self::Entry] { - &mut self.core.entries + self.core.as_entries_mut() } fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Self::Entry]), { - let side_index = self.core.save_hash_index(); - f(&mut self.core.entries); - self.core.restore_hash_index(side_index); + self.core.with_entries(f); } } -/// The number of steps that `current` is forward of the desired position for hash -#[inline(always)] -fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { - current.wrapping_sub(desired_pos(mask, hash)) & mask -} - impl fmt::Debug for IndexMap where K: fmt::Debug + Hash + Eq, @@ -359,61 +138,14 @@ where { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_map().entries(self.iter()).finish()?; - if cfg!(not(feature = "test_debug")) { - return Ok(()); - } - writeln!(f)?; - for (i, index) in enumerate(&*self.core.indices) { - write!(f, "{}: {:?}", i, index)?; - if let Some(pos) = index.pos() { - let hash = self.core.entries[pos].hash; - let key = &self.core.entries[pos].key; - let desire = desired_pos(self.core.mask, hash); - write!( - f, - ", desired={}, probe_distance={}, key={:?}", - desire, - probe_distance(self.core.mask, hash, i), - key - )?; - } + if cfg!(feature = "test_debug") { writeln!(f)?; + self.core.debug_entries(f)?; } - writeln!( - f, - "cap={}, raw_cap={}, entries.cap={}", - self.capacity(), - self.raw_capacity(), - self.core.entries.capacity() - )?; Ok(()) } } -#[inline] -fn usable_capacity(cap: usize) -> usize { - cap - cap / 4 -} - -#[inline] -fn to_raw_capacity(n: usize) -> usize { - n + n / 3 -} - -// this could not be captured in an efficient iterator -macro_rules! probe_loop { - ($probe_var: ident < $len: expr, $body: expr) => { - loop { - if $probe_var < $len { - $body - $probe_var += 1; - } else { - $probe_var = 0; - } - } - } -} - #[cfg(has_std)] impl IndexMap { /// Create a new map. (Does not allocate.) 
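Since the constructors' behavior is unchanged by this refactor, a minimal usage sketch of the API documented above may help readers skimming the diff (illustrative only, not part of the patch):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new(); // does not allocate until the first insertion
    map.insert("b", 2);
    map.insert("a", 1);
    // iteration follows insertion order, not hash or key order
    let keys: Vec<&str> = map.keys().cloned().collect();
    assert_eq!(keys, ["b", "a"]);
}
```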
@@ -441,22 +173,12 @@ impl IndexMap { { if n == 0 { IndexMap { - core: OrderMapCore { - mask: 0, - indices: Box::new([]), - entries: Vec::new(), - }, + core: IndexMapCore::new(), hash_builder, } } else { - let raw = to_raw_capacity(n); - let raw_cap = max(raw.next_power_of_two(), 8); IndexMap { - core: OrderMapCore { - mask: raw_cap.wrapping_sub(1), - indices: vec![Pos::none(); raw_cap].into_boxed_slice(), - entries: Vec::with_capacity(usable_capacity(raw_cap)), - }, + core: IndexMapCore::with_capacity(n), hash_builder, } } @@ -496,337 +218,6 @@ impl IndexMap { pub fn capacity(&self) -> usize { self.core.capacity() } - - #[inline] - fn size_class_is_64bit(&self) -> bool { - self.core.size_class_is_64bit() - } - - #[inline(always)] - fn raw_capacity(&self) -> usize { - self.core.raw_capacity() - } -} - -impl OrderMapCore { - // Return whether we need 32 or 64 bits to specify a bucket or entry index - #[cfg(not(feature = "test_low_transition_point"))] - fn size_class_is_64bit(&self) -> bool { - usize::max_value() > u32::max_value() as usize - && self.raw_capacity() >= u32::max_value() as usize - } - - // for testing - #[cfg(feature = "test_low_transition_point")] - fn size_class_is_64bit(&self) -> bool { - self.raw_capacity() >= 64 - } - - #[inline(always)] - fn raw_capacity(&self) -> usize { - self.indices.len() - } -} - -/// Trait for the "size class". Either u32 or u64 depending on the index -/// size needed to address an entry's indes in self.core.entries. -trait Size { - fn is_64_bit() -> bool; - fn is_same_size() -> bool { - Self::is_64_bit() == T::is_64_bit() - } -} - -impl Size for u32 { - #[inline] - fn is_64_bit() -> bool { - false - } -} - -impl Size for u64 { - #[inline] - fn is_64_bit() -> bool { - true - } -} - -/// Call self.method(args) with `::` or `::` depending on `self` -/// size class. -/// -/// The u32 or u64 is *prepended* to the type parameter list! -macro_rules! dispatch_32_vs_64 { - // self.methodname with other explicit type params, - // size is prepended - ($self_:ident . $method:ident::<$($t:ty),*>($($arg:expr),*)) => { - if $self_.size_class_is_64bit() { - $self_.$method::($($arg),*) - } else { - $self_.$method::($($arg),*) - } - }; - // self.methodname with only one type param, the size. - ($self_:ident . $method:ident ($($arg:expr),*)) => { - if $self_.size_class_is_64bit() { - $self_.$method::($($arg),*) - } else { - $self_.$method::($($arg),*) - } - }; - // functionname with size_class_is_64bit as the first argument, only one - // type param, the size. - ($self_:ident => $function:ident ($($arg:expr),*)) => { - if $self_.size_class_is_64bit() { - $function::($($arg),*) - } else { - $function::($($arg),*) - } - }; -} - -/// Entry for an existing key-value pair or a vacant location to -/// insert one. -pub enum Entry<'a, K: 'a, V: 'a> { - /// Existing slot with equivalent key. - Occupied(OccupiedEntry<'a, K, V>), - /// Vacant slot (no equivalent key in the map). - Vacant(VacantEntry<'a, K, V>), -} - -impl<'a, K, V> Entry<'a, K, V> { - /// Computes in **O(1)** time (amortized average). - pub fn or_insert(self, default: V) -> &'a mut V { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(default), - } - } - - /// Computes in **O(1)** time (amortized average). 
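These `Entry` methods move to `map_core.rs` verbatim and are re-exported via the `pub use map_core::{Entry, OccupiedEntry, VacantEntry};` line earlier in this diff. A quick usage sketch of `entry`/`or_insert` (illustrative, not part of the patch):

```rust
use indexmap::IndexMap;

fn main() {
    let mut counts: IndexMap<&str, u32> = IndexMap::new();
    for &word in ["a", "b", "a"].iter() {
        // or_insert returns &mut V, inserting the default on first sight of the key
        *counts.entry(word).or_insert(0) += 1;
    }
    assert_eq!(counts["a"], 2);
    assert_eq!(counts["b"], 1);
}
```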
- pub fn or_insert_with(self, call: F) -> &'a mut V - where - F: FnOnce() -> V, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(call()), - } - } - - pub fn key(&self) -> &K { - match *self { - Entry::Occupied(ref entry) => entry.key(), - Entry::Vacant(ref entry) => entry.key(), - } - } - - /// Return the index where the key-value pair exists or will be inserted. - pub fn index(&self) -> usize { - match *self { - Entry::Occupied(ref entry) => entry.index(), - Entry::Vacant(ref entry) => entry.index(), - } - } - - /// Modifies the entry if it is occupied. - pub fn and_modify(self, f: F) -> Self - where - F: FnOnce(&mut V), - { - match self { - Entry::Occupied(mut o) => { - f(o.get_mut()); - Entry::Occupied(o) - } - x => x, - } - } - - /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable - /// reference to it. Otherwise a mutable reference to an already existent value is returned. - /// - /// Computes in **O(1)** time (amortized average). - pub fn or_default(self) -> &'a mut V - where - V: Default, - { - match self { - Entry::Occupied(entry) => entry.into_mut(), - Entry::Vacant(entry) => entry.insert(V::default()), - } - } -} - -impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Entry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(), - Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(), - } - } -} - -/// A view into an occupied entry in a `IndexMap`. -/// It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -pub struct OccupiedEntry<'a, K: 'a, V: 'a> { - map: &'a mut OrderMapCore, - key: K, - probe: usize, - index: usize, -} - -impl<'a, K, V> OccupiedEntry<'a, K, V> { - pub fn key(&self) -> &K { - &self.key - } - pub fn get(&self) -> &V { - &self.map.entries[self.index].value - } - pub fn get_mut(&mut self) -> &mut V { - &mut self.map.entries[self.index].value - } - - /// Put the new key in the occupied entry's key slot - pub(crate) fn replace_key(self) -> K { - let old_key = &mut self.map.entries[self.index].key; - replace(old_key, self.key) - } - - /// Return the index of the key-value pair - pub fn index(&self) -> usize { - self.index - } - pub fn into_mut(self) -> &'a mut V { - &mut self.map.entries[self.index].value - } - - /// Sets the value of the entry to `value`, and returns the entry's old value. - pub fn insert(&mut self, value: V) -> V { - replace(self.get_mut(), value) - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// **NOTE:** This is equivalent to `.swap_remove()`. - pub fn remove(self) -> V { - self.swap_remove() - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the postion of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove(self) -> V { - self.swap_remove_entry().1 - } - - /// Remove the key, value pair stored in the map for this entry, and return the value. - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). 
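The `swap_remove`/`shift_remove` distinction documented here shows up throughout the benchmarks as well; a small sketch contrasting their effect on order (illustrative, not part of the patch):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<i32, ()> = (1..=4).map(|i| (i, ())).collect();

    // swap_remove: O(1), but the last pair moves into the hole
    map.swap_remove(&2);
    assert!(map.keys().eq([1, 4, 3].iter()));

    // shift_remove: O(n), remaining pairs keep their relative order
    map.shift_remove(&4);
    assert!(map.keys().eq([1, 3].iter()));
}
```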
- pub fn shift_remove(self) -> V { - self.shift_remove_entry().1 - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// **NOTE:** This is equivalent to `.swap_remove_entry()`. - pub fn remove_entry(self) -> (K, V) { - self.swap_remove_entry() - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// Like `Vec::swap_remove`, the pair is removed by swapping it with the - /// last element of the map and popping it off. **This perturbs - /// the postion of what used to be the last element!** - /// - /// Computes in **O(1)** time (average). - pub fn swap_remove_entry(self) -> (K, V) { - self.map.swap_remove_found(self.probe, self.index) - } - - /// Remove and return the key, value pair stored in the map for this entry - /// - /// Like `Vec::remove`, the pair is removed by shifting all of the - /// elements that follow it, preserving their relative order. - /// **This perturbs the index of all of those elements!** - /// - /// Computes in **O(n)** time (average). - pub fn shift_remove_entry(self) -> (K, V) { - self.map.shift_remove_found(self.probe, self.index) - } -} - -impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for OccupiedEntry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct(stringify!(OccupiedEntry)) - .field("key", self.key()) - .field("value", self.get()) - .finish() - } -} - -/// A view into a vacant entry in a `IndexMap`. -/// It is part of the [`Entry`] enum. -/// -/// [`Entry`]: enum.Entry.html -pub struct VacantEntry<'a, K: 'a, V: 'a> { - map: &'a mut OrderMapCore, - key: K, - hash: HashValue, - probe: usize, -} - -impl<'a, K, V> VacantEntry<'a, K, V> { - pub fn key(&self) -> &K { - &self.key - } - pub fn into_key(self) -> K { - self.key - } - /// Return the index where the key-value pair will be inserted. - pub fn index(&self) -> usize { - self.map.len() - } - pub fn insert(self, value: V) -> &'a mut V { - if self.map.size_class_is_64bit() { - self.insert_impl::(value) - } else { - self.insert_impl::(value) - } - } - - fn insert_impl(self, value: V) -> &'a mut V - where - Sz: Size, - { - let index = self.map.entries.len(); - self.map.entries.push(Bucket { - hash: self.hash, - key: self.key, - value, - }); - let old_pos = Pos::with_hash::(index, self.hash); - self.map.insert_phase_2::(self.probe, old_pos); - &mut { self.map }.entries[index].value - } -} - -impl<'a, K: 'a + fmt::Debug, V: 'a> fmt::Debug for VacantEntry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple(stringify!(VacantEntry)) - .field(self.key()) - .finish() - } } impl IndexMap @@ -834,15 +225,6 @@ where K: Hash + Eq, S: BuildHasher, { - fn insert_phase_1<'a, Sz, A>(&'a mut self, key: K, action: A) -> A::Output - where - Sz: Size, - A: ProbeAction<'a, Sz, K, V>, - { - let hash = hash_elem_using(&self.hash_builder, &key); - self.core.insert_phase_1::(hash, key, action) - } - /// Remove all key-value pairs in the map, while preserving its capacity. /// /// Computes in **O(n)** time. @@ -855,23 +237,10 @@ where /// FIXME Not implemented fully yet. pub fn reserve(&mut self, additional: usize) { if additional > 0 { - self.reserve_one(); + self.core.reserve_one(); } } - fn reserve_one(&mut self) { - if self.len() == self.capacity() { - dispatch_32_vs_64!(self.double_capacity()); - } - } - - fn double_capacity(&mut self) - where - Sz: Size, - { - self.core.double_capacity::(); - } - /// Insert a key-value pair in the map. 
/// /// If an equivalent key already exists in the map: the key remains and @@ -903,8 +272,8 @@ where /// See also [`entry`](#method.entry) if you you want to insert *or* modify /// or if you need to get the index of the corresponding key-value pair. pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { - self.reserve_one(); - dispatch_32_vs_64!(self.insert_phase_1::<_>(key, InsertValue(value))) + let hash = hash_elem_using(&self.hash_builder, &key); + self.core.insert_full(hash, key, value) } /// Get the given key’s corresponding entry in the map for insertion and/or @@ -912,35 +281,35 @@ where /// /// Computes in **O(1)** time (amortized average). pub fn entry(&mut self, key: K) -> Entry { - self.reserve_one(); - dispatch_32_vs_64!(self.insert_phase_1::<_>(key, MakeEntry)) + let hash = hash_elem_using(&self.hash_builder, &key); + self.core.entry(hash, key) } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter(&self) -> Iter { Iter { - iter: self.core.entries.iter(), + iter: self.as_entries().iter(), } } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter_mut(&mut self) -> IterMut { IterMut { - iter: self.core.entries.iter_mut(), + iter: self.as_entries_mut().iter_mut(), } } /// Return an iterator over the keys of the map, in their order pub fn keys(&self) -> Keys { Keys { - iter: self.core.entries.iter(), + iter: self.as_entries().iter(), } } /// Return an iterator over the values of the map, in their order pub fn values(&self) -> Values { Values { - iter: self.core.entries.iter(), + iter: self.as_entries().iter(), } } @@ -948,7 +317,7 @@ where /// in their order pub fn values_mut(&mut self) -> ValuesMut { ValuesMut { - iter: self.core.entries.iter_mut(), + iter: self.as_entries_mut().iter_mut(), } } @@ -979,7 +348,7 @@ where Q: Hash + Equivalent, { if let Some(found) = self.get_index_of(key) { - let entry = &self.core.entries[found]; + let entry = &self.as_entries()[found]; Some((found, &entry.key, &entry.value)) } else { None @@ -1020,7 +389,7 @@ where Q: Hash + Equivalent, { if let Some((_, found)) = self.find(key) { - let entry = &mut self.core.entries[found]; + let entry = &mut self.as_entries_mut()[found]; Some((found, &mut entry.key, &mut entry.value)) } else { None @@ -1028,7 +397,7 @@ where } /// Return probe (indices) and position (entries) - pub(crate) fn find(&self, key: &Q) -> Option<(usize, usize)> + fn find(&self, key: &Q) -> Option<(usize, usize)> where Q: Hash + Equivalent, { @@ -1158,15 +527,7 @@ where where F: FnMut(&mut K, &mut V) -> bool, { - dispatch_32_vs_64!(self.retain_mut_sz::<_>(keep)); - } - - fn retain_mut_sz(&mut self, keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - Sz: Size, - { - self.core.retain_in_order_impl::(keep); + self.core.retain_in_order(keep); } /// Sort the map’s key-value pairs by the default ordering of the keys. @@ -1198,14 +559,15 @@ where /// the key-value pairs with the result. /// /// The sort is stable. - pub fn sorted_by(mut self, mut cmp: F) -> IntoIter + pub fn sorted_by(self, mut cmp: F) -> IntoIter where F: FnMut(&K, &V, &K, &V) -> Ordering, { - self.core - .entries - .sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); - self.into_iter() + let mut entries = self.into_entries(); + entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); + IntoIter { + iter: entries.into_iter(), + } } /// Reverses the order of the map’s key-value pairs in place. 
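`sorted_by` now sorts a plain entries `Vec` and builds the `IntoIter` from it directly instead of sorting `self.core.entries` in place; behavior is unchanged. For reference, a sketch of the related in-place ordering APIs (illustrative, not part of the patch):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.insert('b', 2);
    map.insert('c', 3);
    map.insert('a', 1);

    // sort_keys reorders the dense entries vec, then rebuilds the hash index
    map.sort_keys();
    assert!(map.keys().eq(['a', 'b', 'c'].iter()));

    // reverse recomputes bucket positions in place, without re-hashing
    map.reverse();
    assert!(map.keys().eq(['c', 'b', 'a'].iter()));
}
```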
@@ -1218,10 +580,8 @@ where /// Clears the `IndexMap`, returning all key-value pairs as a drain iterator. /// Keeps the allocated memory for reuse. pub fn drain(&mut self, range: RangeFull) -> Drain { - self.core.clear_indices(); - Drain { - iter: self.core.entries.drain(range), + iter: self.core.drain(range), } } } @@ -1240,7 +600,7 @@ impl IndexMap { /// /// Computes in **O(1)** time. pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { - self.core.entries.get(index).map(Bucket::refs) + self.as_entries().get(index).map(Bucket::refs) } /// Get a key-value pair by index @@ -1249,7 +609,7 @@ impl IndexMap { /// /// Computes in **O(1)** time. pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { - self.core.entries.get_mut(index).map(Bucket::muts) + self.as_entries_mut().get_mut(index).map(Bucket::muts) } /// Remove the key-value pair by index @@ -1262,14 +622,9 @@ impl IndexMap { /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { - let (probe, found) = match self - .core - .entries - .get(index) - .map(|e| self.core.find_existing_entry(e)) - { + let (probe, found) = match self.as_entries().get(index) { + Some(e) => self.core.find_existing_entry(e), None => return None, - Some(t) => t, }; Some(self.core.swap_remove_found(probe, found)) } @@ -1284,530 +639,14 @@ impl IndexMap { /// /// Computes in **O(n)** time (average). pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { - let (probe, found) = match self - .core - .entries - .get(index) - .map(|e| self.core.find_existing_entry(e)) - { + let (probe, found) = match self.as_entries().get(index) { + Some(e) => self.core.find_existing_entry(e), None => return None, - Some(t) => t, }; Some(self.core.shift_remove_found(probe, found)) } } -// Methods that don't use any properties (Hash / Eq) of K. -// -// It's cleaner to separate them out, then the compiler checks that we are not -// using Hash + Eq at all in these methods. -// -// However, we should probably not let this show in the public API or docs. -impl OrderMapCore { - fn len(&self) -> usize { - self.entries.len() - } - - fn capacity(&self) -> usize { - usable_capacity(self.raw_capacity()) - } - - fn clear(&mut self) { - self.entries.clear(); - self.clear_indices(); - } - - // clear self.indices to the same state as "no elements" - fn clear_indices(&mut self) { - for pos in self.indices.iter_mut() { - *pos = Pos::none(); - } - } - - fn first_allocation(&mut self) { - debug_assert_eq!(self.len(), 0); - let raw_cap = 8usize; - self.mask = raw_cap.wrapping_sub(1); - self.indices = vec![Pos::none(); raw_cap].into_boxed_slice(); - self.entries = Vec::with_capacity(usable_capacity(raw_cap)); - } - - #[inline(never)] - // `Sz` is *current* Size class, before grow - fn double_capacity(&mut self) - where - Sz: Size, - { - debug_assert!(self.raw_capacity() == 0 || self.len() > 0); - if self.raw_capacity() == 0 { - return self.first_allocation(); - } - - // find first ideally placed element -- start of cluster - let mut first_ideal = 0; - for (i, index) in enumerate(&*self.indices) { - if let Some(pos) = index.pos() { - if 0 == probe_distance(self.mask, self.entries[pos].hash, i) { - first_ideal = i; - break; - } - } - } - - // visit the entries in an order where we can simply reinsert them - // into self.indices without any bucket stealing. 
- let new_raw_cap = self.indices.len() * 2; - let old_indices = replace( - &mut self.indices, - vec![Pos::none(); new_raw_cap].into_boxed_slice(), - ); - self.mask = new_raw_cap.wrapping_sub(1); - - // `Sz` is the old size class, and either u32 or u64 is the new - for &pos in &old_indices[first_ideal..] { - dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); - } - - for &pos in &old_indices[..first_ideal] { - dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); - } - let more = self.capacity() - self.len(); - self.entries.reserve_exact(more); - } - - // write to self.indices - // read from self.entries at `pos` - // - // reinserting rewrites all `Pos` entries anyway. This handles transitioning - // from u32 to u64 size class if needed by using the two type parameters. - fn reinsert_entry_in_order(&mut self, pos: Pos) - where - SzNew: Size, - SzOld: Size, - { - if let Some((i, hash_proxy)) = pos.resolve::() { - // only if the size class is conserved can we use the short hash - let entry_hash = if SzOld::is_same_size::() { - hash_proxy.get_short_hash(&self.entries, i).into_hash() - } else { - self.entries[i].hash - }; - // find first empty bucket and insert there - let mut probe = desired_pos(self.mask, entry_hash); - probe_loop!(probe < self.indices.len(), { - if self.indices[probe].is_none() { - // empty bucket, insert here - self.indices[probe] = Pos::with_hash::(i, entry_hash); - return; - } - }); - } - } - - fn pop_impl(&mut self) -> Option<(K, V)> { - let (probe, found) = match self.entries.last().map(|e| self.find_existing_entry(e)) { - None => return None, - Some(t) => t, - }; - debug_assert_eq!(found, self.entries.len() - 1); - Some(self.swap_remove_found(probe, found)) - } - - fn insert_phase_1<'a, Sz, A>(&'a mut self, hash: HashValue, key: K, action: A) -> A::Output - where - Sz: Size, - K: Eq, - A: ProbeAction<'a, Sz, K, V>, - { - let mut probe = desired_pos(self.mask, hash); - let mut dist = 0; - debug_assert!(self.len() < self.raw_capacity()); - probe_loop!(probe < self.indices.len(), { - if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { - let entry_hash = hash_proxy.get_short_hash(&self.entries, i); - // if existing element probed less than us, swap - let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe); - if their_dist < dist { - // robin hood: steal the spot if it's better for us - return action.steal(VacantEntry { - map: self, - hash: hash, - key: key, - probe: probe, - }); - } else if entry_hash == hash && self.entries[i].key == key { - return action.hit(OccupiedEntry { - map: self, - key: key, - probe: probe, - index: i, - }); - } - } else { - // empty bucket, insert here - return action.empty(VacantEntry { - map: self, - hash: hash, - key: key, - probe: probe, - }); - } - dist += 1; - }); - } - - /// phase 2 is post-insert where we forward-shift `Pos` in the indices. 
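Phase 2 is plain Robin Hood displacement. A simplified standalone sketch of the idea, using hypothetical `Option<usize>` slots in place of packed `Pos` values:

```rust
// Simplified: insert `old` at `probe`, pushing each displaced occupant
// one slot forward, wrapping at the end (like probe_loop! does).
fn insert_phase_2(indices: &mut [Option<usize>], mut probe: usize, mut old: usize) {
    loop {
        match indices[probe] {
            None => {
                indices[probe] = Some(old); // empty bucket found, done
                return;
            }
            Some(existing) => {
                indices[probe] = Some(old); // take this slot...
                old = existing;             // ...and push the occupant onward
            }
        }
        probe = (probe + 1) % indices.len();
    }
}

fn main() {
    let mut idx = [Some(7), Some(8), None, None];
    insert_phase_2(&mut idx, 0, 9); // steal slot 0; 7 and 8 shift forward
    assert_eq!(idx, [Some(9), Some(7), Some(8), None]);
}
```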
- fn insert_phase_2(&mut self, mut probe: usize, mut old_pos: Pos) - where - Sz: Size, - { - probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe]; - if pos.is_none() { - *pos = old_pos; - break; - } else { - old_pos = replace(pos, old_pos); - } - }); - } - - /// Return probe (indices) and position (entries) - fn find_using(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> - where - F: Fn(&Bucket) -> bool, - { - dispatch_32_vs_64!(self.find_using_impl::<_>(hash, key_eq)) - } - - fn find_using_impl(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> - where - F: Fn(&Bucket) -> bool, - Sz: Size, - { - debug_assert!(self.len() > 0); - let mut probe = desired_pos(self.mask, hash); - let mut dist = 0; - probe_loop!(probe < self.indices.len(), { - if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { - let entry_hash = hash_proxy.get_short_hash(&self.entries, i); - if dist > probe_distance(self.mask, entry_hash.into_hash(), probe) { - // give up when probe distance is too long - return None; - } else if entry_hash == hash && key_eq(&self.entries[i]) { - return Some((probe, i)); - } - } else { - return None; - } - dist += 1; - }); - } - - /// Find `entry` which is already placed inside self.entries; - /// return its probe and entry index. - fn find_existing_entry(&self, entry: &Bucket) -> (usize, usize) { - debug_assert!(self.len() > 0); - - let hash = entry.hash; - let actual_pos = ptrdistance(&self.entries[0], entry); - let probe = dispatch_32_vs_64!(self => - find_existing_entry_at(&self.indices, hash, self.mask, actual_pos)); - (probe, actual_pos) - } - - /// Remove an entry by shifting all entries that follow it - fn shift_remove_found(&mut self, probe: usize, found: usize) -> (K, V) { - dispatch_32_vs_64!(self.shift_remove_found_impl(probe, found)) - } - - fn shift_remove_found_impl(&mut self, probe: usize, found: usize) -> (K, V) - where - Sz: Size, - { - // index `probe` and entry `found` is to be removed - // use Vec::remove, but then we need to update the indices that point - // to all of the other entries that have to move - self.indices[probe] = Pos::none(); - let entry = self.entries.remove(found); - - // correct indices that point to the entries that followed the removed entry. - // use a heuristic between a full sweep vs. a `probe_loop!` for every shifted item. 
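A rough cost-model sketch of that heuristic with hypothetical numbers, where `shifted_entries` stands for `entries.len() - found` after the removal (mirroring the test that immediately follows):

```rust
// Hypothetical cost model: after removing an entry, either sweep every
// index slot once, or run one probe sequence per shifted entry.
fn use_full_sweep(raw_capacity: usize, shifted_entries: usize) -> bool {
    raw_capacity < shifted_entries * 2
}

fn main() {
    // removal near the front of 6000 entries in 8192 buckets: sweeping wins
    assert!(use_full_sweep(8192, 5900));
    // removal near the back: fixing up 10 entries individually is cheaper
    assert!(!use_full_sweep(8192, 10));
}
```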
- if self.indices.len() < (self.entries.len() - found) * 2 { - // shift all indices greater than `found` - for pos in self.indices.iter_mut() { - if let Some((i, _)) = pos.resolve::() { - if i > found { - // shift the index - pos.set_pos::(i - 1); - } - } - } - } else { - // find each following entry to shift its index - for (offset, entry) in enumerate(&self.entries[found..]) { - let index = found + offset; - let mut probe = desired_pos(self.mask, entry.hash); - probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe]; - if let Some((i, _)) = pos.resolve::() { - if i == index + 1 { - // found it, shift it - pos.set_pos::(index); - break; - } - } - }); - } - } - - self.backward_shift_after_removal::(probe); - - (entry.key, entry.value) - } - - /// Remove an entry by swapping it with the last - fn swap_remove_found(&mut self, probe: usize, found: usize) -> (K, V) { - dispatch_32_vs_64!(self.swap_remove_found_impl(probe, found)) - } - - fn swap_remove_found_impl(&mut self, probe: usize, found: usize) -> (K, V) - where - Sz: Size, - { - // index `probe` and entry `found` is to be removed - // use swap_remove, but then we need to update the index that points - // to the other entry that has to move - self.indices[probe] = Pos::none(); - let entry = self.entries.swap_remove(found); - - // correct index that points to the entry that had to swap places - if let Some(entry) = self.entries.get(found) { - // was not last element - // examine new element in `found` and find it in indices - let mut probe = desired_pos(self.mask, entry.hash); - probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe]; - if let Some((i, _)) = pos.resolve::() { - if i >= self.entries.len() { - // found it - pos.set_pos::(found); - break; - } - } - }); - } - - self.backward_shift_after_removal::(probe); - - (entry.key, entry.value) - } - - fn backward_shift_after_removal(&mut self, probe_at_remove: usize) - where - Sz: Size, - { - // backward shift deletion in self.indices - // after probe, shift all non-ideally placed indices backward - let mut last_probe = probe_at_remove; - let mut probe = probe_at_remove + 1; - probe_loop!(probe < self.indices.len(), { - if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { - let entry_hash = hash_proxy.get_short_hash(&self.entries, i); - if probe_distance(self.mask, entry_hash.into_hash(), probe) > 0 { - self.indices[last_probe] = self.indices[probe]; - self.indices[probe] = Pos::none(); - } else { - break; - } - } else { - break; - } - last_probe = probe; - }); - } - - fn retain_in_order_impl(&mut self, mut keep: F) - where - F: FnMut(&mut K, &mut V) -> bool, - Sz: Size, - { - // Like Vec::retain in self.entries; for each removed key-value pair, - // we clear its corresponding spot in self.indices, and run the - // usual backward shift in self.indices. 
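At the API level this backs `retain`, which preserves the relative order of the surviving pairs; a small usage sketch (illustrative, not part of the patch):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<i32, i32> = (0..10).map(|i| (i, i * i)).collect();
    // retain keeps surviving pairs in their original relative order
    map.retain(|&k, _| k % 3 == 0);
    assert!(map.keys().eq([0, 3, 6, 9].iter()));
}
```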
- let len = self.entries.len(); - let mut n_deleted = 0; - for i in 0..len { - let will_keep; - let hash; - { - let ent = &mut self.entries[i]; - hash = ent.hash; - will_keep = keep(&mut ent.key, &mut ent.value); - }; - let probe = find_existing_entry_at::(&self.indices, hash, self.mask, i); - if !will_keep { - n_deleted += 1; - self.indices[probe] = Pos::none(); - self.backward_shift_after_removal::(probe); - } else if n_deleted > 0 { - self.indices[probe].set_pos::(i - n_deleted); - self.entries.swap(i - n_deleted, i); - } - } - self.entries.truncate(len - n_deleted); - } - - fn sort_by(&mut self, mut compare: F) - where - F: FnMut(&K, &V, &K, &V) -> Ordering, - { - let side_index = self.save_hash_index(); - self.entries - .sort_by(move |ei, ej| compare(&ei.key, &ei.value, &ej.key, &ej.value)); - self.restore_hash_index(side_index); - } - - fn reverse(&mut self) { - self.entries.reverse(); - - // No need to save hash indices, can easily calculate what they should - // be, given that this is an in-place reversal. - dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, self.entries.len())); - - fn apply_new_index(indices: &mut [Pos], len: usize) - where - Sz: Size, - { - for pos in indices { - if let Some((i, _)) = pos.resolve::() { - pos.set_pos::(len - i - 1); - } - } - } - } - - fn save_hash_index(&mut self) -> Vec { - // Temporarily use the hash field in a bucket to store the old index. - // Save the old hash values in `side_index`. Then we can sort - // `self.entries` in place. - Vec::from_iter( - enumerate(&mut self.entries).map(|(i, elt)| replace(&mut elt.hash, HashValue(i)).get()), - ) - } - - fn restore_hash_index(&mut self, mut side_index: Vec) { - // Write back the hash values from side_index and fill `side_index` with - // a mapping from the old to the new index instead. 
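A toy model of this save/restore trick, with plain tuples standing in for `Bucket` (hypothetical code, not the crate's): the hash slot temporarily stores each entry's pre-sort index, and restoring recovers both the hashes and the old-to-new permutation:

```rust
fn main() {
    // Each "bucket" is (hash, key).
    let mut entries = vec![(0xbeef_u64, 'c'), (0xf00d, 'a'), (0xcafe, 'b')];

    // save: move the hashes out, storing each entry's current index in their place
    let side: Vec<u64> = entries
        .iter_mut()
        .enumerate()
        .map(|(i, e)| std::mem::replace(&mut e.0, i as u64))
        .collect();

    entries.sort_by_key(|e| e.1);

    // restore: put the real hash back and record where each old index ended up
    let mut new_index = vec![0usize; side.len()];
    for (new, e) in entries.iter_mut().enumerate() {
        let old = e.0 as usize;
        e.0 = side[old];
        new_index[old] = new;
    }
    assert_eq!(entries, [(0xf00d, 'a'), (0xcafe, 'b'), (0xbeef, 'c')]);
    assert_eq!(new_index, [2, 0, 1]); // 'c' moved 0->2, 'a' 1->0, 'b' 2->1
}
```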
- for (i, ent) in enumerate(&mut self.entries) { - let old_index = ent.hash.get(); - ent.hash = HashValue(replace(&mut side_index[old_index], i)); - } - - // Apply new index to self.indices - dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, &side_index)); - - fn apply_new_index(indices: &mut [Pos], new_index: &[usize]) - where - Sz: Size, - { - for pos in indices { - if let Some((i, _)) = pos.resolve::() { - pos.set_pos::(new_index[i]); - } - } - } - } -} - -trait ProbeAction<'a, Sz: Size, K, V>: Sized { - type Output; - // handle an occupied spot in the map - fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output; - // handle an empty spot in the map - fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output; - // robin hood: handle a spot that you should steal because it's better for you - fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output; -} - -struct InsertValue(V); - -impl<'a, Sz: Size, K, V> ProbeAction<'a, Sz, K, V> for InsertValue { - type Output = (usize, Option); - - fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output { - let old = replace(&mut entry.map.entries[entry.index].value, self.0); - (entry.index, Some(old)) - } - - fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output { - let pos = &mut entry.map.indices[entry.probe]; - let index = entry.map.entries.len(); - *pos = Pos::with_hash::(index, entry.hash); - entry.map.entries.push(Bucket { - hash: entry.hash, - key: entry.key, - value: self.0, - }); - (index, None) - } - - fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output { - let index = entry.map.entries.len(); - entry.insert_impl::(self.0); - (index, None) - } -} - -struct MakeEntry; - -impl<'a, Sz: Size, K: 'a, V: 'a> ProbeAction<'a, Sz, K, V> for MakeEntry { - type Output = Entry<'a, K, V>; - - fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output { - Entry::Occupied(entry) - } - - fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output { - Entry::Vacant(entry) - } - - fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output { - Entry::Vacant(entry) - } -} - -/// Find, in the indices, an entry that already exists at a known position -/// inside self.entries in the IndexMap. -/// -/// This is effectively reverse lookup, from the entries into the hash buckets. -/// -/// Return the probe index (into self.indices) -/// -/// + indices: The self.indices of the map, -/// + hash: The full hash value from the bucket -/// + mask: self.mask. -/// + entry_index: The index of the entry in self.entries -fn find_existing_entry_at( - indices: &[Pos], - hash: HashValue, - mask: usize, - entry_index: usize, -) -> usize -where - Sz: Size, -{ - let mut probe = desired_pos(mask, hash); - probe_loop!(probe < indices.len(), { - // the entry *must* be present; if we hit a Pos::none this was not true - // and there is a debug assertion in resolve_existing_index for that. - let i = indices[probe].resolve_existing_index::(); - if i == entry_index { - return probe; - } - }); -} - use std::slice::Iter as SliceIter; use std::slice::IterMut as SliceIterMut; use std::vec::IntoIter as VecIntoIter; @@ -2096,7 +935,7 @@ where type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { - iter: self.core.entries.into_iter(), + iter: self.into_entries().into_iter(), } } } diff --git a/src/map_core.rs b/src/map_core.rs new file mode 100644 index 00000000..2d8a74ee --- /dev/null +++ b/src/map_core.rs @@ -0,0 +1,1237 @@ +//! This is the core implementation that doesn't depend on the hasher at all. +//! +//! 
The methods of `IndexMapCore` don't use any properties (Hash / Eq) of K. +//! +//! It's cleaner to separate them out, so that the compiler checks that we are not +//! using Hash + Eq at all in these methods. +//! +//! However, we should probably not let this show in the public API or docs. + +#[cfg(not(has_std))] +use alloc::boxed::Box; +#[cfg(not(has_std))] +use std::vec::Vec; + +use std::cmp::{max, Ordering}; +use std::fmt; +use std::iter::FromIterator; +use std::marker::PhantomData; +use std::mem::replace; +use std::ops::RangeFull; +use std::vec::Drain; + +use util::{enumerate, ptrdistance}; +use {Bucket, Entries, HashValue}; + +/// Trait for the "size class". Either u32 or u64 depending on the index +/// size needed to address an entry's index in self.core.entries. +trait Size { + fn is_64_bit() -> bool; + fn is_same_size<T: Size>() -> bool { + Self::is_64_bit() == T::is_64_bit() + } +} + +impl Size for u32 { + #[inline] + fn is_64_bit() -> bool { + false + } +} + +impl Size for u64 { + #[inline] + fn is_64_bit() -> bool { + true + } +} + +/// Call self.method(args) with `::<u32>` or `::<u64>` depending on `self` +/// size class. +/// +/// The u32 or u64 is *prepended* to the type parameter list! +macro_rules! dispatch_32_vs_64 { + // self.methodname with other explicit type params, + // size is prepended + ($self_:ident . $method:ident::<$($t:ty),*>($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $self_.$method::<u64, $($t),*>($($arg),*) + } else { + $self_.$method::<u32, $($t),*>($($arg),*) + } + }; + // self.methodname with only one type param, the size. + ($self_:ident . $method:ident ($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $self_.$method::<u64>($($arg),*) + } else { + $self_.$method::<u32>($($arg),*) + } + }; + // functionname with size_class_is_64bit as the first argument, only one + // type param, the size. + ($self_:ident => $function:ident ($($arg:expr),*)) => { + if $self_.size_class_is_64bit() { + $function::<u64>($($arg),*) + } else { + $function::<u32>($($arg),*) + } + }; +} + +/// A possibly truncated hash value. +/// +#[derive(Debug)] +struct ShortHash<Sz>(usize, PhantomData<Sz>); + +impl<Sz> ShortHash<Sz> { + /// Pretend this is a full HashValue, which + /// is completely ok w.r.t. determining bucket index + /// + /// - Sz = u32: 32-bit hash is enough to select bucket index + /// - Sz = u64: hash is not truncated + fn into_hash(self) -> HashValue { + HashValue(self.0) + } +} + +impl<Sz> Copy for ShortHash<Sz> {} +impl<Sz> Clone for ShortHash<Sz> { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl<Sz> PartialEq for ShortHash<Sz> { + #[inline] + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +// Compare ShortHash == HashValue by truncating appropriately +// if applicable before the comparison +impl<Sz> PartialEq<HashValue> for ShortHash<Sz> +where + Sz: Size, +{ + #[inline] + fn eq(&self, rhs: &HashValue) -> bool { + if Sz::is_64_bit() { + self.0 == rhs.0 + } else { + lo32(self.0 as u64) == lo32(rhs.0 as u64) + } + } +} +impl<Sz> From<ShortHash<Sz>> for HashValue { + fn from(x: ShortHash<Sz>) -> Self { + HashValue(x.0) + } +} + +/// `Pos` is stored in the `indices` array and it points to the index of a +/// `Bucket` in self.core.entries. +/// +/// Pos can be interpreted either as a 64-bit index, or as a 32-bit index and +/// a 32-bit hash. +/// +/// Storing the truncated hash next to the index saves loading the hash from the +/// entry, increasing the cache efficiency. +/// +/// Note that the lower 32 bits of the hash are enough to compute desired +/// position and probe distance in a hash map with less than 2**32 buckets.
+/// +/// The IndexMap will simply query its **current raw capacity** to see what its +/// current size class is, and dispatch to the 32-bit or 64-bit lookup code as +/// appropriate. Only the growth code needs some extra logic to handle the +/// transition from one class to another. +#[derive(Copy)] +struct Pos { + index: u64, +} + +impl Clone for Pos { + #[inline(always)] + fn clone(&self) -> Self { + *self + } +} + +impl fmt::Debug for Pos { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.pos() { + Some(i) => write!(f, "Pos({} / {:x})", i, self.index), + None => write!(f, "Pos(None)"), + } + } +} + +impl Pos { + #[inline] + fn none() -> Self { + Pos { index: !0 } + } + + #[inline] + fn is_none(&self) -> bool { + self.index == !0 + } + + /// Return the index part of the Pos value inside `Some(_)` if the position + /// is not none, otherwise return `None`. + #[inline] + fn pos(&self) -> Option<usize> { + if self.index == !0 { + None + } else { + Some(lo32(self.index as u64)) + } + } + + /// Set the index part of the Pos value to `i` + #[inline] + fn set_pos<Sz>(&mut self, i: usize) + where + Sz: Size, + { + debug_assert!(!self.is_none()); + if Sz::is_64_bit() { + self.index = i as u64; + } else { + self.index = i as u64 | ((self.index >> 32) << 32) + } + } + + #[inline] + fn with_hash<Sz>(i: usize, hash: HashValue) -> Self + where + Sz: Size, + { + if Sz::is_64_bit() { + Pos { index: i as u64 } + } else { + Pos { + index: i as u64 | ((hash.0 as u64) << 32), + } + } + } + + /// “Resolve” the Pos into a combination of its index value and + /// a proxy value to the hash (whether it contains the hash or not + /// depends on the size class of the hash map). + #[inline] + fn resolve<Sz>(&self) -> Option<(usize, ShortHashProxy<Sz>)> + where + Sz: Size, + { + if Sz::is_64_bit() { + if !self.is_none() { + Some((self.index as usize, ShortHashProxy::new(0))) + } else { + None + } + } else { + if !self.is_none() { + let (i, hash) = split_lo_hi(self.index); + Some((i as usize, ShortHashProxy::new(hash as usize))) + } else { + None + } + } + } + + /// Like resolve, but the Pos **must** be non-none. Return its index. + #[inline] + fn resolve_existing_index<Sz>(&self) -> usize + where + Sz: Size, + { + debug_assert!( + !self.is_none(), + "datastructure inconsistent: none where valid Pos expected" + ); + if Sz::is_64_bit() { + self.index as usize + } else { + let (i, _) = split_lo_hi(self.index); + i as usize + } + } +} + +#[inline] +fn lo32(x: u64) -> usize { + (x & 0xFFFF_FFFF) as usize +} + +// split into low, hi parts +#[inline] +fn split_lo_hi(x: u64) -> (u32, u32) { + (x as u32, (x >> 32) as u32) +} + +// Possibly contains the truncated hash value for an entry, depending on +// the size class. +struct ShortHashProxy<Sz>(usize, PhantomData<Sz>); + +impl<Sz> ShortHashProxy<Sz> +where + Sz: Size, +{ + fn new(x: usize) -> Self { + ShortHashProxy(x, PhantomData) + } + + /// Get the hash from either `self` or from a lookup into `entries`, + /// depending on `Sz`.
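A self-contained sketch of the 32-bit size class's packing, mirroring `with_hash`, `lo32`, and `split_lo_hi` from this file (standalone illustration, not crate code):

```rust
// Pack a 32-bit entry index and the low 32 bits of the hash into one u64,
// as the 32-bit size class of `Pos` does.
fn with_hash32(i: usize, hash: usize) -> u64 {
    i as u64 | ((hash as u64) << 32)
}

// Split the packed value back into (index, truncated hash).
fn split_lo_hi(x: u64) -> (u32, u32) {
    (x as u32, (x >> 32) as u32)
}

fn main() {
    let pos = with_hash32(42, 0xdead_beef);
    let (index, short_hash) = split_lo_hi(pos);
    assert_eq!(index, 42);
    assert_eq!(short_hash, 0xdead_beef);
}
```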
+    fn get_short_hash<K, V>(&self, entries: &[Bucket<K, V>], index: usize) -> ShortHash<Sz> {
+        if Sz::is_64_bit() {
+            ShortHash(entries[index].hash.0, PhantomData)
+        } else {
+            ShortHash(self.0, PhantomData)
+        }
+    }
+}
+
+#[inline]
+fn usable_capacity(cap: usize) -> usize {
+    cap - cap / 4
+}
+
+#[inline]
+fn to_raw_capacity(n: usize) -> usize {
+    n + n / 3
+}
+
+#[inline(always)]
+fn desired_pos(mask: usize, hash: HashValue) -> usize {
+    hash.0 & mask
+}
+
+/// The number of steps that `current` is forward of the desired position for hash
+#[inline(always)]
+fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize {
+    current.wrapping_sub(desired_pos(mask, hash)) & mask
+}
+
+// this could not be captured in an efficient iterator
+macro_rules! probe_loop {
+    ($probe_var: ident < $len: expr, $body: expr) => {
+        loop {
+            if $probe_var < $len {
+                $body
+                $probe_var += 1;
+            } else {
+                $probe_var = 0;
+            }
+        }
+    }
+}
+
+/// Find, in the indices, an entry that already exists at a known position
+/// inside self.entries in the IndexMap.
+///
+/// This is effectively reverse lookup, from the entries into the hash buckets.
+///
+/// Return the probe index (into self.indices)
+///
+/// + indices: The self.indices of the map,
+/// + hash: The full hash value from the bucket
+/// + mask: self.mask.
+/// + entry_index: The index of the entry in self.entries
+fn find_existing_entry_at<Sz>(
+    indices: &[Pos],
+    hash: HashValue,
+    mask: usize,
+    entry_index: usize,
+) -> usize
+where
+    Sz: Size,
+{
+    let mut probe = desired_pos(mask, hash);
+    probe_loop!(probe < indices.len(), {
+        // the entry *must* be present; if we hit a Pos::none this was not true
+        // and there is a debug assertion in resolve_existing_index for that.
+        let i = indices[probe].resolve_existing_index::<Sz>();
+        if i == entry_index {
+            return probe;
+        }
+    });
+}
+
+/// Core of the map that does not depend on S
+pub(crate) struct IndexMapCore<K, V> {
+    mask: usize,
+    /// indices are the buckets. indices.len() == raw capacity
+    indices: Box<[Pos]>,
+    /// entries is a dense vec of entries in their order. entries.len() == len
+    entries: Vec<Bucket<K, V>>,
+}
+
+impl<K, V> Clone for IndexMapCore<K, V>
+where
+    K: Clone,
+    V: Clone,
+{
+    fn clone(&self) -> Self {
+        IndexMapCore {
+            mask: self.mask,
+            indices: self.indices.clone(),
+            entries: self.entries.clone(),
+        }
+    }
+
+    fn clone_from(&mut self, other: &Self) {
+        self.mask = other.mask;
+        self.indices.clone_from(&other.indices);
+        self.entries.clone_from(&other.entries);
+    }
+}
+
+impl<K, V> Entries for IndexMapCore<K, V> {
+    type Entry = Bucket<K, V>;
+
+    fn into_entries(self) -> Vec<Self::Entry> {
+        self.entries
+    }
+
+    fn as_entries(&self) -> &[Self::Entry] {
+        &self.entries
+    }
+
+    fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+        &mut self.entries
+    }
+
+    fn with_entries<F>(&mut self, f: F)
+    where
+        F: FnOnce(&mut [Self::Entry]),
+    {
+        let side_index = self.save_hash_index();
+        f(&mut self.entries);
+        self.restore_hash_index(side_index);
+    }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+    #[inline]
+    pub(crate) fn new() -> Self {
+        IndexMapCore {
+            mask: 0,
+            indices: Box::new([]),
+            entries: Vec::new(),
+        }
+    }
+
+    #[inline]
+    pub(crate) fn with_capacity(n: usize) -> Self {
+        let raw = to_raw_capacity(n);
+        let raw_cap = max(raw.next_power_of_two(), 8);
+        IndexMapCore {
+            mask: raw_cap.wrapping_sub(1),
+            indices: vec![Pos::none(); raw_cap].into_boxed_slice(),
+            entries: Vec::with_capacity(usable_capacity(raw_cap)),
+        }
+    }
+
+    // Return whether we need 32 or 64 bits to specify a bucket or entry index
+    #[cfg(not(feature = "test_low_transition_point"))]
+    fn size_class_is_64bit(&self) -> bool {
+        usize::max_value() > u32::max_value() as usize
+            && self.raw_capacity() >= u32::max_value() as usize
+    }
+
+    // for testing
+    #[cfg(feature = "test_low_transition_point")]
+    fn size_class_is_64bit(&self) -> bool {
+        self.raw_capacity() >= 64
+    }
+
+    #[inline(always)]
+    fn raw_capacity(&self) -> usize {
+        self.indices.len()
+    }
+
+    pub(crate) fn len(&self) -> usize {
+        self.entries.len()
+    }
+
+    pub(crate) fn capacity(&self) -> usize {
+        usable_capacity(self.raw_capacity())
+    }
+
+    pub(crate) fn clear(&mut self) {
+        self.entries.clear();
+        self.clear_indices();
+    }
+
+    pub(crate) fn drain(&mut self, range: RangeFull) -> Drain<Bucket<K, V>> {
+        self.clear_indices();
+        self.entries.drain(range)
+    }
+
+    // clear self.indices to the same state as "no elements"
+    fn clear_indices(&mut self) {
+        for pos in self.indices.iter_mut() {
+            *pos = Pos::none();
+        }
+    }
+
+    fn first_allocation(&mut self) {
+        debug_assert_eq!(self.len(), 0);
+        let raw_cap = 8usize;
+        self.mask = raw_cap.wrapping_sub(1);
+        self.indices = vec![Pos::none(); raw_cap].into_boxed_slice();
+        self.entries = Vec::with_capacity(usable_capacity(raw_cap));
+    }
+
+    pub(crate) fn reserve_one(&mut self) {
+        if self.len() == self.capacity() {
+            dispatch_32_vs_64!(self.double_capacity());
+        }
+    }
+
+    #[inline(never)]
+    // `Sz` is *current* Size class, before grow
+    fn double_capacity<Sz>(&mut self)
+    where
+        Sz: Size,
+    {
+        debug_assert!(self.raw_capacity() == 0 || self.len() > 0);
+        if self.raw_capacity() == 0 {
+            return self.first_allocation();
+        }
+
+        // find first ideally placed element -- start of cluster
+        let mut first_ideal = 0;
+        for (i, index) in enumerate(&*self.indices) {
+            if let Some(pos) = index.pos() {
+                if 0 == probe_distance(self.mask, self.entries[pos].hash, i) {
+                    first_ideal = i;
+                    break;
+                }
+            }
+        }
+
+        // visit the entries in an order where we can simply reinsert them
+        // into self.indices without any bucket stealing.
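+        //
+        // Illustration with hypothetical numbers: with raw capacity 8 and
+        // first_ideal == 5, the reinsertion below visits buckets 5, 6, 7 and
+        // then 0..5. Starting at an ideally placed element (a cluster start)
+        // means no entry we reinsert can be displaced by a later one, so
+        // plain "first empty bucket" insertion keeps the Robin Hood
+        // invariant intact.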
+        let new_raw_cap = self.indices.len() * 2;
+        let old_indices = replace(
+            &mut self.indices,
+            vec![Pos::none(); new_raw_cap].into_boxed_slice(),
+        );
+        self.mask = new_raw_cap.wrapping_sub(1);
+
+        // `Sz` is the old size class, and either u32 or u64 is the new
+        for &pos in &old_indices[first_ideal..] {
+            dispatch_32_vs_64!(self.reinsert_entry_in_order::<Sz>(pos));
+        }
+
+        for &pos in &old_indices[..first_ideal] {
+            dispatch_32_vs_64!(self.reinsert_entry_in_order::<Sz>(pos));
+        }
+        let more = self.capacity() - self.len();
+        self.entries.reserve_exact(more);
+    }
+
+    // write to self.indices
+    // read from self.entries at `pos`
+    //
+    // reinserting rewrites all `Pos` entries anyway. This handles transitioning
+    // from u32 to u64 size class if needed by using the two type parameters.
+    fn reinsert_entry_in_order<SzNew, SzOld>(&mut self, pos: Pos)
+    where
+        SzNew: Size,
+        SzOld: Size,
+    {
+        if let Some((i, hash_proxy)) = pos.resolve::<SzOld>() {
+            // only if the size class is conserved can we use the short hash
+            let entry_hash = if SzOld::is_same_size::<SzNew>() {
+                hash_proxy.get_short_hash(&self.entries, i).into_hash()
+            } else {
+                self.entries[i].hash
+            };
+            // find first empty bucket and insert there
+            let mut probe = desired_pos(self.mask, entry_hash);
+            probe_loop!(probe < self.indices.len(), {
+                if self.indices[probe].is_none() {
+                    // empty bucket, insert here
+                    self.indices[probe] = Pos::with_hash::<SzNew>(i, entry_hash);
+                    return;
+                }
+            });
+        }
+    }
+
+    pub(crate) fn pop_impl(&mut self) -> Option<(K, V)> {
+        let (probe, found) = match self.as_entries().last() {
+            Some(e) => self.find_existing_entry(e),
+            None => return None,
+        };
+        debug_assert_eq!(found, self.entries.len() - 1);
+        Some(self.swap_remove_found(probe, found))
+    }
+
+    fn insert_phase_1<'a, Sz, A>(&'a mut self, hash: HashValue, key: K, action: A) -> A::Output
+    where
+        Sz: Size,
+        K: Eq,
+        A: ProbeAction<'a, Sz, K, V>,
+    {
+        let mut probe = desired_pos(self.mask, hash);
+        let mut dist = 0;
+        debug_assert!(self.len() < self.raw_capacity());
+        probe_loop!(probe < self.indices.len(), {
+            if let Some((i, hash_proxy)) = self.indices[probe].resolve::<Sz>() {
+                let entry_hash = hash_proxy.get_short_hash(&self.entries, i);
+                // if existing element probed less than us, swap
+                let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe);
+                if their_dist < dist {
+                    // robin hood: steal the spot if it's better for us
+                    return action.steal(VacantEntry {
+                        map: self,
+                        hash: hash,
+                        key: key,
+                        probe: probe,
+                    });
+                } else if entry_hash == hash && self.entries[i].key == key {
+                    return action.hit(OccupiedEntry {
+                        map: self,
+                        key: key,
+                        probe: probe,
+                        index: i,
+                    });
+                }
+            } else {
+                // empty bucket, insert here
+                return action.empty(VacantEntry {
+                    map: self,
+                    hash: hash,
+                    key: key,
+                    probe: probe,
+                });
+            }
+            dist += 1;
+        });
+    }
+
+    /// Phase 2 is post-insert, where we forward-shift `Pos` in the indices.
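+    ///
+    /// Sketch with hypothetical buckets: placing `old_pos` at probe 4 when
+    /// buckets 4..7 hold `[a, b, empty]` leaves `[old_pos, a, b]`; each
+    /// displaced `Pos` moves one bucket forward until an empty bucket
+    /// absorbs the tail.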
+    fn insert_phase_2<Sz>(&mut self, mut probe: usize, mut old_pos: Pos)
+    where
+        Sz: Size,
+    {
+        probe_loop!(probe < self.indices.len(), {
+            let pos = &mut self.indices[probe];
+            if pos.is_none() {
+                *pos = old_pos;
+                break;
+            } else {
+                old_pos = replace(pos, old_pos);
+            }
+        });
+    }
+
+    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
+    where
+        K: Eq,
+    {
+        self.reserve_one();
+        dispatch_32_vs_64!(self.insert_phase_1::<_>(hash, key, InsertValue(value)))
+    }
+
+    pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<K, V>
+    where
+        K: Eq,
+    {
+        self.reserve_one();
+        dispatch_32_vs_64!(self.insert_phase_1::<_>(hash, key, MakeEntry))
+    }
+
+    /// Return probe (indices) and position (entries)
+    pub(crate) fn find_using<F>(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)>
+    where
+        F: Fn(&Bucket<K, V>) -> bool,
+    {
+        dispatch_32_vs_64!(self.find_using_impl::<_>(hash, key_eq))
+    }
+
+    fn find_using_impl<Sz, F>(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)>
+    where
+        F: Fn(&Bucket<K, V>) -> bool,
+        Sz: Size,
+    {
+        debug_assert!(self.len() > 0);
+        let mut probe = desired_pos(self.mask, hash);
+        let mut dist = 0;
+        probe_loop!(probe < self.indices.len(), {
+            if let Some((i, hash_proxy)) = self.indices[probe].resolve::<Sz>() {
+                let entry_hash = hash_proxy.get_short_hash(&self.entries, i);
+                if dist > probe_distance(self.mask, entry_hash.into_hash(), probe) {
+                    // give up when probe distance is too long
+                    return None;
+                } else if entry_hash == hash && key_eq(&self.entries[i]) {
+                    return Some((probe, i));
+                }
+            } else {
+                return None;
+            }
+            dist += 1;
+        });
+    }
+
+    /// Find `entry` which is already placed inside self.entries;
+    /// return its probe and entry index.
+    pub(crate) fn find_existing_entry(&self, entry: &Bucket<K, V>) -> (usize, usize) {
+        debug_assert!(self.len() > 0);
+
+        let hash = entry.hash;
+        let actual_pos = ptrdistance(&self.entries[0], entry);
+        let probe = dispatch_32_vs_64!(self =>
+            find_existing_entry_at(&self.indices, hash, self.mask, actual_pos));
+        (probe, actual_pos)
+    }
+
+    /// Remove an entry by shifting all entries that follow it
+    pub(crate) fn shift_remove_found(&mut self, probe: usize, found: usize) -> (K, V) {
+        dispatch_32_vs_64!(self.shift_remove_found_impl(probe, found))
+    }
+
+    fn shift_remove_found_impl<Sz>(&mut self, probe: usize, found: usize) -> (K, V)
+    where
+        Sz: Size,
+    {
+        // index `probe` and entry `found` are to be removed
+        // use Vec::remove, but then we need to update the indices that point
+        // to all of the other entries that have to move
+        self.indices[probe] = Pos::none();
+        let entry = self.entries.remove(found);
+
+        // correct indices that point to the entries that followed the removed entry.
+        // use a heuristic between a full sweep vs. a `probe_loop!` for every shifted item.
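+        //
+        // Worked example of the heuristic (hypothetical numbers): removing
+        // entry 100 from 10_000 entries with 16_384 buckets leaves 9_899
+        // entries to shift; 16_384 < 2 * 9_899, so a single sweep over all
+        // indices is cheaper than ~9_899 separate probe loops.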
+        if self.indices.len() < (self.entries.len() - found) * 2 {
+            // shift all indices greater than `found`
+            for pos in self.indices.iter_mut() {
+                if let Some((i, _)) = pos.resolve::<Sz>() {
+                    if i > found {
+                        // shift the index
+                        pos.set_pos::<Sz>(i - 1);
+                    }
+                }
+            }
+        } else {
+            // find each following entry to shift its index
+            for (offset, entry) in enumerate(&self.entries[found..]) {
+                let index = found + offset;
+                let mut probe = desired_pos(self.mask, entry.hash);
+                probe_loop!(probe < self.indices.len(), {
+                    let pos = &mut self.indices[probe];
+                    if let Some((i, _)) = pos.resolve::<Sz>() {
+                        if i == index + 1 {
+                            // found it, shift it
+                            pos.set_pos::<Sz>(index);
+                            break;
+                        }
+                    }
+                });
+            }
+        }
+
+        self.backward_shift_after_removal::<Sz>(probe);
+
+        (entry.key, entry.value)
+    }
+
+    /// Remove an entry by swapping it with the last
+    pub(crate) fn swap_remove_found(&mut self, probe: usize, found: usize) -> (K, V) {
+        dispatch_32_vs_64!(self.swap_remove_found_impl(probe, found))
+    }
+
+    fn swap_remove_found_impl<Sz>(&mut self, probe: usize, found: usize) -> (K, V)
+    where
+        Sz: Size,
+    {
+        // index `probe` and entry `found` are to be removed
+        // use swap_remove, but then we need to update the index that points
+        // to the other entry that has to move
+        self.indices[probe] = Pos::none();
+        let entry = self.entries.swap_remove(found);
+
+        // correct index that points to the entry that had to swap places
+        if let Some(entry) = self.entries.get(found) {
+            // was not last element
+            // examine new element in `found` and find it in indices
+            let mut probe = desired_pos(self.mask, entry.hash);
+            probe_loop!(probe < self.indices.len(), {
+                let pos = &mut self.indices[probe];
+                if let Some((i, _)) = pos.resolve::<Sz>() {
+                    if i >= self.entries.len() {
+                        // found it
+                        pos.set_pos::<Sz>(found);
+                        break;
+                    }
+                }
+            });
+        }
+
+        self.backward_shift_after_removal::<Sz>(probe);
+
+        (entry.key, entry.value)
+    }
+
+    fn backward_shift_after_removal<Sz>(&mut self, probe_at_remove: usize)
+    where
+        Sz: Size,
+    {
+        // backward shift deletion in self.indices
+        // after probe, shift all non-ideally placed indices backward
+        let mut last_probe = probe_at_remove;
+        let mut probe = probe_at_remove + 1;
+        probe_loop!(probe < self.indices.len(), {
+            if let Some((i, hash_proxy)) = self.indices[probe].resolve::<Sz>() {
+                let entry_hash = hash_proxy.get_short_hash(&self.entries, i);
+                if probe_distance(self.mask, entry_hash.into_hash(), probe) > 0 {
+                    self.indices[last_probe] = self.indices[probe];
+                    self.indices[probe] = Pos::none();
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            }
+            last_probe = probe;
+        });
+    }
+
+    pub(crate) fn retain_in_order<F>(&mut self, keep: F)
+    where
+        F: FnMut(&mut K, &mut V) -> bool,
+    {
+        dispatch_32_vs_64!(self.retain_in_order_impl::<_>(keep));
+    }
+
+    fn retain_in_order_impl<Sz, F>(&mut self, mut keep: F)
+    where
+        F: FnMut(&mut K, &mut V) -> bool,
+        Sz: Size,
+    {
+        // Like Vec::retain in self.entries; for each removed key-value pair,
+        // we clear its corresponding spot in self.indices, and run the
+        // usual backward shift in self.indices.
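+        //
+        // Illustration with a hypothetical run: keeping [a, b, _, c, _, d]
+        // (where _ marks rejected entries) swaps survivors left as we pass
+        // them, giving [a, b, c, d] after the final truncate, with each
+        // survivor's index slot rewritten via set_pos along the way.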
+        let len = self.entries.len();
+        let mut n_deleted = 0;
+        for i in 0..len {
+            let will_keep;
+            let hash;
+            {
+                let ent = &mut self.entries[i];
+                hash = ent.hash;
+                will_keep = keep(&mut ent.key, &mut ent.value);
+            };
+            let probe = find_existing_entry_at::<Sz>(&self.indices, hash, self.mask, i);
+            if !will_keep {
+                n_deleted += 1;
+                self.indices[probe] = Pos::none();
+                self.backward_shift_after_removal::<Sz>(probe);
+            } else if n_deleted > 0 {
+                self.indices[probe].set_pos::<Sz>(i - n_deleted);
+                self.entries.swap(i - n_deleted, i);
+            }
+        }
+        self.entries.truncate(len - n_deleted);
+    }
+
+    pub(crate) fn sort_by<F>(&mut self, mut compare: F)
+    where
+        F: FnMut(&K, &V, &K, &V) -> Ordering,
+    {
+        let side_index = self.save_hash_index();
+        self.entries
+            .sort_by(move |ei, ej| compare(&ei.key, &ei.value, &ej.key, &ej.value));
+        self.restore_hash_index(side_index);
+    }
+
+    pub(crate) fn reverse(&mut self) {
+        self.entries.reverse();
+
+        // No need to save hash indices, can easily calculate what they should
+        // be, given that this is an in-place reversal.
+        dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, self.entries.len()));
+
+        fn apply_new_index<Sz>(indices: &mut [Pos], len: usize)
+        where
+            Sz: Size,
+        {
+            for pos in indices {
+                if let Some((i, _)) = pos.resolve::<Sz>() {
+                    pos.set_pos::<Sz>(len - i - 1);
+                }
+            }
+        }
+    }
+
+    fn save_hash_index(&mut self) -> Vec<usize> {
+        // Temporarily use the hash field in a bucket to store the old index.
+        // Save the old hash values in `side_index`. Then we can sort
+        // `self.entries` in place.
+        Vec::from_iter(
+            enumerate(&mut self.entries).map(|(i, elt)| replace(&mut elt.hash, HashValue(i)).get()),
+        )
+    }
+
+    fn restore_hash_index(&mut self, mut side_index: Vec<usize>) {
+        // Write back the hash values from side_index and fill `side_index` with
+        // a mapping from the old to the new index instead.
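+        //
+        // For illustration (hypothetical permutation): if sorting moved the
+        // entry at old index 2 to new index 0, this loop restores that
+        // entry's real hash and records side_index[2] = 0; apply_new_index
+        // below then rewrites every Pos that pointed at 2 to point at 0.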
+        for (i, ent) in enumerate(&mut self.entries) {
+            let old_index = ent.hash.get();
+            ent.hash = HashValue(replace(&mut side_index[old_index], i));
+        }
+
+        // Apply new index to self.indices
+        dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, &side_index));
+
+        fn apply_new_index<Sz>(indices: &mut [Pos], new_index: &[usize])
+        where
+            Sz: Size,
+        {
+            for pos in indices {
+                if let Some((i, _)) = pos.resolve::<Sz>() {
+                    pos.set_pos::<Sz>(new_index[i]);
+                }
+            }
+        }
+    }
+
+    pub(crate) fn debug_entries(&self, f: &mut fmt::Formatter) -> fmt::Result
+    where
+        K: fmt::Debug,
+    {
+        for (i, index) in enumerate(&*self.indices) {
+            write!(f, "{}: {:?}", i, index)?;
+            if let Some(pos) = index.pos() {
+                let hash = self.entries[pos].hash;
+                let key = &self.entries[pos].key;
+                let desire = desired_pos(self.mask, hash);
+                write!(
+                    f,
+                    ", desired={}, probe_distance={}, key={:?}",
+                    desire,
+                    probe_distance(self.mask, hash, i),
+                    key
+                )?;
+            }
+            writeln!(f)?;
+        }
+        writeln!(
+            f,
+            "cap={}, raw_cap={}, entries.cap={}",
+            self.capacity(),
+            self.raw_capacity(),
+            self.entries.capacity()
+        )?;
+        Ok(())
+    }
+}
+
+trait ProbeAction<'a, Sz: Size, K, V>: Sized {
+    type Output;
+    // handle an occupied spot in the map
+    fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output;
+    // handle an empty spot in the map
+    fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output;
+    // robin hood: handle a spot that you should steal because it's better for you
+    fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output;
+}
+
+struct InsertValue<V>(V);
+
+impl<'a, Sz: Size, K, V> ProbeAction<'a, Sz, K, V> for InsertValue<V> {
+    type Output = (usize, Option<V>);
+
+    fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output {
+        let old = replace(&mut entry.map.entries[entry.index].value, self.0);
+        (entry.index, Some(old))
+    }
+
+    fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output {
+        let pos = &mut entry.map.indices[entry.probe];
+        let index = entry.map.entries.len();
+        *pos = Pos::with_hash::<Sz>(index, entry.hash);
+        entry.map.entries.push(Bucket {
+            hash: entry.hash,
+            key: entry.key,
+            value: self.0,
+        });
+        (index, None)
+    }
+
+    fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output {
+        let index = entry.map.entries.len();
+        entry.insert_impl::<Sz>(self.0);
+        (index, None)
+    }
+}
+
+struct MakeEntry;
+
+impl<'a, Sz: Size, K: 'a, V: 'a> ProbeAction<'a, Sz, K, V> for MakeEntry {
+    type Output = Entry<'a, K, V>;
+
+    fn hit(self, entry: OccupiedEntry<'a, K, V>) -> Self::Output {
+        Entry::Occupied(entry)
+    }
+
+    fn empty(self, entry: VacantEntry<'a, K, V>) -> Self::Output {
+        Entry::Vacant(entry)
+    }
+
+    fn steal(self, entry: VacantEntry<'a, K, V>) -> Self::Output {
+        Entry::Vacant(entry)
+    }
+}
+
+/// Entry for an existing key-value pair or a vacant location to
+/// insert one.
+pub enum Entry<'a, K: 'a, V: 'a> {
+    /// Existing slot with equivalent key.
+    Occupied(OccupiedEntry<'a, K, V>),
+    /// Vacant slot (no equivalent key in the map).
+    Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+    /// Computes in **O(1)** time (amortized average).
+    pub fn or_insert(self, default: V) -> &'a mut V {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(default),
+        }
+    }
+
+    /// Computes in **O(1)** time (amortized average).
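+    ///
+    /// A usage sketch (illustrative, not part of this change):
+    ///
+    /// ```
+    /// use indexmap::IndexMap;
+    ///
+    /// let mut map: IndexMap<&str, Vec<i32>> = IndexMap::new();
+    /// map.entry("key").or_insert_with(Vec::new).push(1);
+    /// assert_eq!(map["key"], vec![1]);
+    /// ```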
+    pub fn or_insert_with<F>(self, call: F) -> &'a mut V
+    where
+        F: FnOnce() -> V,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(call()),
+        }
+    }
+
+    pub fn key(&self) -> &K {
+        match *self {
+            Entry::Occupied(ref entry) => entry.key(),
+            Entry::Vacant(ref entry) => entry.key(),
+        }
+    }
+
+    /// Return the index where the key-value pair exists or will be inserted.
+    pub fn index(&self) -> usize {
+        match *self {
+            Entry::Occupied(ref entry) => entry.index(),
+            Entry::Vacant(ref entry) => entry.index(),
+        }
+    }
+
+    /// Modifies the entry if it is occupied.
+    pub fn and_modify<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&mut V),
+    {
+        match self {
+            Entry::Occupied(mut o) => {
+                f(o.get_mut());
+                Entry::Occupied(o)
+            }
+            x => x,
+        }
+    }
+
+    /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to an already existent value is returned.
+    ///
+    /// Computes in **O(1)** time (amortized average).
+    pub fn or_default(self) -> &'a mut V
+    where
+        V: Default,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(V::default()),
+        }
+    }
+}
+
+impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Entry<'a, K, V> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
+            Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
+        }
+    }
+}
+
+/// A view into an occupied entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct OccupiedEntry<'a, K: 'a, V: 'a> {
+    map: &'a mut IndexMapCore<K, V>,
+    key: K,
+    probe: usize,
+    index: usize,
+}
+
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+    pub fn get(&self) -> &V {
+        &self.map.entries[self.index].value
+    }
+    pub fn get_mut(&mut self) -> &mut V {
+        &mut self.map.entries[self.index].value
+    }
+
+    /// Put the new key in the occupied entry's key slot
+    pub(crate) fn replace_key(self) -> K {
+        let old_key = &mut self.map.entries[self.index].key;
+        replace(old_key, self.key)
+    }
+
+    /// Return the index of the key-value pair
+    pub fn index(&self) -> usize {
+        self.index
+    }
+    pub fn into_mut(self) -> &'a mut V {
+        &mut self.map.entries[self.index].value
+    }
+
+    /// Sets the value of the entry to `value`, and returns the entry's old value.
+    pub fn insert(&mut self, value: V) -> V {
+        replace(self.get_mut(), value)
+    }
+
+    /// Remove the key, value pair stored in the map for this entry, and return the value.
+    ///
+    /// **NOTE:** This is equivalent to `.swap_remove()`.
+    pub fn remove(self) -> V {
+        self.swap_remove()
+    }
+
+    /// Remove the key, value pair stored in the map for this entry, and return the value.
+    ///
+    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+    /// last element of the map and popping it off. **This perturbs
+    /// the position of what used to be the last element!**
+    ///
+    /// Computes in **O(1)** time (average).
+    pub fn swap_remove(self) -> V {
+        self.swap_remove_entry().1
+    }
+
+    /// Remove the key, value pair stored in the map for this entry, and return the value.
+    ///
+    /// Like `Vec::remove`, the pair is removed by shifting all of the
+    /// elements that follow it, preserving their relative order.
+    /// **This perturbs the index of all of those elements!**
+    ///
+    /// Computes in **O(n)** time (average).
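+    ///
+    /// For illustration (hypothetical contents): shift-removing `"b"` from a
+    /// map ordered `["a", "b", "c", "d"]` yields `["a", "c", "d"]`, while
+    /// `swap_remove` would yield `["a", "d", "c"]`.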
+    pub fn shift_remove(self) -> V {
+        self.shift_remove_entry().1
+    }
+
+    /// Remove and return the key, value pair stored in the map for this entry
+    ///
+    /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
+    pub fn remove_entry(self) -> (K, V) {
+        self.swap_remove_entry()
+    }
+
+    /// Remove and return the key, value pair stored in the map for this entry
+    ///
+    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+    /// last element of the map and popping it off. **This perturbs
+    /// the position of what used to be the last element!**
+    ///
+    /// Computes in **O(1)** time (average).
+    pub fn swap_remove_entry(self) -> (K, V) {
+        self.map.swap_remove_found(self.probe, self.index)
+    }
+
+    /// Remove and return the key, value pair stored in the map for this entry
+    ///
+    /// Like `Vec::remove`, the pair is removed by shifting all of the
+    /// elements that follow it, preserving their relative order.
+    /// **This perturbs the index of all of those elements!**
+    ///
+    /// Computes in **O(n)** time (average).
+    pub fn shift_remove_entry(self) -> (K, V) {
+        self.map.shift_remove_found(self.probe, self.index)
+    }
+}
+
+impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for OccupiedEntry<'a, K, V> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct(stringify!(OccupiedEntry))
+            .field("key", self.key())
+            .field("value", self.get())
+            .finish()
+    }
+}
+
+/// A view into a vacant entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct VacantEntry<'a, K: 'a, V: 'a> {
+    map: &'a mut IndexMapCore<K, V>,
+    key: K,
+    hash: HashValue,
+    probe: usize,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+    pub fn into_key(self) -> K {
+        self.key
+    }
+    /// Return the index where the key-value pair will be inserted.
+    pub fn index(&self) -> usize {
+        self.map.len()
+    }
+    pub fn insert(self, value: V) -> &'a mut V {
+        if self.map.size_class_is_64bit() {
+            self.insert_impl::<u64>(value)
+        } else {
+            self.insert_impl::<u32>(value)
+        }
+    }
+
+    fn insert_impl<Sz>(self, value: V) -> &'a mut V
+    where
+        Sz: Size,
+    {
+        let index = self.map.entries.len();
+        self.map.entries.push(Bucket {
+            hash: self.hash,
+            key: self.key,
+            value,
+        });
+        let old_pos = Pos::with_hash::<Sz>(index, self.hash);
+        self.map.insert_phase_2::<Sz>(self.probe, old_pos);
+        &mut { self.map }.entries[index].value
+    }
+}
+
+impl<'a, K: 'a + fmt::Debug, V: 'a> fmt::Debug for VacantEntry<'a, K, V> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_tuple(stringify!(VacantEntry))
+            .field(self.key())
+            .finish()
+    }
+}
diff --git a/src/rayon/mod.rs b/src/rayon/mod.rs
index 3ed65d4d..76386ff5 100644
--- a/src/rayon/mod.rs
+++ b/src/rayon/mod.rs
@@ -10,9 +10,11 @@ macro_rules! parallel_iterator_methods {
     // $map_elt is the mapping function from the underlying iterator's element
     ($map_elt:expr) => {
         fn drive_unindexed<C>(self, consumer: C) -> C::Result
-            where C: UnindexedConsumer<Self::Item>
+        where
+            C: UnindexedConsumer<Self::Item>,
         {
-            self.entries.into_par_iter()
+            self.entries
+                .into_par_iter()
                 .map($map_elt)
                 .drive_unindexed(consumer)
         }
@@ -23,7 +25,7 @@ macro_rules! parallel_iterator_methods {
         fn opt_len(&self) -> Option<usize> {
             Some(self.entries.len())
         }
-    }
+    };
 }
 
 // generate `IndexedParallelIterator` methods by just forwarding to the underlying
@@ -32,11 +34,10 @@ macro_rules! indexed_parallel_iterator_methods {
     // $map_elt is the mapping function from the underlying iterator's element
     ($map_elt:expr) => {
         fn drive<C>(self, consumer: C) -> C::Result
-            where C: Consumer<Self::Item>
+        where
+            C: Consumer<Self::Item>,
         {
-            self.entries.into_par_iter()
-                .map($map_elt)
-                .drive(consumer)
+            self.entries.into_par_iter().map($map_elt).drive(consumer)
         }
 
         fn len(&self) -> usize {
@@ -44,13 +45,15 @@ macro_rules! indexed_parallel_iterator_methods {
         }
 
         fn with_producer<CB>(self, callback: CB) -> CB::Output
-            where CB: ProducerCallback<Self::Item>
+        where
+            CB: ProducerCallback<Self::Item>,
         {
-            self.entries.into_par_iter()
+            self.entries
+                .into_par_iter()
                 .map($map_elt)
                 .with_producer(callback)
         }
-    }
+    };
 }
 
 pub mod map;
diff --git a/src/serde.rs b/src/serde.rs
index e47cc3d1..9199a8d5 100644
--- a/src/serde.rs
+++ b/src/serde.rs
@@ -31,9 +31,9 @@ where
     }
 }
 
-struct OrderMapVisitor<K, V, S>(PhantomData<(K, V, S)>);
+struct IndexMapVisitor<K, V, S>(PhantomData<(K, V, S)>);
 
-impl<'de, K, V, S> Visitor<'de> for OrderMapVisitor<K, V, S>
+impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor<K, V, S>
 where
     K: Deserialize<'de> + Eq + Hash,
     V: Deserialize<'de>,
@@ -71,7 +71,7 @@ where
     where
         D: Deserializer<'de>,
     {
-        deserializer.deserialize_map(OrderMapVisitor(PhantomData))
+        deserializer.deserialize_map(IndexMapVisitor(PhantomData))
     }
 }
 
@@ -109,9 +109,9 @@ where
     }
 }
 
-struct OrderSetVisitor<T, S>(PhantomData<(T, S)>);
+struct IndexSetVisitor<T, S>(PhantomData<(T, S)>);
 
-impl<'de, T, S> Visitor<'de> for OrderSetVisitor<T, S>
+impl<'de, T, S> Visitor<'de> for IndexSetVisitor<T, S>
 where
     T: Deserialize<'de> + Eq + Hash,
     S: Default + BuildHasher,
@@ -147,7 +147,7 @@ where
     where
         D: Deserializer<'de>,
     {
-        deserializer.deserialize_seq(OrderSetVisitor(PhantomData))
+        deserializer.deserialize_seq(IndexSetVisitor(PhantomData))
     }
 }
 
diff --git a/src/set.rs b/src/set.rs
index d856f22a..1b90fb01 100644
--- a/src/set.rs
+++ b/src/set.rs
@@ -63,17 +63,31 @@ type Bucket<T> = super::Bucket<T, ()>;
 /// assert!(letters.contains(&'u'));
 /// assert!(!letters.contains(&'y'));
 /// ```
-#[derive(Clone)]
 #[cfg(has_std)]
 pub struct IndexSet<T, S = RandomState> {
     map: IndexMap<T, (), S>,
 }
 #[cfg(not(has_std))]
-#[derive(Clone)]
 pub struct IndexSet<T, S> {
     map: IndexMap<T, (), S>,
 }
 
+impl<T, S> Clone for IndexSet<T, S>
+where
+    T: Clone,
+    S: Clone,
+{
+    fn clone(&self) -> Self {
+        IndexSet {
+            map: self.map.clone(),
+        }
+    }
+
+    fn clone_from(&mut self, other: &Self) {
+        self.map.clone_from(&other.map);
+    }
+}
+
 impl<T, S> Entries for IndexSet<T, S> {
     type Entry = Bucket<T>;
 
diff --git a/src/util.rs b/src/util.rs
index f55498e2..5613c2b8 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -1,11 +1,11 @@
 use std::iter::Enumerate;
 use std::mem::size_of;
 
-pub fn third<A, B, C>(t: (A, B, C)) -> C {
+pub(crate) fn third<A, B, C>(t: (A, B, C)) -> C {
     t.2
 }
 
-pub fn enumerate<I>(iterable: I) -> Enumerate<I::IntoIter>
+pub(crate) fn enumerate<I>(iterable: I) -> Enumerate<I::IntoIter>
 where
     I: IntoIterator,
 {
@@ -13,7 +13,7 @@ where
 }
 
 /// return the number of steps from a to b
-pub fn ptrdistance<T>(a: *const T, b: *const T) -> usize {
+pub(crate) fn ptrdistance<T>(a: *const T, b: *const T) -> usize {
     debug_assert!(a as usize <= b as usize);
     (b as usize - a as usize) / size_of::<T>()
 }
diff --git a/tests/quick.rs b/tests/quick.rs
index c22844c6..1399d0a8 100644
--- a/tests/quick.rs
+++ b/tests/quick.rs
@@ -17,7 +17,7 @@ use rand::Rng;
 use fnv::FnvHasher;
 use std::hash::{BuildHasher, BuildHasherDefault};
 type FnvBuilder = BuildHasherDefault<FnvHasher>;
-type OrderMapFnv<K, V> = IndexMap<K, V, FnvBuilder>;
+type IndexMapFnv<K, V> = IndexMap<K, V, FnvBuilder>;
 
 use std::cmp::min;
 use std::collections::HashMap;
@@ -275,7 +275,7 @@ quickcheck! {
                 ops2.remove(i);
             }
         }
-        let mut map2 = OrderMapFnv::default();
+        let mut map2 = IndexMapFnv::default();
         let mut reference2 = HashMap::new();
         do_ops(&ops2, &mut map2, &mut reference2);
         assert_eq!(map == map2, reference == reference2);