diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8c4068089..6ffc27bc2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -68,7 +68,7 @@ This release was _yanked_ due to a breaking change.
 - Optimized implementation for ARM using NEON instructions. (#430)
 - Support for rkyv serialization. (#432)
 - `Equivalent` trait to look up values without `Borrow`. (#345)
-- `Hash{Map,Set}::raw_table_mut` is added whic returns a mutable reference. (#404)
+- `Hash{Map,Set}::raw_table_mut` is added which returns a mutable reference. (#404)
 - Fast path for `clear` on empty tables. (#428)
 
 ### Changed
diff --git a/benches/set_ops.rs b/benches/set_ops.rs
index 4b910a839..3b2ab5f28 100644
--- a/benches/set_ops.rs
+++ b/benches/set_ops.rs
@@ -19,7 +19,7 @@ const LARGE_SET_SIZE: usize = 1000;
 const SMALL_SET_SIZE: usize = 100;
 
 /// The number of keys present in both sets.
-const OVERLAPP: usize =
+const OVERLAP: usize =
     [LARGE_SET_SIZE, SMALL_SET_SIZE][(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize] / 2;
 
 /// Creates a set containing end - start unique string elements.
@@ -31,8 +31,8 @@ fn create_set(start: usize, end: usize) -> HashSet<String> {
 fn set_ops_bit_or(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set | &small_set)
 }
@@ -41,8 +41,8 @@ fn set_ops_bit_or(b: &mut Bencher) {
 fn set_ops_bit_and(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set & &small_set)
 }
@@ -51,8 +51,8 @@ fn set_ops_bit_and(b: &mut Bencher) {
 fn set_ops_bit_xor(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set ^ &small_set)
 }
@@ -61,8 +61,8 @@ fn set_ops_bit_xor(b: &mut Bencher) {
 fn set_ops_sub_large_small(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &large_set - &small_set)
 }
@@ -71,8 +71,8 @@ fn set_ops_sub_large_small(b: &mut Bencher) {
 fn set_ops_sub_small_large(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| &small_set - &large_set)
 }
@@ -81,8 +81,8 @@ fn set_ops_sub_small_large(b: &mut Bencher) {
 fn set_ops_bit_or_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -95,8 +95,8 @@ fn set_ops_bit_or_assign(b: &mut Bencher) {
 fn set_ops_bit_and_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = small_set.clone();
@@ -109,8 +109,8 @@ fn set_ops_bit_and_assign(b: &mut Bencher) {
 fn set_ops_bit_xor_assign(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
    );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -123,8 +123,8 @@ fn set_ops_bit_xor_assign(b: &mut Bencher) {
 fn set_ops_sub_assign_large_small(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = large_set.clone();
@@ -137,8 +137,8 @@ fn set_ops_sub_assign_large_small(b: &mut Bencher) {
 fn set_ops_sub_assign_small_large(b: &mut Bencher) {
     let large_set = create_set(0, LARGE_SET_SIZE);
     let small_set = create_set(
-        LARGE_SET_SIZE - OVERLAPP,
-        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+        LARGE_SET_SIZE - OVERLAP,
+        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
     );
     b.iter(|| {
         let mut set = small_set.clone();
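Note on the benchmark setup above: `OVERLAP` is computed with an indexing trick (`[a, b][(a < b) as usize]`) so the expression stays usable in a `const` initializer, and every benchmark builds a small set whose key range overlaps the tail of the large set's range. The sketch below is not part of the benchmark file; it assumes `hashbrown` as a dependency, and the key format, `main` driver, and assertions are illustrative only.

```rust
use hashbrown::HashSet;

const LARGE_SET_SIZE: usize = 1000;
const SMALL_SET_SIZE: usize = 100;

// Index trick: `(a < b) as usize` is 0 or 1, so this selects one of the two
// constants in a `const` context; with the sizes above it evaluates to 500.
const OVERLAP: usize =
    [LARGE_SET_SIZE, SMALL_SET_SIZE][(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize] / 2;

// Mirrors the benchmark's `create_set`: `end - start` unique string keys.
fn create_set(start: usize, end: usize) -> HashSet<String> {
    (start..end).map(|nr| format!("key{}", nr)).collect()
}

fn main() {
    let large_set = create_set(0, LARGE_SET_SIZE);
    let small_set = create_set(
        LARGE_SET_SIZE - OVERLAP,
        LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
    );

    // The shared keys are the tail of the large range, capped at the small set's size.
    let shared = OVERLAP.min(SMALL_SET_SIZE);
    assert_eq!(large_set.intersection(&small_set).count(), shared);

    // One of the benchmarked operators: `|` builds the union of the two sets.
    let combined = &large_set | &small_set;
    assert_eq!(combined.len(), LARGE_SET_SIZE + SMALL_SET_SIZE - shared);
}
```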
diff --git a/src/external_trait_impls/rayon/map.rs b/src/external_trait_impls/rayon/map.rs
index 2534dc9b2..9623ca747 100644
--- a/src/external_trait_impls/rayon/map.rs
+++ b/src/external_trait_impls/rayon/map.rs
@@ -472,32 +472,32 @@ mod test_par_map {
 
     use crate::hash_map::HashMap;
 
-    struct Dropable<'a> {
+    struct Droppable<'a> {
         k: usize,
         counter: &'a AtomicUsize,
     }
 
-    impl Dropable<'_> {
-        fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
+    impl Droppable<'_> {
+        fn new(k: usize, counter: &AtomicUsize) -> Droppable<'_> {
             counter.fetch_add(1, Ordering::Relaxed);
 
-            Dropable { k, counter }
+            Droppable { k, counter }
         }
     }
 
-    impl Drop for Dropable<'_> {
+    impl Drop for Droppable<'_> {
         fn drop(&mut self) {
             self.counter.fetch_sub(1, Ordering::Relaxed);
         }
     }
 
-    impl Clone for Dropable<'_> {
+    impl Clone for Droppable<'_> {
         fn clone(&self) -> Self {
-            Dropable::new(self.k, self.counter)
+            Droppable::new(self.k, self.counter)
         }
     }
 
-    impl Hash for Dropable<'_> {
+    impl Hash for Droppable<'_> {
         fn hash<H>(&self, state: &mut H)
         where
             H: Hasher,
@@ -506,13 +506,13 @@ mod test_par_map {
         }
     }
 
-    impl PartialEq for Dropable<'_> {
+    impl PartialEq for Droppable<'_> {
         fn eq(&self, other: &Self) -> bool {
             self.k == other.k
         }
     }
 
-    impl Eq for Dropable<'_> {}
+    impl Eq for Droppable<'_> {}
 
     #[test]
     fn test_into_iter_drops() {
@@ -526,8 +526,8 @@ mod test_par_map {
             assert_eq!(value.load(Ordering::Relaxed), 0);
 
             for i in 0..100 {
-                let d1 = Dropable::new(i, &key);
-                let d2 = Dropable::new(i + 100, &value);
+                let d1 = Droppable::new(i, &key);
+                let d2 = Droppable::new(i + 100, &value);
                 hm.insert(d1, d2);
             }
@@ -573,8 +573,8 @@ mod test_par_map {
        assert_eq!(value.load(Ordering::Relaxed), 0);
 
        for i in 0..100 {
-            let d1 = Dropable::new(i, &key);
-            let d2 = Dropable::new(i + 100, &value);
+            let d1 = Droppable::new(i, &key);
+            let d2 = Droppable::new(i + 100, &value);
            hm.insert(d1, d2);
        }
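The `Droppable` helper renamed above is a drop-counting guard: construction bumps a shared `AtomicUsize` and `Drop` decrements it, which lets the parallel-iterator tests assert that every key and value is dropped exactly once. Below is a minimal, self-contained sketch of that pattern; the names (`DropGuard`, `live`) are illustrative, not taken from the crate.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Increments `counter` when created and decrements it when dropped.
struct DropGuard<'a> {
    counter: &'a AtomicUsize,
}

impl<'a> DropGuard<'a> {
    fn new(counter: &'a AtomicUsize) -> Self {
        counter.fetch_add(1, Ordering::Relaxed);
        DropGuard { counter }
    }
}

impl Drop for DropGuard<'_> {
    fn drop(&mut self) {
        self.counter.fetch_sub(1, Ordering::Relaxed);
    }
}

fn main() {
    let live = AtomicUsize::new(0);

    // While the collection owns the guards, the counter tracks how many are alive.
    let guards: Vec<DropGuard<'_>> = (0..100).map(|_| DropGuard::new(&live)).collect();
    assert_eq!(live.load(Ordering::Relaxed), 100);

    // Dropping the collection must release every element exactly once.
    drop(guards);
    assert_eq!(live.load(Ordering::Relaxed), 0);
}
```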
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index 72004d7a0..7272fb2dd 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -135,7 +135,7 @@ fn h1(hash: u64) -> usize {
     hash as usize
 }
 
-// Constant for h2 function that grabing the top 7 bits of the hash.
+// Constant for h2 function that grabs the top 7 bits of the hash.
 const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
     mem::size_of::<usize>()
 } else {
@@ -433,7 +433,7 @@ impl<T> Bucket<T> {
         //                  mem::size_of::<T>()
         //                        |
         //                        |  `self = from_base_index(base, 5)` that returns pointer
-        //                        |   that points here in tha data part of the table
+        //                        |   that points here in the data part of the table
         //                        |  (to the end of T5)
         //                        |     |  `base: NonNull<T>` must point here
         //                        v     |   (to the end of T0 or to the start of C0)
@@ -504,15 +504,15 @@ impl<T> Bucket<T> {
     ///
     /// * `self` contained pointer must not be `dangling`;
     ///
-    /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
-    ///   i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other
-    ///   words, `self.to_base_index() + ofset + 1` must be no greater than the number returned
+    /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+    ///   i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+    ///   words, `self.to_base_index() + offset + 1` must be no greater than the number returned
     ///   by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
     ///
     /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
-    /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
-    /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words,
-    /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the
+    /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+    /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+    /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
     /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
     ///
     /// [`Bucket`]: crate::raw::Bucket
@@ -562,7 +562,7 @@ impl<T> Bucket<T> {
     ///
     /// You should use [`RawTable::remove`] instead of this function,
     /// or be careful with calling this function directly, because compiler
-    /// calls its destructor when readed `value` goes out of scope. It
+    /// calls its destructor when the read `value` goes out of scope. It
    /// can cause double dropping when [`RawTable`] goes out of scope,
    /// because of not erased `data control byte`.
    ///
@@ -1736,8 +1736,8 @@ impl RawTableInner {
            // * Caller of this function ensures that the control bytes are properly initialized.
            //
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
-            //   of the table due to masking with `self.bucket_mask` and also because mumber of
-            //   buckets is a power of two (see `self.probe_seq` function).
+            //   of the table due to masking with `self.bucket_mask` and also because the number
+            //   of buckets is a power of two (see `self.probe_seq` function).
            //
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
            //   call `Group::load` due to the extended control bytes range, which is
@@ -1788,7 +1788,7 @@ impl RawTableInner {
     ///
     /// This function does not check if the given element exists in the table. Also,
     /// this function does not check if there is enough space in the table to insert
-    /// a new element. Caller of the funtion must make ensure that the table has at
+    /// a new element. The caller of the function must ensure that the table has at
     /// least 1 empty or deleted `bucket`, otherwise this function will never return
     /// (will go into an infinite loop) for tables larger than the group width, or
     /// return an index outside of the table indices range if the table is less than
     /// the group width.
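For context on the first `src/raw/mod.rs` hunk above: `MIN_HASH_LEN` exists so that the 7-bit control tag (`h2`) is taken from bits the hasher actually filled, even on targets where the hash is effectively only `usize` wide. The following is a sketch of that top-7-bits extraction, consistent with the fixed comment but not copied verbatim from the crate; the `main` driver and assertions are illustrative only.

```rust
use std::mem;

// Width (in bytes) of the hash that is guaranteed to be meaningful.
const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
    mem::size_of::<usize>()
} else {
    mem::size_of::<u64>()
};

/// Secondary hash: a 7-bit tag derived from the top bits of the hash,
/// of the kind stored in a control byte.
fn h2(hash: u64) -> u8 {
    // Keep only the 7 most significant bits of the meaningful hash width.
    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
    (top7 & 0x7f) as u8
}

fn main() {
    if cfg!(target_pointer_width = "64") {
        // On a 64-bit target the tag is simply the hash's 7 most significant bits.
        assert_eq!(h2(0xFE00_0000_0000_0000), 0x7F);
        assert_eq!(h2(0x0100_0000_0000_0000), 0x00);
    }
}
```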
@@ -1885,8 +1885,8 @@ impl RawTableInner {
            // * Caller of this function ensures that the control bytes are properly initialized.
            //
            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
-            //   of the table due to masking with `self.bucket_mask` and also because mumber of
-            //   buckets is a power of two (see `self.probe_seq` function).
+            //   of the table due to masking with `self.bucket_mask` and also because the number
+            //   of buckets is a power of two (see `self.probe_seq` function).
            //
            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
            //   call `Group::load` due to the extended control bytes range, which is
@@ -3171,7 +3171,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                //
                // SAFETY: This is safe as we are taking the size of an already allocated table
-                // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power
+                // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
                // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
                let mut new_table = match Self::new_uninitialized(
                    self.alloc.clone(),
@@ -3185,11 +3185,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                // Cloning elements may fail (the clone function may panic). But we don't
                // need to worry about uninitialized control bits, since:
                // 1. The number of items (elements) in the table is zero, which means that
-                //    the control bits will not be readed by Drop function.
+                //    the control bits will not be read by Drop function.
                // 2. The `clone_from_spec` method will first copy all control bits from
                //    `self` (thus initializing them). But this will not affect the `Drop`
                //    function, since the `clone_from_spec` function sets `items` only after
-                //    successfully clonning all elements.
+                //    successfully cloning all elements.
                new_table.clone_from_spec(self);
                new_table
            }
@@ -3587,7 +3587,7 @@ impl<T> RawIterRange<T> {
        // start of the array of control bytes, and never try to iterate after
        // getting all the elements, the last `self.current_group` will read bytes
        // from the `self.buckets() - Group::WIDTH` index. We know also that
-        // `self.current_group.next()` will always retun indices within the range
+        // `self.current_group.next()` will always return indices within the range
        // `0..Group::WIDTH`.
        //
        // Knowing all of the above and taking into account that we are synchronizing
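The safety comments fixed at lines 1736 and 1885 above lean on the fact that the number of buckets is a power of two, so `pos & bucket_mask` can never leave the table while the triangular, group-wide stride still reaches every group. The sketch below illustrates that property; the `GROUP_WIDTH` constant, struct names, and driver are assumptions for illustration, not the crate's `probe_seq` itself.

```rust
use std::collections::HashSet;

// Illustrative group width; the real value depends on the SIMD backend.
const GROUP_WIDTH: usize = 16;

struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    fn move_next(&mut self, bucket_mask: usize) {
        // Triangular probing: strides of 1, 2, 3, ... groups.
        self.stride += GROUP_WIDTH;
        self.pos += self.stride;
        // Because the bucket count is a power of two, `bucket_mask = buckets - 1`
        // and masking keeps the position inside `0..buckets`.
        self.pos &= bucket_mask;
    }
}

fn main() {
    let buckets = 64usize; // must be a power of two
    let bucket_mask = buckets - 1;

    let mut seq = ProbeSeq { pos: 5, stride: 0 };
    let mut groups_seen = HashSet::new();
    for _ in 0..(buckets / GROUP_WIDTH) {
        assert!(seq.pos <= bucket_mask); // the probe never leaves the table
        groups_seen.insert(seq.pos / GROUP_WIDTH);
        seq.move_next(bucket_mask);
    }
    // With a power-of-two table, one pass of triangular probing touches
    // every group exactly once before the sequence repeats.
    assert_eq!(groups_seen.len(), buckets / GROUP_WIDTH);
}
```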