
Reduce typo count. #558

Merged
merged 1 commit into from Sep 17, 2024
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -68,7 +68,7 @@ This release was _yanked_ due to a breaking change.
- Optimized implementation for ARM using NEON instructions. (#430)
- Support for rkyv serialization. (#432)
- `Equivalent` trait to look up values without `Borrow`. (#345)
- - `Hash{Map,Set}::raw_table_mut` is added whic returns a mutable reference. (#404)
+ - `Hash{Map,Set}::raw_table_mut` is added which returns a mutable reference. (#404)
- Fast path for `clear` on empty tables. (#428)

### Changed
42 changes: 21 additions & 21 deletions benches/set_ops.rs
@@ -19,7 +19,7 @@ const LARGE_SET_SIZE: usize = 1000;
const SMALL_SET_SIZE: usize = 100;

/// The number of keys present in both sets.
- const OVERLAPP: usize =
+ const OVERLAP: usize =
[LARGE_SET_SIZE, SMALL_SET_SIZE][(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize] / 2;

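Aside, not part of the diff: the `OVERLAP` initializer above relies on a bool-as-index trick. `(LARGE_SET_SIZE < SMALL_SET_SIZE) as usize` is `0` or `1`, so indexing a two-element array with it selects one of the two constants inside a `const` initializer without a branch. A minimal sketch of the idiom, with hypothetical constants:

```rust
// Hypothetical constants mirroring the benchmark's sizes.
const LARGE: usize = 1000;
const SMALL: usize = 100;

// `(LARGE < SMALL) as usize` is 0 here, so index 0 (i.e. LARGE) is selected.
const PICKED: usize = [LARGE, SMALL][(LARGE < SMALL) as usize];

fn main() {
    assert_eq!(PICKED, 1000);
    assert_eq!(PICKED / 2, 500); // matches OVERLAP for these sizes
}
```
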
/// Creates a set containing end - start unique string elements.
@@ -31,8 +31,8 @@ fn create_set(start: usize, end: usize) -> HashSet<String> {
fn set_ops_bit_or(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| &large_set | &small_set)
}
@@ -41,8 +41,8 @@ fn set_ops_bit_or(b: &mut Bencher) {
fn set_ops_bit_and(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| &large_set & &small_set)
}
@@ -51,8 +51,8 @@ fn set_ops_bit_and(b: &mut Bencher) {
fn set_ops_bit_xor(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| &large_set ^ &small_set)
}
@@ -61,8 +61,8 @@ fn set_ops_bit_xor(b: &mut Bencher) {
fn set_ops_sub_large_small(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| &large_set - &small_set)
}
@@ -71,8 +71,8 @@ fn set_ops_sub_large_small(b: &mut Bencher) {
fn set_ops_sub_small_large(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| &small_set - &large_set)
}
@@ -81,8 +81,8 @@ fn set_ops_sub_small_large(b: &mut Bencher) {
fn set_ops_bit_or_assign(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| {
let mut set = large_set.clone();
@@ -95,8 +95,8 @@ fn set_ops_bit_or_assign(b: &mut Bencher) {
fn set_ops_bit_and_assign(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| {
let mut set = small_set.clone();
@@ -109,8 +109,8 @@ fn set_ops_bit_and_assign(b: &mut Bencher) {
fn set_ops_bit_xor_assign(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| {
let mut set = large_set.clone();
@@ -123,8 +123,8 @@ fn set_ops_bit_xor_assign(b: &mut Bencher) {
fn set_ops_sub_assign_large_small(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| {
let mut set = large_set.clone();
@@ -137,8 +137,8 @@ fn set_ops_sub_assign_large_small(b: &mut Bencher) {
fn set_ops_sub_assign_small_large(b: &mut Bencher) {
let large_set = create_set(0, LARGE_SET_SIZE);
let small_set = create_set(
- LARGE_SET_SIZE - OVERLAPP,
- LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAPP,
+ LARGE_SET_SIZE - OVERLAP,
+ LARGE_SET_SIZE + SMALL_SET_SIZE - OVERLAP,
);
b.iter(|| {
let mut set = small_set.clone();
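For reference, the operators these benchmarks exercise are the standard set operations that `HashSet` implements via `BitOr`, `BitAnd`, `BitXor`, and `Sub` on references. A small usage sketch (assuming `hashbrown` as a dependency; the values are illustrative):

```rust
use hashbrown::HashSet;

fn main() {
    let a: HashSet<i32> = (0..5).collect(); // {0, 1, 2, 3, 4}
    let b: HashSet<i32> = (3..8).collect(); // {3, 4, 5, 6, 7}

    let union = &a | &b;        // all elements from either set
    let intersection = &a & &b; // elements present in both sets
    let sym_diff = &a ^ &b;     // elements in exactly one of the sets
    let difference = &a - &b;   // elements of `a` that are not in `b`

    assert_eq!(union.len(), 8);
    assert_eq!(intersection.len(), 2);
    assert_eq!(sym_diff.len(), 6);
    assert_eq!(difference.len(), 3);
}
```
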
28 changes: 14 additions & 14 deletions src/external_trait_impls/rayon/map.rs
@@ -472,32 +472,32 @@ mod test_par_map {

use crate::hash_map::HashMap;

- struct Dropable<'a> {
+ struct Droppable<'a> {
k: usize,
counter: &'a AtomicUsize,
}

- impl Dropable<'_> {
- fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
+ impl Droppable<'_> {
+ fn new(k: usize, counter: &AtomicUsize) -> Droppable<'_> {
counter.fetch_add(1, Ordering::Relaxed);

- Dropable { k, counter }
+ Droppable { k, counter }
}
}

- impl Drop for Dropable<'_> {
+ impl Drop for Droppable<'_> {
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::Relaxed);
}
}

- impl Clone for Dropable<'_> {
+ impl Clone for Droppable<'_> {
fn clone(&self) -> Self {
- Dropable::new(self.k, self.counter)
+ Droppable::new(self.k, self.counter)
}
}

- impl Hash for Dropable<'_> {
+ impl Hash for Droppable<'_> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
@@ -506,13 +506,13 @@ mod test_par_map {
}
}

- impl PartialEq for Dropable<'_> {
+ impl PartialEq for Droppable<'_> {
fn eq(&self, other: &Self) -> bool {
self.k == other.k
}
}

- impl Eq for Dropable<'_> {}
+ impl Eq for Droppable<'_> {}

#[test]
fn test_into_iter_drops() {
@@ -526,8 +526,8 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 0);

for i in 0..100 {
- let d1 = Dropable::new(i, &key);
- let d2 = Dropable::new(i + 100, &value);
+ let d1 = Droppable::new(i, &key);
+ let d2 = Droppable::new(i + 100, &value);
hm.insert(d1, d2);
}

@@ -573,8 +573,8 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 0);

for i in 0..100 {
- let d1 = Dropable::new(i, &key);
- let d2 = Dropable::new(i + 100, &value);
+ let d1 = Droppable::new(i, &key);
+ let d2 = Droppable::new(i + 100, &value);
hm.insert(d1, d2);
}

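The renamed `Droppable` helper in the test module above follows a common drop-counting pattern: constructing a value increments a shared `AtomicUsize`, and its `Drop` impl decrements it, so the tests can assert that the counter returns to zero once every inserted key and value has been dropped. A stripped-down sketch of the pattern (names are illustrative, not the crate's own):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Each live `Counted` value is reflected in the shared counter.
struct Counted<'a> {
    counter: &'a AtomicUsize,
}

impl<'a> Counted<'a> {
    fn new(counter: &'a AtomicUsize) -> Self {
        counter.fetch_add(1, Ordering::Relaxed); // one more live value
        Counted { counter }
    }
}

impl Drop for Counted<'_> {
    fn drop(&mut self) {
        self.counter.fetch_sub(1, Ordering::Relaxed); // one fewer live value
    }
}

fn main() {
    let live = AtomicUsize::new(0);
    {
        let _a = Counted::new(&live);
        let _b = Counted::new(&live);
        assert_eq!(live.load(Ordering::Relaxed), 2);
    }
    // Both values have been dropped, so the counter is back to zero.
    assert_eq!(live.load(Ordering::Relaxed), 0);
}
```
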
36 changes: 18 additions & 18 deletions src/raw/mod.rs
@@ -135,7 +135,7 @@ fn h1(hash: u64) -> usize {
hash as usize
}

- // Constant for h2 function that grabing the top 7 bits of the hash.
+ // Constant for h2 function that grabs the top 7 bits of the hash.
const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
mem::size_of::<usize>()
} else {
@@ -433,7 +433,7 @@ impl<T> Bucket<T> {
// mem::size_of::<T>()
// |
// | `self = from_base_index(base, 5)` that returns pointer
- // | that points here in tha data part of the table
+ // | that points here in the data part of the table
// | (to the end of T5)
// | | `base: NonNull<T>` must point here
// v | (to the end of T0 or to the start of C0)
@@ -504,15 +504,15 @@ impl<T> Bucket<T> {
///
/// * `self` contained pointer must not be `dangling`;
///
- /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
- /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other
- /// words, `self.to_base_index() + ofset + 1` must be no greater than the number returned
+ /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+ /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned
/// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
///
/// If `mem::size_of::<T>() == 0`, then the only requirement is that the
- /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`,
- /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words,
- /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the
+ /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+ /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
/// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
///
/// [`Bucket`]: crate::raw::Bucket
@@ -562,7 +562,7 @@ impl<T> Bucket<T> {
///
/// You should use [`RawTable::remove`] instead of this function,
/// or be careful with calling this function directly, because compiler
- /// calls its destructor when readed `value` goes out of scope. It
+ /// calls its destructor when the read `value` goes out of scope. It
/// can cause double dropping when [`RawTable`] goes out of scope,
/// because of not erased `data control byte`.
///
@@ -1736,8 +1736,8 @@ impl RawTableInner {
// * Caller of this function ensures that the control bytes are properly initialized.
//
// * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
- // of the table due to masking with `self.bucket_mask` and also because mumber of
- // buckets is a power of two (see `self.probe_seq` function).
+ // of the table due to masking with `self.bucket_mask` and also because the number
+ // of buckets is a power of two (see `self.probe_seq` function).
//
// * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
// call `Group::load` due to the extended control bytes range, which is
@@ -1788,7 +1788,7 @@ impl RawTableInner {
///
/// This function does not check if the given element exists in the table. Also,
/// this function does not check if there is enough space in the table to insert
- /// a new element. Caller of the funtion must make ensure that the table has at
+ /// a new element. The caller of the function must make sure that the table has at
/// least 1 empty or deleted `bucket`, otherwise this function will never return
/// (will go into an infinite loop) for tables larger than the group width, or
/// return an index outside of the table indices range if the table is less than
@@ -1885,8 +1885,8 @@ impl RawTableInner {
// * Caller of this function ensures that the control bytes are properly initialized.
//
// * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
- // of the table due to masking with `self.bucket_mask` and also because mumber of
- // buckets is a power of two (see `self.probe_seq` function).
+ // of the table due to masking with `self.bucket_mask` and also because the number
+ // of buckets is a power of two (see `self.probe_seq` function).
//
// * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
// call `Group::load` due to the extended control bytes range, which is
@@ -3171,7 +3171,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
// Avoid `Result::ok_or_else` because it bloats LLVM IR.
//
// SAFETY: This is safe as we are taking the size of an already allocated table
- // and therefore сapacity overflow cannot occur, `self.table.buckets()` is power
+ // and therefore capacity overflow cannot occur, `self.table.buckets()` is power
// of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
let mut new_table = match Self::new_uninitialized(
self.alloc.clone(),
@@ -3185,11 +3185,11 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
// Cloning elements may fail (the clone function may panic). But we don't
// need to worry about uninitialized control bits, since:
// 1. The number of items (elements) in the table is zero, which means that
- // the control bits will not be readed by Drop function.
+ // the control bits will not be read by Drop function.
// 2. The `clone_from_spec` method will first copy all control bits from
// `self` (thus initializing them). But this will not affect the `Drop`
// function, since the `clone_from_spec` function sets `items` only after
- // successfully clonning all elements.
+ // successfully cloning all elements.
new_table.clone_from_spec(self);
new_table
}
@@ -3587,7 +3587,7 @@ impl<T> RawIterRange<T> {
// start of the array of control bytes, and never try to iterate after
// getting all the elements, the last `self.current_group` will read bytes
// from the `self.buckets() - Group::WIDTH` index. We know also that
- // `self.current_group.next()` will always retun indices within the range
+ // `self.current_group.next()` will always return indices within the range
// `0..Group::WIDTH`.
//
// Knowing all of the above and taking into account that we are synchronizing
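Several of the corrected comments above lean on the same invariant: the number of buckets is a power of two, so masking a probe position with `bucket_mask = buckets - 1` is equivalent to reducing it modulo the table size and can never yield an index past `bucket_mask`. A minimal standalone illustration of that fact (not the crate's own code):

```rust
fn main() {
    let buckets: usize = 16;        // hypothetical table size, a power of two
    let bucket_mask = buckets - 1;  // 0b1111

    for pos in [3_usize, 15, 16, 17, 100, usize::MAX] {
        // For power-of-two sizes, masking and taking the remainder agree,
        // and the masked value can never exceed `bucket_mask`.
        assert_eq!(pos & bucket_mask, pos % buckets);
        assert!(pos & bucket_mask <= bucket_mask);
    }
}
```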