Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 12 additions & 6 deletions src/map.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1787,12 +1787,18 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) {
Some(mem::replace(item, v))
} else {
self.table
.insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
None
unsafe {
let (bucket, found) = self.table.find_bucket_and_record_insertion(
hash,
make_hasher::<_, V, S>(&self.hash_builder),
equivalent_key(&k),
);
if found {
Some(mem::replace(&mut bucket.as_mut().1, v))
} else {
bucket.write((k, v));
None
}
}
}

Expand Down
96 changes: 96 additions & 0 deletions src/raw/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -832,6 +832,44 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}

/// Searches the table for an element matching `eq`; if none exists,
/// records an insertion and returns the bucket the new element must be
/// written into.
///
/// Returns `(bucket, found)`:
/// * `found == true`  — `bucket` points at an existing, initialized
///   element and no table state was modified.
/// * `found == false` — `bucket` points at an *uninitialized* slot whose
///   control byte, `items` count, and `growth_left` budget have already
///   been updated for the insertion.
///
/// # Safety
///
/// When `found` is `false` the table is inconsistent until the caller
/// initializes the returned bucket (e.g. via `Bucket::write`); using the
/// table before doing so is undefined behavior.
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) unsafe fn find_bucket_and_record_insertion(
    &mut self,
    hash: u64,
    hasher: impl Fn(&T) -> u64,
    mut eq: impl FnMut(&T) -> bool,
) -> (Bucket<T>, bool) {
    // One probe pass yields the candidate index, whether a matching element
    // already sits there, and whether the candidate slot is EMPTY (as
    // opposed to a DELETED tombstone).
    let (mut index, found, is_empty) = {
        self.table
            .find_potential_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
    };

    if unlikely(found) {
        // found = true: hand back the existing element untouched.
        return (self.bucket(index), found);
    }

    // We can avoid growing the table once we have reached our load
    // factor if we are replacing a tombstone. This works since the
    // number of EMPTY slots does not change in this case.
    if unlikely(self.table.growth_left == 0 && is_empty) {
        self.reserve(1, hasher);
        // We simplify the search logic, since we do not
        // have a bucket with equivalent content
        index = self.table.find_insert_slot(hash);
    }

    // Only consuming an EMPTY slot eats into the growth budget; reusing a
    // tombstone leaves the number of EMPTY slots unchanged.
    self.table.growth_left -= usize::from(is_empty);
    self.table.set_ctrl_h2(index, hash);
    self.table.items += 1;

    // found = false: caller must now write into this bucket.
    (self.bucket(index), found)
}

/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
Expand Down Expand Up @@ -1150,6 +1188,64 @@ impl<A: Allocator + Clone> RawTableInner<A> {
(index, old_ctrl)
}

/// Searches for an element in the table, or a potential slot where that element could be
/// inserted.
///
/// Returns `(index, found, is_empty)`:
/// * `found == true`  — `index` is the bucket of an element for which `eq`
///   returned `true` (`is_empty` is always `false` in this case).
/// * `found == false` — `index` is an EMPTY or DELETED slot suitable for
///   insertion; `is_empty` is `true` iff that slot is EMPTY rather than a
///   DELETED tombstone.
///
/// This uses dynamic dispatch to reduce the amount of code generated, but that is
/// eliminated by LLVM optimizations.
#[inline(always)]
fn find_potential_inner(
    &self,
    hash: u64,
    eq: &mut dyn FnMut(usize) -> bool,
) -> (usize, bool, bool) {
    // h2 is the hash fragment stored in the control bytes; matching it
    // narrows each group down to the few slots worth an equality check.
    let h2_hash = h2(hash);
    let mut probe_seq = self.probe_seq(hash);

    loop {
        // NOTE(review): relies on `probe_seq.pos` staying within the control
        // byte range (it is advanced masked by `bucket_mask` below).
        let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };

        // Check every slot in this group whose control byte matches the h2
        // fragment — only these can hold an equal element.
        for bit in group.match_byte(h2_hash) {
            let index = (probe_seq.pos + bit) & self.bucket_mask;

            if likely(eq(index)) {
                return (index, true, false);
            }
        }

        // We didn't find the element we were looking for in the group, try to get an
        // insertion slot from the group if we don't have one yet.
        if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
            let result = (probe_seq.pos + bit) & self.bucket_mask;

            // In tables smaller than the group width, trailing control
            // bytes outside the range of the table are filled with
            // EMPTY entries. These will unfortunately trigger a
            // match, but once masked may point to a full bucket that
            // is already occupied. We detect this situation here and
            // perform a second scan starting at the beginning of the
            // table. This second scan is guaranteed to find an empty
            // slot (due to the load factor) before hitting the trailing
            // control bytes (containing EMPTY).
            unsafe {
                let control_byte = *self.ctrl(result);
                if unlikely(is_full(control_byte)) {
                    debug_assert!(self.bucket_mask < Group::WIDTH);
                    debug_assert_ne!(probe_seq.pos, 0);
                    let result = Group::load_aligned(self.ctrl(0))
                        .match_empty_or_deleted()
                        .lowest_set_bit_nonzero();
                    debug_assert!(special_is_empty(*self.ctrl(result)));
                    // The rescan slot is guaranteed EMPTY (asserted above),
                    // hence `is_empty = true`.
                    return (result, false, true);
                }

                return (result, false, special_is_empty(control_byte));
            }
        }
        // No match and no empty/deleted slot here: every byte in the group
        // is FULL with non-matching elements, so continue the probe chain.
        probe_seq.move_next(self.bucket_mask);
    }
}

/// Searches for an empty or deleted bucket which is suitable for inserting
/// a new element.
///
Expand Down