Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/hashentry.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use allocator_api2::alloc::{Allocator, Global};
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Global;
use alloc::alloc::Global;

/// A view into a single entry in a map, which may either be vacant or occupied.
///
Expand Down
2 changes: 1 addition & 1 deletion src/hashiter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use core::hash::{BuildHasher, Hash};
#[cfg(feature = "stable_alloc")]
use allocator_api2::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Allocator;
use alloc::alloc::Allocator;

/// Iterator over a [`HashMap`] which yields key-value pairs.
///
Expand Down
27 changes: 14 additions & 13 deletions src/hashmap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,18 +14,18 @@ use core::{
hash::{BuildHasher, BuildHasherDefault, Hash, Hasher},
};

#[cfg(not(feature = "stable_alloc"))]
use alloc::alloc::Global;
#[cfg(feature = "stable_alloc")]
use allocator_api2::alloc::{Allocator, Global};
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Global;

// Re export the entry api.
pub use crate::hashentry::{Entry, OccupiedEntry, VacantEntry};

/// The default hasher for a [`HashMap`].
pub(crate) type DefaultHash = std::collections::hash_map::DefaultHasher;
pub(crate) type DefaultHash = crate::FnvHasher;

/// A [`HashMap`] implementation which uses a modified form of RobinHood/Hopscotch
/// probing. This implementation is efficient, roughly 2x the performance of
Expand Down Expand Up @@ -129,7 +129,8 @@ where
A: Allocator,
{
/// The default initial size of the map.
const INITIAL_SIZE: usize = 8;
/// Make sure it's a multiple of CELLS_IN_USE, so that floats are not needed during resizing
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

are -> are not

Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, meant: "no floats are needed" -> "floats are not needed". It's a little more clear.

const INITIAL_SIZE: usize = Self::CELLS_IN_USE;

/// The max number of elements to search through when having to fallback
/// to using linear search to try to find a cell.
Expand Down Expand Up @@ -623,9 +624,9 @@ where
}

// Estimate how much we need to resize by:
let ratio = cells_in_use as f32 / Self::CELLS_IN_USE as f32;
let in_use_estimated = (size_mask + 1) as f32 * ratio;
let estimated = round_to_pow2((in_use_estimated * 2.0) as usize);
// Brute-force estimate to avoid floats
let in_use_estimated = (size_mask + 1) * cells_in_use / Self::CELLS_IN_USE;
let estimated = round_to_pow2((in_use_estimated * 2) as usize);
let mut new_table_size = estimated.max(Self::INITIAL_SIZE);

loop {
Expand Down Expand Up @@ -699,21 +700,21 @@ where
let bucket_count = cells >> 2;
let bucket_ptr =
allocate::<Bucket<K, V>, A>(allocator, bucket_count, AllocationKind::Uninitialized);
let buckets = unsafe { std::slice::from_raw_parts_mut(bucket_ptr, bucket_count) };
let buckets = unsafe { core::slice::from_raw_parts_mut(bucket_ptr, bucket_count) };

// Since AtomicU8 and AtomicU64 are the same as u8 and u64 in memory,
// we can write them as zero, rather than calling the atomic versions
for i in 0..bucket_count {
unsafe {
let bucket_deltas = &mut buckets[i].deltas as *mut u8;
std::ptr::write_bytes(bucket_deltas, 0, 8);
core::ptr::write_bytes(bucket_deltas, 0, 8);
};

for cell in 0..4 {
// FIXME: How to initialize keys?
unsafe {
let cell_hash: *mut HashedKey = &mut buckets[i].cells[cell].hash;
std::ptr::write_bytes(cell_hash, 0, 1);
core::ptr::write_bytes(cell_hash, 0, 1);
};

// FIXME: We should check if the stored type is directly writable ..
Expand Down Expand Up @@ -967,12 +968,12 @@ struct Table<K, V> {
impl<K, V> Table<K, V> {
/// Gets a mutable slice of the table buckets.
fn bucket_slice_mut(&mut self) -> &mut [Bucket<K, V>] {
unsafe { std::slice::from_raw_parts_mut(self.buckets, self.size()) }
unsafe { core::slice::from_raw_parts_mut(self.buckets, self.size()) }
}

/// Gets a slice of the table buckets.
fn bucket_slice(&self) -> &[Bucket<K, V>] {
unsafe { std::slice::from_raw_parts(self.buckets, self.size()) }
unsafe { core::slice::from_raw_parts(self.buckets, self.size()) }
}

/// Returns the number of cells in the table.
Expand All @@ -994,7 +995,7 @@ struct Bucket<K, V> {
/// Cells for the bucket.
cells: [Cell<K, V>; 4],
/// Placeholder for the key
_key: std::marker::PhantomData<K>,
_key: core::marker::PhantomData<K>,
}

/// Defines the result of an insert into the [HashMap].
Expand Down
6 changes: 3 additions & 3 deletions src/hashmap_serde.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use allocator_api2::alloc::{Allocator, Global};
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Global;
use alloc::alloc::Global;

pub struct HashMapVisitor<K, V, H> {
marker: PhantomData<fn() -> HashMap<K, V, H, Global>>,
Expand All @@ -38,7 +38,7 @@ where

impl<'de, K, V, H> Visitor<'de> for HashMapVisitor<K, V, H>
where
K: Deserialize<'de> + Eq + Hash + Clone + std::fmt::Debug,
K: Deserialize<'de> + Eq + Hash + Clone + core::fmt::Debug,
V: Deserialize<'de> + Value,
H: BuildHasher + Clone + Default,
{
Expand All @@ -65,7 +65,7 @@ where

impl<'de, K, V, H> Deserialize<'de> for HashMap<K, V, H, Global>
where
K: Deserialize<'de> + Eq + Hash + Clone + std::fmt::Debug,
K: Deserialize<'de> + Eq + Hash + Clone + core::fmt::Debug,
V: Deserialize<'de> + Value,
H: BuildHasher + Clone + Default,
{
Expand Down
37 changes: 20 additions & 17 deletions src/leapmap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,17 @@ use core::{
sync::atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering},
};

use alloc::{vec, vec::Vec};

#[cfg(not(feature = "stable_alloc"))]
use alloc::alloc::Global;
#[cfg(feature = "stable_alloc")]
use allocator_api2::alloc::{Allocator, Global};
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Global;

/// The default hasher for a [`LeapMap`].
pub(crate) type DefaultHash = std::collections::hash_map::DefaultHasher;
pub(crate) type DefaultHash = crate::FnvHasher;

/// A concurrent hash map implementation which uses a modified form of RobinHood/
/// Hopscotch probing. This implementation is lock-free, and therefore it will
Expand All @@ -51,7 +53,7 @@ pub(crate) type DefaultHash = std::collections::hash_map::DefaultHasher;
/// # Limitations
///
/// This biggest limitations of this map are that the interface is slightly
/// different than using [`std::sync::RwLock<HashMap>`]. The type returned
/// different than using [`std::sync::RwLock<HashMap>`]. The type returned
/// when calling `get`, `get_mut`, `iter`, etc, are not references, but rather a
/// [`Ref`] or [`RefMut`] type which acts like a reference but still allows concurrent
/// operations on both that reference type and the map. This interface is still
Expand Down Expand Up @@ -161,7 +163,8 @@ where
A: Allocator,
{
/// The default initial size of the map.
const INITIAL_SIZE: usize = 8;
/// Make sure it's a multiple of CELLS_IN_USE, so that floats are not needed during resizing
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

are -> are not

Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, meant: "no floats are needed" -> "floats are not needed". It's a little more clear.

const INITIAL_SIZE: usize = Self::CELLS_IN_USE;

/// The max number of elements to search through when having to fallback
/// to using linear search to try to find a cell.
Expand Down Expand Up @@ -747,9 +750,9 @@ where
}

// Estimate how much we need to resize by:
let ratio = cells_in_use as f32 / Self::CELLS_IN_USE as f32;
let in_use_estimated = (size_mask + 1) as f32 * ratio;
let estimated = round_to_pow2((in_use_estimated * 2.0).max(1.0) as usize);
// Brute-force estimate to avoid floats
let in_use_estimated = (size_mask + 1) * cells_in_use / Self::CELLS_IN_USE;
let estimated = round_to_pow2((in_use_estimated * 2) as usize);

// FIXME: This doesn't allow the map to shrink
//let new_table_size = estimated.max((size_mask + 1) as usize);
Expand Down Expand Up @@ -975,7 +978,7 @@ where
migrator.overflowed.store(false, Ordering::Relaxed);
//migrator
// .dst_table
// .store(std::ptr::null_mut(), Ordering::Relaxed);
// .store(core::ptr::null_mut(), Ordering::Relaxed);

// We don't move the source tables here, rather, we wait until the
// next migration and add them to the cleanup queue then.
Expand Down Expand Up @@ -1263,21 +1266,21 @@ where
let bucket_count = cells >> 2;
let bucket_ptr =
allocate::<Bucket<K, V>, A>(allocator, bucket_count, AllocationKind::Uninitialized);
let buckets = unsafe { std::slice::from_raw_parts_mut(bucket_ptr, bucket_count) };
let buckets = unsafe { core::slice::from_raw_parts_mut(bucket_ptr, bucket_count) };

// Since AtomicU8 and AtomicU64 are the same as u8 and u64 in memory,
// we can write them as zero, rather than calling the atomic versions
for bucket in buckets.iter_mut().take(bucket_count) {
unsafe {
let bucket_deltas = &mut bucket.deltas as *mut AtomicU8;
std::ptr::write_bytes(bucket_deltas, 0, 8);
core::ptr::write_bytes(bucket_deltas, 0, 8);
};

for cell in 0..4 {
unsafe {
// We only need to write the hash as null, and can ignore the key.
let cell_hash: *mut AtomicHashedKey = &mut bucket.cells[cell].hash;
std::ptr::write_bytes(cell_hash, 0, 1);
core::ptr::write_bytes(cell_hash, 0, 1);
};

// FIXME: Check if the stored type is directly writable ..
Expand Down Expand Up @@ -1476,12 +1479,12 @@ struct Table<K, V> {
impl<K, V> Table<K, V> {
/// Gets a mutable slice of the table buckets.
pub(super) fn bucket_slice_mut(&mut self) -> &mut [Bucket<K, V>] {
unsafe { std::slice::from_raw_parts_mut(self.buckets, self.size()) }
unsafe { core::slice::from_raw_parts_mut(self.buckets, self.size()) }
}

/// Gets a slice of the table buckets.
pub(super) fn bucket_slice(&self) -> &[Bucket<K, V>] {
unsafe { std::slice::from_raw_parts(self.buckets, self.size()) }
unsafe { core::slice::from_raw_parts(self.buckets, self.size()) }
}

/// Returns the number of cells in the table.
Expand Down Expand Up @@ -1589,11 +1592,11 @@ impl<K, V> Migrator<K, V> {
self.stale_sources = Vec::with_capacity(Self::STALE_SOURCES);
for _ in 0..Self::STALE_SOURCES {
self.stale_sources
.push(AtomicPtr::<Table<K, V>>::new(std::ptr::null_mut()));
.push(AtomicPtr::<Table<K, V>>::new(core::ptr::null_mut()));
}

self.dst_table
.store(std::ptr::null_mut(), Ordering::Relaxed);
.store(core::ptr::null_mut(), Ordering::Relaxed);
self.sources = vec![];
self.status.store(Self::RESET_FLAG, Ordering::Relaxed);
self.remaining_units.store(0, Ordering::Relaxed);
Expand Down Expand Up @@ -1681,7 +1684,7 @@ impl<K, V> Migrator<K, V> {
let bucket_count = table.size() >> 2;
deallocate::<Bucket<K, V>, A>(allocator, bucket_ptr, bucket_count);
deallocate::<Table<K, V>, A>(allocator, table_ptr, 1);
self.stale_sources[index].store(std::ptr::null_mut(), Ordering::Relaxed);
self.stale_sources[index].store(core::ptr::null_mut(), Ordering::Relaxed);
}

// Lost the race, just return.
Expand Down
6 changes: 3 additions & 3 deletions src/leapmap_serde.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use allocator_api2::alloc::{Allocator, Global};
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use std::alloc::Global;
use alloc::alloc::Global;

pub struct LeapMapVisitor<K, V, H> {
marker: PhantomData<fn() -> LeapMap<K, V, H, Global>>,
Expand All @@ -38,7 +38,7 @@ where

impl<'de, K, V, H> Visitor<'de> for LeapMapVisitor<K, V, H>
where
K: Deserialize<'de> + Eq + Hash + Copy + std::fmt::Debug,
K: Deserialize<'de> + Eq + Hash + Copy + core::fmt::Debug,
V: Deserialize<'de> + Value,
H: BuildHasher + Clone + Default,
{
Expand All @@ -65,7 +65,7 @@ where

impl<'de, K, V, H> Deserialize<'de> for LeapMap<K, V, H, Global>
where
K: Deserialize<'de> + Eq + Hash + Copy + std::fmt::Debug,
K: Deserialize<'de> + Eq + Hash + Copy + core::fmt::Debug,
V: Deserialize<'de> + Value,
H: BuildHasher + Clone + Default,
{
Expand Down
3 changes: 3 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,8 +150,11 @@
//! If/when the `allocator_api` feature is no longer experimental, this feature flag will
//! be removed.

#![no_std]
#![cfg_attr(not(feature = "stable_alloc"), feature(allocator_api))]

extern crate alloc;

mod hashentry;
mod hashiter;
pub mod hashmap;
Expand Down
16 changes: 8 additions & 8 deletions src/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use core::{
#[cfg(feature = "stable_alloc")]
use allocator_api2::alloc::Allocator;
#[cfg(not(feature = "stable_alloc"))]
use core::alloc::Allocator;
use alloc::alloc::Allocator;

/// Loads the buffer `buf` as a u64.
#[inline(always)]
Expand All @@ -17,7 +17,7 @@ pub fn load_u64_le(buf: &[u8], len: usize) -> u64 {
let mut data = 0u64;
let ptr: *mut _ = &mut data;
unsafe {
std::ptr::copy_nonoverlapping(buf.as_ptr(), ptr as *mut u8, len);
core::ptr::copy_nonoverlapping(buf.as_ptr(), ptr as *mut u8, len);
}
data.to_le()
}
Expand All @@ -35,7 +35,7 @@ where
+ From<usize>,
{
let v = value - T::from(1);
let res = match std::mem::size_of::<T>() {
let res = match core::mem::size_of::<T>() {
1 => {
let v = v | (v >> T::from(1));
let v = v | (v >> T::from(2));
Expand Down Expand Up @@ -81,8 +81,8 @@ pub(crate) fn allocate<T, A: Allocator>(
count: usize,
kind: AllocationKind,
) -> *mut T {
let size = std::mem::size_of::<T>();
let align = std::mem::align_of::<T>();
let size = core::mem::size_of::<T>();
let align = core::mem::align_of::<T>();

// We unwrap here because we want to panic if we fail to get a valid layout
let layout = Layout::from_size_align(size * count, align).unwrap();
Expand All @@ -96,15 +96,15 @@ pub(crate) fn allocate<T, A: Allocator>(

/// Deallocates `count` number of elements of type T, using the `allocator`.
pub(crate) fn deallocate<T, A: Allocator>(allocator: &A, ptr: *mut T, count: usize) {
let size = std::mem::size_of::<T>();
let align = std::mem::align_of::<T>();
let size = core::mem::size_of::<T>();
let align = core::mem::align_of::<T>();

// We unwrap here because we want to panic if we fail to get a valid layout
let layout = Layout::from_size_align(size * count, align).unwrap();

// Again, unwrap the allocation result. It should never fail to allocate.
let raw_ptr = ptr as *mut u8;
let nonnull_ptr = std::ptr::NonNull::new(raw_ptr).unwrap();
let nonnull_ptr = core::ptr::NonNull::new(raw_ptr).unwrap();
unsafe {
allocator.deallocate(nonnull_ptr, layout);
}
Expand Down