author     Jeff Vander Stoep <jeffv@google.com>  2022-07-05 17:00:16 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-07-05 17:00:16 +0000
commit     029eb8c7ca64ec3bd6527addd26c030d06b51b09 (patch)
tree       2cc8213e5cbbe16cb7a307e49e340dd0cc0d326a
parent     ffb03349fdd1a9e44f99ffbbefc799b802144793 (diff)
parent     dd22deafe25307242d438fc27b01fa009fb0b6b8 (diff)
download   hashbrown-029eb8c7ca64ec3bd6527addd26c030d06b51b09.tar.gz
Update to 0.12.1 am: 05a37eb749 am: b6260349af am: 116ae871ea am: dd22deafe2
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/hashbrown/+/2145788

Change-Id: Icca6e141afd44a1a8aead493a472beed1e1f1d0a
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json                        |    7
-rw-r--r--  Android.bp                                  |    5
-rw-r--r--  CHANGELOG.md                                |   45
-rw-r--r--  Cargo.toml                                  |   59
-rw-r--r--  Cargo.toml.orig                             |   11
-rw-r--r--  METADATA                                    |   10
-rw-r--r--  README.md                                   |   10
-rw-r--r--  benches/bench.rs                            |    2
-rw-r--r--  benches/insert_unique_unchecked.rs          |   32
-rw-r--r--  src/external_trait_impls/rayon/helpers.rs   |    1
-rw-r--r--  src/external_trait_impls/rayon/map.rs       |    4
-rw-r--r--  src/external_trait_impls/rayon/raw.rs       |   14
-rw-r--r--  src/external_trait_impls/serde.rs           |    1
-rw-r--r--  src/lib.rs                                  |   21
-rw-r--r--  src/macros.rs                               |    4
-rw-r--r--  src/map.rs                                  | 1767
-rw-r--r--  src/raw/alloc.rs                            |    3
-rw-r--r--  src/raw/bitmask.rs                          |    2
-rw-r--r--  src/raw/generic.rs                          |    7
-rw-r--r--  src/raw/mod.rs                              |  670
-rw-r--r--  src/raw/sse2.rs                             |    1
-rw-r--r--  src/rustc_entry.rs                          |    8
-rw-r--r--  src/scopeguard.rs                           |    2
-rw-r--r--  src/set.rs                                  |   78
-rw-r--r--  tests/rayon.rs                              |   70
-rw-r--r--  tests/set.rs                                |   34
26 files changed, 2340 insertions(+), 528 deletions(-)
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 0eb3824..112a41c 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "bbb5d3bb1c23569c15e54c670bc0c3669ae3e7dc"
- }
-}
+ "sha1": "fe06a93d7cba6241e636696fbf5634d8b3e65bb0"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 28b8053..b35773f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -39,13 +39,12 @@ license {
rust_library {
name: "libhashbrown",
- // has rustc warnings
host_supported: true,
crate_name: "hashbrown",
cargo_env_compat: true,
- cargo_pkg_version: "0.11.2",
+ cargo_pkg_version: "0.12.1",
srcs: ["src/lib.rs"],
- edition: "2018",
+ edition: "2021",
features: [
"ahash",
"default",
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c88d3e0..ac1bb4b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,11 +2,48 @@
All notable changes to this project will be documented in this file.
-The format is based on [Keep a Changelog](http://keepachangelog.com/)
-and this project adheres to [Semantic Versioning](http://semver.org/).
+The format is based on [Keep a Changelog](https://keepachangelog.com/)
+and this project adheres to [Semantic Versioning](https://semver.org/).
## [Unreleased]
+## [v0.12.1] - 2022-05-02
+
+## Fixed
+
+- Fixed underflow in `RawIterRange::size_hint`. (#325)
+- Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325)
+
+## [v0.12.0] - 2022-01-17
+
+## Added
+
+- Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297)
+- Added an `allocator()` getter to HashMap and HashSet. (#257)
+- Added `insert_unique_unchecked` to `HashMap` and `HashSet`. (#293)
+- Added `into_keys` and `into_values` to HashMap. (#295)
+- Implement `From<array>` on `HashSet` and `HashMap`. (#298)
+- Added `entry_ref` API to `HashMap`. (#201)
+
+## Changed
+
+- Bumped minimum Rust version to 1.56.1 and edition to 2021.
+- Use u64 for the GroupWord on WebAssembly. (#271)
+- Optimized `find`. (#279)
+- Made rehashing and resizing less generic to reduce compilation time. (#282)
+- Inlined small functions. (#283)
+- Use `BuildHasher::hash_one` when `feature = "nightly"` is enabled. (#292)
+- Relaxed the bounds on `Debug` for `HashSet`. (#296)
+- Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291)
+- Don't hash the key when searching in an empty table. (#305)
+
+## Fixed
+
+- Guard against allocations exceeding isize::MAX. (#268)
+- Made `RawTable::insert_no_grow` unsafe. (#254)
+- Inline `static_empty`. (#280)
+- Fixed trait bounds on Send/Sync impls. (#303)
+
## [v0.11.2] - 2021-03-25
## Fixed
@@ -307,7 +344,9 @@ This release was _yanked_ due to a breaking change for users of `no-default-feat
- Initial release
-[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.11.2...HEAD
+[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...HEAD
+[v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1
+[v0.12.0]: https://github.com/rust-lang/hashbrown/compare/v0.11.2...v0.12.0
[v0.11.2]: https://github.com/rust-lang/hashbrown/compare/v0.11.1...v0.11.2
[v0.11.1]: https://github.com/rust-lang/hashbrown/compare/v0.11.0...v0.11.1
[v0.11.0]: https://github.com/rust-lang/hashbrown/compare/v0.10.0...v0.11.0
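For context, the 0.12 additions listed above combine naturally; the following is a minimal sketch (not part of the diff), assuming hashbrown 0.12 with default features, exercising `From<[(K, V); N]>`, `entry_ref`, and `into_keys`:

```rust
use hashbrown::HashMap;

fn main() {
    // From<[(K, V); N]> (#297/#298): build a map directly from an array.
    let map = HashMap::from([("a", 1), ("b", 2), ("c", 3)]);

    // entry_ref (#201): look up by &str without allocating a String
    // unless the entry turns out to be vacant.
    let mut counts: HashMap<String, u32> = HashMap::new();
    for word in ["x", "y", "x"] {
        *counts.entry_ref(word).or_insert(0) += 1;
    }
    assert_eq!(counts["x"], 2);

    // into_keys / into_values (#295): consume the map into its parts.
    let mut keys: Vec<&str> = map.into_keys().collect();
    keys.sort_unstable();
    assert_eq!(keys, ["a", "b", "c"]);
}
```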
diff --git a/Cargo.toml b/Cargo.toml
index 02bd480..6bc1ff4 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,27 +3,46 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
+rust-version = "1.56.1"
name = "hashbrown"
-version = "0.11.2"
+version = "0.12.1"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
-exclude = [".travis.yml", "bors.toml", "/ci/*"]
+exclude = [
+ ".github",
+ "/ci/*",
+]
description = "A Rust port of Google's SwissTable hash map"
readme = "README.md"
-keywords = ["hash", "no_std", "hashmap", "swisstable"]
-categories = ["data-structures", "no-std"]
-license = "Apache-2.0/MIT"
+keywords = [
+ "hash",
+ "no_std",
+ "hashmap",
+ "swisstable",
+]
+categories = [
+ "data-structures",
+ "no-std",
+]
+license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/hashbrown"
+resolver = "2"
+
[package.metadata.docs.rs]
-features = ["nightly", "rayon", "serde", "raw"]
+features = [
+ "nightly",
+ "rayon",
+ "serde",
+ "raw",
+]
+
[dependencies.ahash]
version = "0.7.0"
optional = true
@@ -55,6 +74,7 @@ optional = true
version = "1.0.25"
optional = true
default-features = false
+
[dev-dependencies.doc-comment]
version = "0.3.1"
@@ -65,7 +85,7 @@ version = "1.0.7"
version = "1.4"
[dev-dependencies.rand]
-version = "0.7.3"
+version = "0.8.3"
features = ["small_rng"]
[dev-dependencies.rayon]
@@ -76,9 +96,18 @@ version = "1.0"
[features]
ahash-compile-time-rng = ["ahash/compile-time-rng"]
-default = ["ahash", "inline-more"]
+default = [
+ "ahash",
+ "inline-more",
+]
inline-more = []
nightly = []
raw = []
-rustc-dep-of-std = ["nightly", "core", "compiler_builtins", "alloc", "rustc-internal-api"]
+rustc-dep-of-std = [
+ "nightly",
+ "core",
+ "compiler_builtins",
+ "alloc",
+ "rustc-internal-api",
+]
rustc-internal-api = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index a056c3c..2a6b180 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,15 +1,16 @@
[package]
name = "hashbrown"
-version = "0.11.2"
+version = "0.12.1"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "A Rust port of Google's SwissTable hash map"
-license = "Apache-2.0/MIT"
+license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/hashbrown"
readme = "README.md"
keywords = ["hash", "no_std", "hashmap", "swisstable"]
categories = ["data-structures", "no-std"]
-exclude = [".travis.yml", "bors.toml", "/ci/*"]
-edition = "2018"
+exclude = [".github", "/ci/*"]
+edition = "2021"
+rust-version = "1.56.1"
[dependencies]
# For the default hasher
@@ -29,7 +30,7 @@ bumpalo = { version = "3.5.0", optional = true }
[dev-dependencies]
lazy_static = "1.4"
-rand = { version = "0.7.3", features = ["small_rng"] }
+rand = { version = "0.8.3", features = ["small_rng"] }
rayon = "1.0"
fnv = "1.0.7"
serde_test = "1.0"
diff --git a/METADATA b/METADATA
index fa67d80..4410ede 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/hashbrown/hashbrown-0.11.2.crate"
+ value: "https://static.crates.io/crates/hashbrown/hashbrown-0.12.1.crate"
}
- version: "0.11.2"
+ version: "0.12.1"
license_type: NOTICE
last_upgrade_date {
- year: 2021
- month: 4
- day: 1
+ year: 2022
+ month: 7
+ day: 5
}
}
diff --git a/README.md b/README.md
index 86664c4..2eddcf3 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
hashbrown
=========
-[![Build Status](https://travis-ci.com/rust-lang/hashbrown.svg?branch=master)](https://travis-ci.com/rust-lang/hashbrown)
+[![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions)
[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown)
[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown)
-[![Rust](https://img.shields.io/badge/rust-1.49.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown)
+[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown)
This crate is a Rust port of Google's high-performance [SwissTable] hash
map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
@@ -85,7 +85,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
-hashbrown = "0.11"
+hashbrown = "0.12"
```
Then:
@@ -114,8 +114,8 @@ this pre-generates seeds at compile time and embeds them as constants. See [aHas
Licensed under either of:
- * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
- * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
at your option.
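The README's drop-in claim amounts to swapping the import; a minimal sketch, assuming hashbrown 0.12 as a dependency:

```rust
// hashbrown's HashMap mirrors std::collections::HashMap's API,
// so switching is typically just a matter of changing the import.
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<&str, i32> = HashMap::new();
    map.insert("answer", 42);
    assert_eq!(map.get("answer"), Some(&42));
}
```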
diff --git a/benches/bench.rs b/benches/bench.rs
index 568c513..c393b9a 100644
--- a/benches/bench.rs
+++ b/benches/bench.rs
@@ -38,7 +38,7 @@ impl Iterator for RandomKeys {
type Item = usize;
fn next(&mut self) -> Option<usize> {
// Add 1 then multiply by some 32 bit prime.
- self.state = self.state.wrapping_add(1).wrapping_mul(3787392781);
+ self.state = self.state.wrapping_add(1).wrapping_mul(3_787_392_781);
Some(self.state)
}
}
diff --git a/benches/insert_unique_unchecked.rs b/benches/insert_unique_unchecked.rs
new file mode 100644
index 0000000..857ad18
--- /dev/null
+++ b/benches/insert_unique_unchecked.rs
@@ -0,0 +1,32 @@
+//! Compare `insert` and `insert_unique_unchecked` operations performance.
+
+#![feature(test)]
+
+extern crate test;
+
+use hashbrown::HashMap;
+use test::Bencher;
+
+#[bench]
+fn insert(b: &mut Bencher) {
+ let keys: Vec<String> = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect();
+ b.iter(|| {
+ let mut m = HashMap::with_capacity(1000);
+ for k in &keys {
+ m.insert(k, k);
+ }
+ m
+ });
+}
+
+#[bench]
+fn insert_unique_unchecked(b: &mut Bencher) {
+ let keys: Vec<String> = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect();
+ b.iter(|| {
+ let mut m = HashMap::with_capacity(1000);
+ for k in &keys {
+ m.insert_unique_unchecked(k, k);
+ }
+ m
+ });
+}
diff --git a/src/external_trait_impls/rayon/helpers.rs b/src/external_trait_impls/rayon/helpers.rs
index 9382007..070b08c 100644
--- a/src/external_trait_impls/rayon/helpers.rs
+++ b/src/external_trait_impls/rayon/helpers.rs
@@ -4,6 +4,7 @@ use alloc::vec::Vec;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Helper for collecting parallel iterators to an intermediary
+#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending!
pub(super) fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
let list = iter
.into_par_iter()
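The `#[allow(clippy::linkedlist)]` above is deliberate: parallel collection folds items into per-thread `Vec`s and then splices the per-thread results together, and `LinkedList::append` makes that splice O(1). A rough sketch of that fold/map/reduce shape (an illustration of the idea, not the exact body of `collect`):

```rust
use std::collections::LinkedList;

use rayon::iter::{IntoParallelIterator, ParallelIterator};

// Illustrative only: each rayon worker fills its own Vec, and the
// per-worker Vecs are spliced together with O(1) LinkedList::append.
fn collect_chunks<I>(iter: I) -> LinkedList<Vec<I::Item>>
where
    I: IntoParallelIterator,
    I::Item: Send,
{
    iter.into_par_iter()
        .fold(Vec::new, |mut vec, item| {
            vec.push(item); // amortized O(1), cache-friendly
            vec
        })
        .map(|vec| {
            let mut list = LinkedList::new();
            list.push_back(vec);
            list
        })
        .reduce(LinkedList::new, |mut left, mut right| {
            left.append(&mut right); // O(1) splice: why LinkedList is used
            left
        })
}
```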
diff --git a/src/external_trait_impls/rayon/map.rs b/src/external_trait_impls/rayon/map.rs
index 61b7380..14d91c2 100644
--- a/src/external_trait_impls/rayon/map.rs
+++ b/src/external_trait_impls/rayon/map.rs
@@ -512,7 +512,7 @@ mod test_par_map {
where
H: Hasher,
{
- self.k.hash(state)
+ self.k.hash(state);
}
}
@@ -679,7 +679,7 @@ mod test_par_map {
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
- map.par_values_mut().for_each(|value| *value = (*value) * 2);
+ map.par_values_mut().for_each(|value| *value *= 2);
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
diff --git a/src/external_trait_impls/rayon/raw.rs b/src/external_trait_impls/rayon/raw.rs
index 18da1ea..883303e 100644
--- a/src/external_trait_impls/rayon/raw.rs
+++ b/src/external_trait_impls/rayon/raw.rs
@@ -87,7 +87,7 @@ impl<T, A: Allocator + Clone> RawIntoParIter<T, A> {
}
}
-impl<T: Send, A: Allocator + Clone> ParallelIterator for RawIntoParIter<T, A> {
+impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -116,7 +116,7 @@ pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> {
marker: PhantomData<&'a RawTable<T, A>>,
}
-unsafe impl<T, A: Allocator + Clone> Send for RawParDrain<'_, T, A> {}
+unsafe impl<T: Send, A: Allocator + Clone> Send for RawParDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> RawParDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -134,7 +134,7 @@ impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> {
C: UnindexedConsumer<Self::Item>,
{
let _guard = guard(self.table, |table| unsafe {
- table.as_mut().clear_no_drop()
+ table.as_mut().clear_no_drop();
});
let iter = unsafe { self.table.as_ref().iter().iter };
mem::forget(self);
@@ -146,7 +146,9 @@ impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> {
impl<T, A: Allocator + Clone> Drop for RawParDrain<'_, T, A> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
- unsafe { self.table.as_mut().clear() }
+ unsafe {
+ self.table.as_mut().clear();
+ }
}
}
@@ -175,7 +177,7 @@ impl<T: Send> UnindexedProducer for ParDrainProducer<T> {
{
// Make sure to modify the iterator in-place so that any remaining
// elements are processed in our Drop impl.
- while let Some(item) = self.iter.next() {
+ for item in &mut self.iter {
folder = folder.consume(unsafe { item.read() });
if folder.full() {
return folder;
@@ -193,7 +195,7 @@ impl<T> Drop for ParDrainProducer<T> {
fn drop(&mut self) {
// Drop all remaining elements
if mem::needs_drop::<T>() {
- while let Some(item) = self.iter.next() {
+ for item in &mut self.iter {
unsafe {
item.drop();
}
diff --git a/src/external_trait_impls/serde.rs b/src/external_trait_impls/serde.rs
index 7816e78..4d62dee 100644
--- a/src/external_trait_impls/serde.rs
+++ b/src/external_trait_impls/serde.rs
@@ -161,6 +161,7 @@ mod set {
deserializer.deserialize_seq(visitor)
}
+ #[allow(clippy::missing_errors_doc)]
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
diff --git a/src/lib.rs b/src/lib.rs
index b2d6584..bc1c971 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,7 +21,8 @@
allocator_api,
slice_ptr_get,
nonnull_slice_from_raw_parts,
- maybe_uninit_array_assume_init
+ maybe_uninit_array_assume_init,
+ build_hasher_simple_hash_one
)
)]
#![allow(
@@ -30,7 +31,9 @@
clippy::must_use_candidate,
clippy::option_if_let_else,
clippy::redundant_else,
- clippy::manual_map
+ clippy::manual_map,
+ clippy::missing_safety_doc,
+ clippy::missing_errors_doc
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
@@ -128,20 +131,6 @@ pub enum TryReserveError {
},
}
-/// The error type for [`RawTable::get_each_mut`](crate::raw::RawTable::get_each_mut),
-/// [`HashMap::get_each_mut`], and [`HashMap::get_each_key_value_mut`].
-#[cfg(feature = "nightly")]
-#[derive(Clone, PartialEq, Eq, Debug)]
-pub enum UnavailableMutError {
- /// The requested entry is not present in the table.
- Absent,
- /// The requested entry is present, but a mutable reference to it was already created and
- /// returned from this call to `get_each_mut` or `get_each_key_value_mut`.
- ///
- /// Includes the index of the existing mutable reference in the returned array.
- Duplicate(usize),
-}
-
/// Wrapper around `Bump` which allows it to be used as an allocator for
/// `HashMap`, `HashSet` and `RawTable`.
///
diff --git a/src/macros.rs b/src/macros.rs
index 0279597..4de6b5a 100644
--- a/src/macros.rs
+++ b/src/macros.rs
@@ -57,8 +57,8 @@ macro_rules! cfg_if {
// default fn syntax for specialization changes in the future.
#[cfg(feature = "nightly")]
macro_rules! default_fn {
- ($($tt:tt)*) => {
- default $($tt)*
+ (#[$($a:tt)*] $($tt:tt)*) => {
+ #[$($a)*] default $($tt)*
}
}
#[cfg(not(feature = "nightly"))]
diff --git a/src/map.rs b/src/map.rs
index ab11288..98324b5 100644
--- a/src/map.rs
+++ b/src/map.rs
@@ -1,15 +1,11 @@
use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable};
use crate::TryReserveError;
-#[cfg(feature = "nightly")]
-use crate::UnavailableMutError;
use core::borrow::Borrow;
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
use core::iter::{FromIterator, FusedIterator};
use core::marker::PhantomData;
use core::mem;
-#[cfg(feature = "nightly")]
-use core::mem::MaybeUninit;
use core::ops::Index;
/// Default hasher for `HashMap`.
@@ -244,6 +240,7 @@ where
move |x| k.eq(x.borrow())
}
+#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
where
@@ -257,6 +254,18 @@ where
state.finish()
}
+#[cfg(feature = "nightly")]
+#[cfg_attr(feature = "inline-more", inline)]
+pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+where
+ K: Borrow<Q>,
+ Q: Hash + ?Sized,
+ S: BuildHasher,
+{
+ hash_builder.hash_one(val)
+}
+
+#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
where
@@ -269,6 +278,16 @@ where
state.finish()
}
+#[cfg(feature = "nightly")]
+#[cfg_attr(feature = "inline-more", inline)]
+pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
+where
+ K: Hash,
+ S: BuildHasher,
+{
+ hash_builder.hash_one(val)
+}
+
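Both `make_hash` paths compute the same value; nightly's `BuildHasher::hash_one` (gated on `build_hasher_simple_hash_one`, see the `src/lib.rs` diff below) just folds the build/write/finish sequence into one call. A small stable-Rust sketch of the equivalence, with the nightly call shown as a comment:

```rust
use core::hash::{BuildHasher, Hash, Hasher};
use std::collections::hash_map::RandomState;

// The stable path, as in the `#[cfg(not(feature = "nightly"))]` version above:
// build a hasher, feed the value, then finish.
fn hash_stable<T: Hash + ?Sized, S: BuildHasher>(builder: &S, val: &T) -> u64 {
    let mut state = builder.build_hasher();
    val.hash(&mut state);
    state.finish()
}

fn main() {
    let builder = RandomState::new();
    let a = hash_stable(&builder, "key");
    // On nightly with `#![feature(build_hasher_simple_hash_one)]` this
    // one-liner computes the same value:
    //     let b = builder.hash_one("key");
    let b = hash_stable(&builder, "key");
    assert_eq!(a, b); // same builder + same value => same hash
}
```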
#[cfg(feature = "ahash")]
impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// Creates an empty `HashMap`.
@@ -395,6 +414,12 @@ impl<K, V, S> HashMap<K, V, S> {
}
impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
+ /// Returns a reference to the underlying allocator.
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.table.allocator()
+ }
+
/// Creates an empty `HashMap` which will use the given hash builder to hash
/// keys. It will be allocated with the given allocator.
///
@@ -773,6 +798,52 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
pub fn clear(&mut self) {
self.table.clear();
}
+
+ /// Creates a consuming iterator visiting all the keys in arbitrary order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// let vec: Vec<&str> = map.into_keys().collect();
+ /// ```
+ #[inline]
+ pub fn into_keys(self) -> IntoKeys<K, V, A> {
+ IntoKeys {
+ inner: self.into_iter(),
+ }
+ }
+
+ /// Creates a consuming iterator visiting all the values in arbitrary order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1);
+ /// map.insert("b", 2);
+ /// map.insert("c", 3);
+ ///
+ /// let vec: Vec<i32> = map.into_values().collect();
+ /// ```
+ #[inline]
+ pub fn into_values(self) -> IntoValues<K, V, A> {
+ IntoValues {
+ inner: self.into_iter(),
+ }
+ }
}
impl<K, V, S, A> HashMap<K, V, S, A>
@@ -915,6 +986,46 @@ where
}
}
+ /// Gets the given key's corresponding entry by reference in the map for in-place manipulation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut words: HashMap<String, usize> = HashMap::new();
+ /// let source = ["poneyland", "horseyland", "poneyland", "poneyland"];
+ /// for (i, &s) in source.iter().enumerate() {
+ /// let counter = words.entry_ref(s).or_insert(0);
+ /// *counter += 1;
+ /// }
+ ///
+ /// assert_eq!(words["poneyland"], 3);
+ /// assert_eq!(words["horseyland"], 1);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ let hash = make_hash::<K, Q, S>(&self.hash_builder, key);
+ if let Some(elem) = self.table.find(hash, equivalent_key(key)) {
+ EntryRef::Occupied(OccupiedEntryRef {
+ hash,
+ key: Some(KeyOrRef::Borrowed(key)),
+ elem,
+ table: self,
+ })
+ } else {
+ EntryRef::Vacant(VacantEntryRef {
+ hash,
+ key: KeyOrRef::Borrowed(key),
+ table: self,
+ })
+ }
+ }
+
/// Returns a reference to the value corresponding to the key.
///
/// The key may be any borrowed form of the map's key type, but
@@ -985,8 +1096,12 @@ where
K: Borrow<Q>,
Q: Hash + Eq,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
- self.table.get(hash, equivalent_key(k))
+ if self.table.is_empty() {
+ None
+ } else {
+ let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ self.table.get(hash, equivalent_key(k))
+ }
}
/// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value.
@@ -1093,24 +1208,24 @@ where
K: Borrow<Q>,
Q: Hash + Eq,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
- self.table.get_mut(hash, equivalent_key(k))
+ if self.table.is_empty() {
+ None
+ } else {
+ let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ self.table.get_mut(hash, equivalent_key(k))
+ }
}
/// Attempts to get mutable references to `N` values in the map at once.
///
- /// Returns an array of length `N` with the results of each query. For soundness,
- /// at most one mutable reference will be returned to any value. An
- /// `Err(UnavailableMutError::Duplicate(i))` in the returned array indicates that a suitable
- /// key-value pair exists, but a mutable reference to the value already occurs at index `i` in
- /// the returned array.
- ///
- /// This method is available only if the `nightly` feature is enabled.
+ /// Returns an array of length `N` with the results of each query. For soundness, at most one
+ /// mutable reference will be returned to any value. `None` will be returned if any of the
+ /// keys are duplicates or missing.
///
/// # Examples
///
/// ```
- /// use hashbrown::{HashMap, UnavailableMutError};
+ /// use hashbrown::HashMap;
///
/// let mut libraries = HashMap::new();
/// libraries.insert("Bodleian Library".to_string(), 1602);
@@ -1118,58 +1233,108 @@ where
/// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
/// libraries.insert("Library of Congress".to_string(), 1800);
///
- /// let got = libraries.get_each_mut([
+ /// let got = libraries.get_many_mut([
+ /// "Athenæum",
+ /// "Library of Congress",
+ /// ]);
+ /// assert_eq!(
+ /// got,
+ /// Some([
+ /// &mut 1807,
+ /// &mut 1800,
+ /// ]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let got = libraries.get_many_mut([
/// "Athenæum",
/// "New York Public Library",
+ /// ]);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let got = libraries.get_many_mut([
+ /// "Athenæum",
+ /// "Athenæum",
+ /// ]);
+ /// assert_eq!(got, None);
+ /// ```
+ pub fn get_many_mut<Q: ?Sized, const N: usize>(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v))
+ }
+
+ /// Attempts to get mutable references to `N` values in the map at once, without validating that
+ /// the values are unique.
+ ///
+ /// Returns an array of length `N` with the results of each query. `None` will be returned if
+ /// any of the keys are missing.
+ ///
+ /// For a safe alternative see [`get_many_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+ /// references are not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut libraries = HashMap::new();
+ /// libraries.insert("Bodleian Library".to_string(), 1602);
+ /// libraries.insert("Athenæum".to_string(), 1807);
+ /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
+ /// libraries.insert("Library of Congress".to_string(), 1800);
+ ///
+ /// let got = unsafe { libraries.get_many_unchecked_mut([
+ /// "Athenæum",
+ /// "Library of Congress",
+ /// ]) };
/// assert_eq!(
/// got,
- /// [
- /// Ok(&mut 1807),
- /// Err(UnavailableMutError::Absent),
- /// Err(UnavailableMutError::Duplicate(0)),
- /// Ok(&mut 1800),
- /// ]
+ /// Some([
+ /// &mut 1807,
+ /// &mut 1800,
+ /// ]),
/// );
+ ///
+ /// // Missing keys result in None
+ /// let got = unsafe { libraries.get_many_unchecked_mut([
+ /// "Athenæum",
+ /// "New York Public Library",
+ /// ]) };
+ /// assert_eq!(got, None);
/// ```
- #[cfg(feature = "nightly")]
- pub fn get_each_mut<Q: ?Sized, const N: usize>(
+ pub unsafe fn get_many_unchecked_mut<Q: ?Sized, const N: usize>(
&mut self,
ks: [&Q; N],
- ) -> [Result<&'_ mut V, UnavailableMutError>; N]
+ ) -> Option<[&'_ mut V; N]>
where
K: Borrow<Q>,
Q: Hash + Eq,
{
- let mut pairs = self.get_each_inner_mut(ks);
- // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
- let mut out: [MaybeUninit<Result<&'_ mut V, UnavailableMutError>>; N] =
- unsafe { MaybeUninit::uninit().assume_init() };
- for i in 0..N {
- out[i] = MaybeUninit::new(
- mem::replace(&mut pairs[i], Err(UnavailableMutError::Absent)).map(|(_, v)| v),
- );
- }
- unsafe { MaybeUninit::array_assume_init(out) }
+ self.get_many_unchecked_mut_inner(ks)
+ .map(|res| res.map(|(_, v)| v))
}
/// Attempts to get mutable references to `N` values in the map at once, with immutable
/// references to the corresponding keys.
///
- /// Returns an array of length `N` with the results of each query. For soundness,
- /// at most one mutable reference will be returned to any value. An
- /// `Err(UnavailableMutError::Duplicate(i))` in the returned array indicates that a suitable
- /// key-value pair exists, but a mutable reference to the value already occurs at index `i` in
- /// the returned array.
- ///
- /// This method is available only if the `nightly` feature is enabled.
+ /// Returns an array of length `N` with the results of each query. For soundness, at most one
+ /// mutable reference will be returned to any value. `None` will be returned if any of the keys
+ /// are duplicates or missing.
///
/// # Examples
///
/// ```
- /// use hashbrown::{HashMap, UnavailableMutError};
+ /// use hashbrown::HashMap;
///
/// let mut libraries = HashMap::new();
/// libraries.insert("Bodleian Library".to_string(), 1602);
@@ -1177,49 +1342,127 @@ where
/// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
/// libraries.insert("Library of Congress".to_string(), 1800);
///
- /// let got = libraries.get_each_key_value_mut([
+ /// let got = libraries.get_many_key_value_mut([
/// "Bodleian Library",
/// "Herzogin-Anna-Amalia-Bibliothek",
- /// "Herzogin-Anna-Amalia-Bibliothek",
+ /// ]);
+ /// assert_eq!(
+ /// got,
+ /// Some([
+ /// (&"Bodleian Library".to_string(), &mut 1602),
+ /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691),
+ /// ]),
+ /// );
+ /// // Missing keys result in None
+ /// let got = libraries.get_many_key_value_mut([
+ /// "Bodleian Library",
/// "Gewandhaus",
/// ]);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let got = libraries.get_many_key_value_mut([
+ /// "Bodleian Library",
+ /// "Herzogin-Anna-Amalia-Bibliothek",
+ /// "Herzogin-Anna-Amalia-Bibliothek",
+ /// ]);
+ /// assert_eq!(got, None);
+ /// ```
+ pub fn get_many_key_value_mut<Q: ?Sized, const N: usize>(
+ &mut self,
+ ks: [&Q; N],
+ ) -> Option<[(&'_ K, &'_ mut V); N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ self.get_many_mut_inner(ks)
+ .map(|res| res.map(|(k, v)| (&*k, v)))
+ }
+
+ /// Attempts to get mutable references to `N` values in the map at once, with immutable
+ /// references to the corresponding keys, without validating that the values are unique.
+ ///
+ /// Returns an array of length `N` with the results of each query. `None` will be returned if
+ /// any of the keys are missing.
+ ///
+ /// For a safe alternative see [`get_many_key_value_mut`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+ /// references are not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut libraries = HashMap::new();
+ /// libraries.insert("Bodleian Library".to_string(), 1602);
+ /// libraries.insert("Athenæum".to_string(), 1807);
+ /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691);
+ /// libraries.insert("Library of Congress".to_string(), 1800);
+ ///
+ /// let got = unsafe { libraries.get_many_key_value_unchecked_mut([
+ /// "Bodleian Library",
+ /// "Herzogin-Anna-Amalia-Bibliothek",
+ /// ]) };
/// assert_eq!(
/// got,
- /// [
- /// Ok((&"Bodleian Library".to_string(), &mut 1602)),
- /// Ok((&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691)),
- /// Err(UnavailableMutError::Duplicate(1)),
- /// Err(UnavailableMutError::Absent),
- /// ]
+ /// Some([
+ /// (&"Bodleian Library".to_string(), &mut 1602),
+ /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691),
+ /// ]),
/// );
+ /// // Missing keys result in None
+ /// let got = unsafe { libraries.get_many_key_value_unchecked_mut([
+ /// "Bodleian Library",
+ /// "Gewandhaus",
+ /// ]) };
+ /// assert_eq!(got, None);
/// ```
- #[cfg(feature = "nightly")]
- pub fn get_each_key_value_mut<Q: ?Sized, const N: usize>(
+ pub unsafe fn get_many_key_value_unchecked_mut<Q: ?Sized, const N: usize>(
&mut self,
ks: [&Q; N],
- ) -> [Result<(&'_ K, &'_ mut V), UnavailableMutError>; N]
+ ) -> Option<[(&'_ K, &'_ mut V); N]>
where
K: Borrow<Q>,
Q: Hash + Eq,
{
- let mut pairs = self.get_each_inner_mut(ks);
- // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
- let mut out: [MaybeUninit<Result<(&'_ K, &'_ mut V), UnavailableMutError>>; N] =
- unsafe { MaybeUninit::uninit().assume_init() };
- for i in 0..N {
- out[i] = MaybeUninit::new(
- mem::replace(&mut pairs[i], Err(UnavailableMutError::Absent))
- .map(|(k, v)| (&*k, v)),
- );
- }
- unsafe { MaybeUninit::array_assume_init(out) }
+ self.get_many_unchecked_mut_inner(ks)
+ .map(|res| res.map(|(k, v)| (&*k, v)))
}
- #[cfg(feature = "nightly")]
- fn get_each_inner_mut<Q: ?Sized, const N: usize>(
+ fn get_many_mut_inner<Q: ?Sized, const N: usize>(
+ &mut self,
+ ks: [&Q; N],
+ ) -> Option<[&'_ mut (K, V); N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ let hashes = self.build_hashes_inner(ks);
+ self.table
+ .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ }
+
+ unsafe fn get_many_unchecked_mut_inner<Q: ?Sized, const N: usize>(
&mut self,
ks: [&Q; N],
- ) -> [Result<&'_ mut (K, V), UnavailableMutError>; N]
+ ) -> Option<[&'_ mut (K, V); N]>
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ let hashes = self.build_hashes_inner(ks);
+ self.table
+ .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ }
+
+ fn build_hashes_inner<Q: ?Sized, const N: usize>(&self, ks: [&Q; N]) -> [u64; N]
where
K: Borrow<Q>,
Q: Hash + Eq,
@@ -1228,8 +1471,7 @@ where
for i in 0..N {
hashes[i] = make_hash::<K, Q, S>(&self.hash_builder, ks[i]);
}
- self.table
- .get_each_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ hashes
}
/// Inserts a key-value pair into the map.
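As an aside on why `get_many_mut` exists: two simultaneous `get_mut` borrows cannot compile, so disjointness is checked at runtime instead, with duplicates yielding `None`. A minimal sketch (not part of the diff), assuming hashbrown 0.12:

```rust
use hashbrown::HashMap;

fn main() {
    let mut scores: HashMap<&str, i32> = HashMap::new();
    scores.insert("alice", 10);
    scores.insert("bob", 20);

    // Two simultaneous `get_mut` borrows would not compile; `get_many_mut`
    // verifies at runtime that the keys are distinct and hands back both.
    if let Some([a, b]) = scores.get_many_mut(["alice", "bob"]) {
        core::mem::swap(a, b); // mutate both values through one call
    }
    assert_eq!(scores["alice"], 20);

    // Duplicate keys are rejected rather than aliasing the same value.
    assert_eq!(scores.get_many_mut(["alice", "alice"]), None);
}
```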
@@ -1269,6 +1511,36 @@ where
}
}
+ /// Insert a key-value pair into the map without checking
+ /// if the key already exists in the map.
+ ///
+ /// Returns a reference to the key and value just inserted.
+ ///
+ /// This operation is safe if the key does not already exist in the map.
+ ///
+ /// However, if the key already exists in the map, the behavior is unspecified:
+ /// this operation may panic, loop forever, or any subsequent operation on the
+ /// map may panic, loop forever, or return an arbitrary result.
+ ///
+ /// That said, this operation and any subsequent operations are guaranteed
+ /// not to violate memory safety.
+ ///
+ /// This operation is faster than a regular insert because it does not perform
+ /// a lookup before insertion.
+ ///
+ /// This operation is useful during the initial population of the map.
+ /// For example, when constructing a map from another map, we know
+ /// the keys are unique.
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) {
+ let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
+ let bucket = self
+ .table
+ .insert(hash, (k, v), make_hasher::<K, _, V, S>(&self.hash_builder));
+ let (k_ref, v_ref) = unsafe { bucket.as_mut() };
+ (k_ref, v_ref)
+ }
+
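The doc comment above carries no example; here is a short sketch of the intended pattern, where the uniqueness invariant holds by construction because the keys come from an existing map:

```rust
use hashbrown::HashMap;

// Rebuild a map from a source whose keys are known to be unique,
// skipping the per-insert lookup that a regular `insert` performs.
fn double_values(src: HashMap<String, u32>) -> HashMap<String, u32> {
    let mut dst = HashMap::with_capacity(src.len());
    for (k, v) in src {
        // Sound here: every key occurs exactly once in `src`, so it
        // cannot already be present in `dst`.
        dst.insert_unique_unchecked(k, v * 2);
    }
    dst
}
```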
/// Tries to insert a key-value pair into the map, and returns
/// a mutable reference to the value in the entry.
///
@@ -1495,6 +1767,27 @@ where
}
}
+// The default hasher is used to match the std implementation signature
+#[cfg(feature = "ahash")]
+impl<K, V, A, const N: usize> From<[(K, V); N]> for HashMap<K, V, DefaultHashBuilder, A>
+where
+ K: Eq + Hash,
+ A: Default + Allocator + Clone,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let map1 = HashMap::from([(1, 2), (3, 4)]);
+ /// let map2: HashMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(arr: [(K, V); N]) -> Self {
+ arr.into_iter().collect()
+ }
+}
+
/// An iterator over the entries of a `HashMap`.
///
/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its
@@ -1575,6 +1868,88 @@ impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
}
}
+/// An owning iterator over the keys of a `HashMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`HashMap`].
+/// See its documentation for more.
+///
+/// [`into_keys`]: struct.HashMap.html#method.into_keys
+/// [`HashMap`]: struct.HashMap.html
+pub struct IntoKeys<K, V, A: Allocator + Clone = Global> {
+ inner: IntoIter<K, V, A>,
+}
+
+impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> {
+ type Item = K;
+
+ #[inline]
+ fn next(&mut self) -> Option<K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+
+impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.inner.iter().map(|(k, _)| k))
+ .finish()
+ }
+}
+
+/// An owning iterator over the values of a `HashMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`HashMap`].
+/// See its documentation for more.
+///
+/// [`into_values`]: struct.HashMap.html#method.into_values
+/// [`HashMap`]: struct.HashMap.html
+pub struct IntoValues<K, V, A: Allocator + Clone = Global> {
+ inner: IntoIter<K, V, A>,
+}
+
+impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
+ type Item = V;
+
+ #[inline]
+ fn next(&mut self) -> Option<V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+
+impl<K, V: Debug, A: Allocator + Clone> fmt::Debug for IntoValues<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.inner.iter().map(|(_, v)| v))
+ .finish()
+ }
+}
+
/// An iterator over the keys of a `HashMap`.
///
/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its
@@ -1686,7 +2061,7 @@ pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T);
impl<T: Iterator> Drop for ConsumeAllOnDrop<'_, T> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
- self.0.for_each(drop)
+ self.0.for_each(drop);
}
}
@@ -1723,7 +2098,7 @@ impl<K, V, A: Allocator + Clone> DrainFilterInner<'_, K, V, A> {
F: FnMut(&K, &mut V) -> bool,
{
unsafe {
- while let Some(item) = self.iter.next() {
+ for item in &mut self.iter {
let &mut (ref key, ref mut value) = item.as_mut();
if f(key, value) {
return Some(self.table.remove(item));
@@ -1786,6 +2161,7 @@ unsafe impl<K, V, S, A> Send for RawOccupiedEntryMut<'_, K, V, S, A>
where
K: Send,
V: Send,
+ S: Send,
A: Send + Allocator + Clone,
{
}
@@ -1793,7 +2169,8 @@ unsafe impl<K, V, S, A> Sync for RawOccupiedEntryMut<'_, K, V, S, A>
where
K: Sync,
V: Sync,
- A: Send + Allocator + Clone,
+ S: Sync,
+ A: Sync + Allocator + Clone,
{
}
@@ -2413,6 +2790,119 @@ impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A>
}
}
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry_ref`] method on [`HashMap`].
+///
+/// [`HashMap`]: struct.HashMap.html
+/// [`entry_ref`]: struct.HashMap.html#method.entry_ref
+pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global>
+where
+ A: Allocator + Clone,
+{
+ /// An occupied entry.
+ Occupied(OccupiedEntryRef<'a, 'b, K, Q, V, S, A>),
+
+ /// A vacant entry.
+ Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>),
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+ for EntryRef<'_, '_, K, Q, V, S, A>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ EntryRef::Vacant(ref v) => f.debug_tuple("EntryRef").field(v).finish(),
+ EntryRef::Occupied(ref o) => f.debug_tuple("EntryRef").field(o).finish(),
+ }
+ }
+}
+
+enum KeyOrRef<'a, K, Q: ?Sized> {
+ Borrowed(&'a Q),
+ Owned(K),
+}
+
+impl<'a, K, Q: ?Sized> KeyOrRef<'a, K, Q> {
+ fn into_owned(self) -> K
+ where
+ K: From<&'a Q>,
+ {
+ match self {
+ Self::Borrowed(borrowed) => borrowed.into(),
+ Self::Owned(owned) => owned,
+ }
+ }
+}
+
+impl<'a, K: Borrow<Q>, Q: ?Sized> AsRef<Q> for KeyOrRef<'a, K, Q> {
+ fn as_ref(&self) -> &Q {
+ match self {
+ Self::Borrowed(borrowed) => borrowed,
+ Self::Owned(owned) => owned.borrow(),
+ }
+ }
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`EntryRef`] enum.
+///
+/// [`EntryRef`]: enum.EntryRef.html
+pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+ hash: u64,
+ key: Option<KeyOrRef<'b, K, Q>>,
+ elem: Bucket<(K, V)>,
+ table: &'a mut HashMap<K, V, S, A>,
+}
+
+unsafe impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+where
+ K: Send,
+ Q: Sync + ?Sized,
+ V: Send,
+ S: Send,
+ A: Send + Allocator + Clone,
+{
+}
+unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+where
+ K: Sync,
+ Q: Sync + ?Sized,
+ V: Sync,
+ S: Sync,
+ A: Sync + Allocator + Clone,
+{
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+ for OccupiedEntryRef<'_, '_, K, Q, V, S, A>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntryRef")
+ .field("key", &self.key())
+ .field("value", &self.get())
+ .finish()
+ }
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`EntryRef`] enum.
+///
+/// [`EntryRef`]: enum.EntryRef.html
+pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+ hash: u64,
+ key: KeyOrRef<'b, K, Q>,
+ table: &'a mut HashMap<K, V, S, A>,
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug
+ for VacantEntryRef<'_, '_, K, Q, V, S, A>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("VacantEntryRef").field(&self.key()).finish()
+ }
+}
+
/// The error returned by [`try_insert`](HashMap::try_insert) when the key already exists.
///
/// Contains the occupied entry, and the value that was not inserted.
@@ -2659,13 +3149,11 @@ impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
}
impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
-impl<K, V> fmt::Debug for ValuesMut<'_, K, V>
-where
- K: fmt::Debug,
- V: fmt::Debug,
-{
+impl<K, V: Debug> fmt::Debug for ValuesMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_list().entries(self.inner.iter()).finish()
+ f.debug_list()
+ .entries(self.inner.iter().map(|(_, val)| val))
+ .finish()
}
}
@@ -3362,6 +3850,689 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
}
}
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+ /// Sets the value of the entry, and returns an OccupiedEntryRef.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// let entry = map.entry_ref("horseyland").insert(37);
+ ///
+ /// assert_eq!(entry.key(), "horseyland");
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn insert(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ match self {
+ EntryRef::Occupied(mut entry) => {
+ entry.insert(value);
+ entry
+ }
+ EntryRef::Vacant(entry) => entry.insert_entry(value),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ ///
+ /// map.entry_ref("poneyland").or_insert(3);
+ /// assert_eq!(map["poneyland"], 3);
+ ///
+ /// *map.entry_ref("poneyland").or_insert(10) *= 2;
+ /// assert_eq!(map["poneyland"], 6);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn or_insert(self, default: V) -> &'a mut V
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ match self {
+ EntryRef::Occupied(entry) => entry.into_mut(),
+ EntryRef::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, String> = HashMap::new();
+ /// let s = "hoho".to_string();
+ ///
+ /// map.entry_ref("poneyland").or_insert_with(|| s);
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ match self {
+ EntryRef::Occupied(entry) => entry.into_mut(),
+ EntryRef::Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+ /// This method allows for generating key-derived values for insertion by providing the default
+ /// function a reference to the key that was passed to the `.entry_ref(key)` method call.
+ ///
+ /// The reference to the key is provided so that cloning or copying the key is
+ /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, usize> = HashMap::new();
+ ///
+ /// map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count());
+ ///
+ /// assert_eq!(map["poneyland"], 9);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn or_insert_with_key<F: FnOnce(&Q) -> V>(self, default: F) -> &'a mut V
+ where
+ K: Hash + Borrow<Q> + From<&'b Q>,
+ S: BuildHasher,
+ {
+ match self {
+ EntryRef::Occupied(entry) => entry.into_mut(),
+ EntryRef::Vacant(entry) => {
+ let value = default(entry.key.as_ref());
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Returns a reference to this entry's key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// assert_eq!(map.entry_ref("poneyland").key(), "poneyland");
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn key(&self) -> &Q
+ where
+ K: Borrow<Q>,
+ {
+ match *self {
+ EntryRef::Occupied(ref entry) => entry.key(),
+ EntryRef::Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ ///
+ /// map.entry_ref("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.entry_ref("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ EntryRef::Occupied(mut entry) => {
+ f(entry.get_mut());
+ EntryRef::Occupied(entry)
+ }
+ EntryRef::Vacant(entry) => EntryRef::Vacant(entry),
+ }
+ }
+
+ /// Provides shared access to the key and owned access to the value of
+ /// an occupied entry, and allows replacing or removing it based on the
+ /// value of the returned option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ ///
+ /// let entry = map
+ /// .entry_ref("poneyland")
+ /// .and_replace_entry_with(|_k, _v| panic!());
+ ///
+ /// match entry {
+ /// EntryRef::Vacant(e) => {
+ /// assert_eq!(e.key(), "poneyland");
+ /// }
+ /// EntryRef::Occupied(_) => panic!(),
+ /// }
+ ///
+ /// map.insert("poneyland".to_string(), 42);
+ ///
+ /// let entry = map
+ /// .entry_ref("poneyland")
+ /// .and_replace_entry_with(|k, v| {
+ /// assert_eq!(k, "poneyland");
+ /// assert_eq!(v, 42);
+ /// Some(v + 1)
+ /// });
+ ///
+ /// match entry {
+ /// EntryRef::Occupied(e) => {
+ /// assert_eq!(e.key(), "poneyland");
+ /// assert_eq!(e.get(), &43);
+ /// }
+ /// EntryRef::Vacant(_) => panic!(),
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 43);
+ ///
+ /// let entry = map
+ /// .entry_ref("poneyland")
+ /// .and_replace_entry_with(|_k, _v| None);
+ ///
+ /// match entry {
+ /// EntryRef::Vacant(e) => assert_eq!(e.key(), "poneyland"),
+ /// EntryRef::Occupied(_) => panic!(),
+ /// }
+ ///
+ /// assert!(!map.contains_key("poneyland"));
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn and_replace_entry_with<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&Q, V) -> Option<V>,
+ K: Borrow<Q>,
+ {
+ match self {
+ EntryRef::Occupied(entry) => entry.replace_entry_with(f),
+ EntryRef::Vacant(_) => self,
+ }
+ }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+ /// Ensures a value is in the entry by inserting the default value if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
+ /// map.entry("poneyland").or_default();
+ ///
+ /// assert_eq!(map["poneyland"], None);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn or_default(self) -> &'a mut V
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ match self {
+ EntryRef::Occupied(entry) => entry.into_mut(),
+ EntryRef::Vacant(entry) => entry.insert(Default::default()),
+ }
+ }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> {
+ /// Gets a reference to the key in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ /// assert_eq!(map.entry_ref("poneyland").key(), "poneyland");
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn key(&self) -> &Q
+ where
+ K: Borrow<Q>,
+ {
+ unsafe { &self.elem.as_ref().0 }.borrow()
+ }
+
+ /// Take the ownership of the key and value from the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+ /// // We delete the entry from the map.
+ /// o.remove_entry();
+ /// }
+ ///
+ /// assert_eq!(map.contains_key("poneyland"), false);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn remove_entry(self) -> (K, V) {
+ unsafe { self.table.table.remove(self.elem) }
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+ /// assert_eq!(o.get(), &12);
+ /// }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn get(&self) -> &V {
+ unsafe { &self.elem.as_ref().1 }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntryRef` which may outlive the
+ /// destruction of the `EntryRef` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: #method.into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") {
+ /// *o.get_mut() += 10;
+ /// assert_eq!(*o.get(), 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// *o.get_mut() += 2;
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 24);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn get_mut(&mut self) -> &mut V {
+ unsafe { &mut self.elem.as_mut().1 }
+ }
+
+ /// Converts the OccupiedEntryRef into a mutable reference to the value in the entry
+ /// with a lifetime bound to the map itself.
+ ///
+ /// If you need multiple references to the `OccupiedEntryRef`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: #method.get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+ /// *o.into_mut() += 10;
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 22);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn into_mut(self) -> &'a mut V {
+ unsafe { &mut self.elem.as_mut().1 }
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") {
+ /// assert_eq!(o.insert(15), 12);
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 15);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn insert(&mut self, mut value: V) -> V {
+ let old_value = self.get_mut();
+ mem::swap(&mut value, old_value);
+ value
+ }
+
+ /// Takes the value out of the entry, and returns it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.entry_ref("poneyland").or_insert(12);
+ ///
+ /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+ /// assert_eq!(o.remove(), 12);
+ /// }
+ ///
+ /// assert_eq!(map.contains_key("poneyland"), false);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn remove(self) -> V {
+ self.remove_entry().1
+ }
+
+ /// Replaces the entry, returning the old key and value. The new key in the hash map will be
+ /// the key used to create this entry.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::hash_map::{EntryRef, HashMap};
+ /// use std::rc::Rc;
+ ///
+ /// let mut map: HashMap<Rc<str>, u32> = HashMap::new();
+ /// map.insert(Rc::from("Stringthing"), 15);
+ ///
+ /// if let EntryRef::Occupied(entry) = map.entry_ref("Stringthing") {
+ /// // Also replace the key with a handle to our other key.
+ /// let (old_key, old_value): (Rc<str>, u32) = entry.replace_entry(16);
+ /// }
+ ///
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn replace_entry(self, value: V) -> (K, V)
+ where
+ K: From<&'b Q>,
+ {
+ let entry = unsafe { self.elem.as_mut() };
+
+ let old_key = mem::replace(&mut entry.0, self.key.unwrap().into_owned());
+ let old_value = mem::replace(&mut entry.1, value);
+
+ (old_key, old_value)
+ }
+
+ /// Replaces the key in the hash map with the key used to create this entry.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::hash_map::{EntryRef, HashMap};
+ /// use std::rc::Rc;
+ ///
+ /// let mut map: HashMap<Rc<str>, u32> = HashMap::new();
+ /// let mut known_strings: Vec<Rc<str>> = Vec::new();
+ ///
+ /// // Initialise known strings, run program, etc.
+ ///
+ /// reclaim_memory(&mut map, &known_strings);
+ ///
+ /// fn reclaim_memory(map: &mut HashMap<Rc<str>, u32>, known_strings: &[Rc<str>] ) {
+ /// for s in known_strings {
+ /// if let EntryRef::Occupied(entry) = map.entry_ref(s.as_ref()) {
+ /// // Replaces the entry's key with our version of it in `known_strings`.
+ /// entry.replace_key();
+ /// }
+ /// }
+ /// }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn replace_key(self) -> K
+ where
+ K: From<&'b Q>,
+ {
+ let entry = unsafe { self.elem.as_mut() };
+ mem::replace(&mut entry.0, self.key.unwrap().into_owned())
+ }
+
+ /// Provides shared access to the key and owned access to the value of
+ /// the entry, and allows replacing or removing it based on the
+ /// value of the returned option.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// map.insert("poneyland".to_string(), 42);
+ ///
+ /// let entry = match map.entry_ref("poneyland") {
+ /// EntryRef::Occupied(e) => {
+ /// e.replace_entry_with(|k, v| {
+ /// assert_eq!(k, "poneyland");
+ /// assert_eq!(v, 42);
+ /// Some(v + 1)
+ /// })
+ /// }
+ /// EntryRef::Vacant(_) => panic!(),
+ /// };
+ ///
+ /// match entry {
+ /// EntryRef::Occupied(e) => {
+ /// assert_eq!(e.key(), "poneyland");
+ /// assert_eq!(e.get(), &43);
+ /// }
+ /// EntryRef::Vacant(_) => panic!(),
+ /// }
+ ///
+ /// assert_eq!(map["poneyland"], 43);
+ ///
+ /// let entry = match map.entry_ref("poneyland") {
+ /// EntryRef::Occupied(e) => e.replace_entry_with(|_k, _v| None),
+ /// EntryRef::Vacant(_) => panic!(),
+ /// };
+ ///
+ /// match entry {
+ /// EntryRef::Vacant(e) => {
+ /// assert_eq!(e.key(), "poneyland");
+ /// }
+ /// EntryRef::Occupied(_) => panic!(),
+ /// }
+ ///
+ /// assert!(!map.contains_key("poneyland"));
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn replace_entry_with<F>(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A>
+ where
+ F: FnOnce(&Q, V) -> Option<V>,
+ K: Borrow<Q>,
+ {
+ unsafe {
+ let mut spare_key = None;
+
+ self.table
+ .table
+ .replace_bucket_with(self.elem.clone(), |(key, value)| {
+ if let Some(new_value) = f(key.borrow(), value) {
+ Some((key, new_value))
+ } else {
+ spare_key = Some(KeyOrRef::Owned(key));
+ None
+ }
+ });
+
+ if let Some(key) = spare_key {
+ EntryRef::Vacant(VacantEntryRef {
+ hash: self.hash,
+ key,
+ table: self.table,
+ })
+ } else {
+ EntryRef::Occupied(self)
+ }
+ }
+ }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> {
+ /// Gets a reference to the key that would be used when inserting a value
+ /// through the `VacantEntryRef`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// let key: &str = "poneyland";
+ /// assert_eq!(map.entry_ref(key).key(), "poneyland");
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn key(&self) -> &Q
+ where
+ K: Borrow<Q>,
+ {
+ self.key.as_ref()
+ }
+
+ /// Take ownership of the key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// let key: &str = "poneyland";
+ ///
+ /// if let EntryRef::Vacant(v) = map.entry_ref(key) {
+ /// v.into_key();
+ /// }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn into_key(self) -> K
+ where
+ K: From<&'b Q>,
+ {
+ self.key.into_owned()
+ }
+
+ /// Sets the value of the entry with the `VacantEntryRef`'s key,
+ /// and returns a mutable reference to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use hashbrown::hash_map::EntryRef;
+ ///
+ /// let mut map: HashMap<String, u32> = HashMap::new();
+ /// let key: &str = "poneyland";
+ ///
+ /// if let EntryRef::Vacant(o) = map.entry_ref(key) {
+ /// o.insert(37);
+ /// }
+ /// assert_eq!(map["poneyland"], 37);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn insert(self, value: V) -> &'a mut V
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ let table = &mut self.table.table;
+ let entry = table.insert_entry(
+ self.hash,
+ (self.key.into_owned(), value),
+ make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ );
+ &mut entry.1
+ }
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn insert_entry(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+ where
+ K: Hash + From<&'b Q>,
+ S: BuildHasher,
+ {
+ let elem = self.table.table.insert(
+ self.hash,
+ (self.key.into_owned(), value),
+ make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ );
+ OccupiedEntryRef {
+ hash: self.hash,
+ key: None,
+ elem,
+ table: self.table,
+ }
+ }
+}
+
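Taken together, the `EntryRef`/`OccupiedEntryRef`/`VacantEntryRef` additions let a `HashMap<String, V>` be probed with a borrowed `&str`, deferring the owned-key allocation (via the `K: From<&Q>` bound) until an insert actually happens. A minimal sketch of that pattern, not part of the patch:

```
use hashbrown::HashMap;

fn count_words(words: &[&str]) -> HashMap<String, u32> {
    let mut counts: HashMap<String, u32> = HashMap::new();
    for &word in words {
        // `entry_ref` hashes the borrowed `&str`; a `String` is only
        // allocated (through `From<&str>`) when the entry is vacant.
        *counts.entry_ref(word).or_insert(0) += 1;
    }
    counts
}
```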
impl<K, V, S, A> FromIterator<(K, V)> for HashMap<K, V, S, A>
where
K: Eq + Hash,
@@ -3500,8 +4671,8 @@ fn assert_covariance() {
mod test_map {
use super::DefaultHashBuilder;
use super::Entry::{Occupied, Vacant};
+ use super::EntryRef;
use super::{HashMap, RawEntryMut};
- use crate::TryReserveError::*;
use rand::{rngs::SmallRng, Rng, SeedableRng};
use std::borrow::ToOwned;
use std::cell::RefCell;
@@ -3570,6 +4741,7 @@ mod test_map {
assert_eq!(m.len(), 1);
assert!(m.insert(2, 4).is_none());
assert_eq!(m.len(), 2);
+ #[allow(clippy::redundant_clone)]
let m2 = m.clone();
assert_eq!(*m2.get(&1).unwrap(), 2);
assert_eq!(*m2.get(&2).unwrap(), 4);
@@ -3723,6 +4895,7 @@ mod test_map {
}
});
+ #[allow(clippy::let_underscore_drop)] // kind of a false positive
for _ in half.by_ref() {}
DROP_VECTOR.with(|v| {
@@ -3760,6 +4933,17 @@ mod test_map {
}
#[test]
+ fn test_empty_entry_ref() {
+ let mut m: HashMap<std::string::String, bool> = HashMap::new();
+ match m.entry_ref("poneyland") {
+ EntryRef::Occupied(_) => panic!(),
+ EntryRef::Vacant(_) => {}
+ }
+ assert!(*m.entry_ref("poneyland").or_insert(true));
+ assert_eq!(m.len(), 1);
+ }
+
+ #[test]
fn test_empty_iter() {
let mut m: HashMap<i32, bool> = HashMap::new();
assert_eq!(m.drain().next(), None);
@@ -3856,7 +5040,7 @@ mod test_map {
let mut m = HashMap::new();
assert!(m.insert(1, 2).is_none());
assert_eq!(*m.get(&1).unwrap(), 2);
- assert!(!m.insert(1, 3).is_none());
+ assert!(m.insert(1, 3).is_some());
assert_eq!(*m.get(&1).unwrap(), 3);
}
@@ -3889,6 +5073,18 @@ mod test_map {
}
#[test]
+ fn test_insert_unique_unchecked() {
+ let mut map = HashMap::new();
+ let (k1, v1) = map.insert_unique_unchecked(10, 11);
+ assert_eq!((&10, &mut 11), (k1, v1));
+ let (k2, v2) = map.insert_unique_unchecked(20, 21);
+ assert_eq!((&20, &mut 21), (k2, v2));
+ assert_eq!(Some(&11), map.get(&10));
+ assert_eq!(Some(&21), map.get(&20));
+ assert_eq!(None, map.get(&30));
+ }
+
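`insert_unique_unchecked`, exercised by the test above, skips the probe for an existing equal key; the caller is responsible for the uniqueness invariant. A hedged sketch of the intended bulk-load use, assuming keys that are known to be distinct:

```
use hashbrown::HashMap;

// Keys produced by `enumerate` are distinct by construction, so the
// uniqueness contract of `insert_unique_unchecked` holds here.
fn index_lines(lines: &[&str]) -> HashMap<usize, String> {
    let mut map = HashMap::with_capacity(lines.len());
    for (i, line) in lines.iter().enumerate() {
        map.insert_unique_unchecked(i, line.to_string());
    }
    map
}
```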
+ #[test]
fn test_is_empty() {
let mut m = HashMap::with_capacity(4);
assert!(m.insert(1, 2).is_none());
@@ -3934,7 +5130,7 @@ mod test_map {
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_iter().collect();
- let keys: Vec<_> = map.keys().cloned().collect();
+ let keys: Vec<_> = map.keys().copied().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
@@ -3945,7 +5141,7 @@ mod test_map {
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_iter().collect();
- let values: Vec<_> = map.values().cloned().collect();
+ let values: Vec<_> = map.values().copied().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
@@ -3957,9 +5153,9 @@ mod test_map {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_iter().collect();
for value in map.values_mut() {
- *value = (*value) * 2
+ *value *= 2;
}
- let values: Vec<_> = map.values().cloned().collect();
+ let values: Vec<_> = map.values().copied().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
@@ -3967,6 +5163,30 @@ mod test_map {
}
#[test]
+ fn test_into_keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<_> = map.into_keys().collect();
+
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn test_into_values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: HashMap<_, _> = vec.into_iter().collect();
+ let values: Vec<_> = map.into_values().collect();
+
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
fn test_find() {
let mut m = HashMap::new();
assert!(m.get(&1).is_none());
@@ -4124,7 +5344,7 @@ mod test_map {
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
- let map: HashMap<_, _> = xs.iter().cloned().collect();
+ let map: HashMap<_, _> = xs.iter().copied().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
@@ -4137,7 +5357,7 @@ mod test_map {
fn test_size_hint() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
- let map: HashMap<_, _> = xs.iter().cloned().collect();
+ let map: HashMap<_, _> = xs.iter().copied().collect();
let mut iter = map.iter();
@@ -4150,7 +5370,7 @@ mod test_map {
fn test_iter_len() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
- let map: HashMap<_, _> = xs.iter().cloned().collect();
+ let map: HashMap<_, _> = xs.iter().copied().collect();
let mut iter = map.iter();
@@ -4163,7 +5383,7 @@ mod test_map {
fn test_mut_size_hint() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
- let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+ let mut map: HashMap<_, _> = xs.iter().copied().collect();
let mut iter = map.iter_mut();
@@ -4176,7 +5396,7 @@ mod test_map {
fn test_iter_mut_len() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
- let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+ let mut map: HashMap<_, _> = xs.iter().copied().collect();
let mut iter = map.iter_mut();
@@ -4205,6 +5425,7 @@ mod test_map {
map.insert(2, 1);
map.insert(3, 4);
+ #[allow(clippy::no_effect)] // false positive lint
map[&4];
}
@@ -4212,7 +5433,7 @@ mod test_map {
fn test_entry() {
let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
- let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+ let mut map: HashMap<_, _> = xs.iter().copied().collect();
// Existing key (insert)
match map.entry(1) {
@@ -4259,6 +5480,63 @@ mod test_map {
}
#[test]
+ fn test_entry_ref() {
+ let xs = [
+ ("One".to_owned(), 10),
+ ("Two".to_owned(), 20),
+ ("Three".to_owned(), 30),
+ ("Four".to_owned(), 40),
+ ("Five".to_owned(), 50),
+ ("Six".to_owned(), 60),
+ ];
+
+ let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+
+ // Existing key (insert)
+ match map.entry_ref("One") {
+ EntryRef::Vacant(_) => unreachable!(),
+ EntryRef::Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ assert_eq!(map.get("One").unwrap(), &100);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.entry_ref("Two") {
+ EntryRef::Vacant(_) => unreachable!(),
+ EntryRef::Occupied(mut view) => {
+ let v = view.get_mut();
+ let new_v = (*v) * 10;
+ *v = new_v;
+ }
+ }
+ assert_eq!(map.get("Two").unwrap(), &200);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (take)
+ match map.entry_ref("Three") {
+ EntryRef::Vacant(_) => unreachable!(),
+ EntryRef::Occupied(view) => {
+ assert_eq!(view.remove(), 30);
+ }
+ }
+ assert_eq!(map.get("Three"), None);
+ assert_eq!(map.len(), 5);
+
+ // Nonexistent key (insert)
+ match map.entry_ref("Ten") {
+ EntryRef::Occupied(_) => unreachable!(),
+ EntryRef::Vacant(view) => {
+ assert_eq!(*view.insert(1000), 1000);
+ }
+ }
+ assert_eq!(map.get("Ten").unwrap(), &1000);
+ assert_eq!(map.len(), 6);
+ }
+
+ #[test]
fn test_entry_take_doesnt_corrupt() {
#![allow(deprecated)] //rand
// Test for #19292
@@ -4271,18 +5549,18 @@ mod test_map {
let mut m = HashMap::new();
let mut rng = {
- let seed = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
- SmallRng::from_seed(seed)
+ let seed = u64::from_le_bytes(*b"testseed");
+ SmallRng::seed_from_u64(seed)
};
// Populate the map with some items.
for _ in 0..50 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
m.insert(x, ());
}
for _ in 0..1000 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
match m.entry(x) {
Vacant(_) => {}
Occupied(e) => {
@@ -4295,6 +5573,44 @@ mod test_map {
}
#[test]
+ fn test_entry_ref_take_doesnt_corrupt() {
+ #![allow(deprecated)] //rand
+ // Test for #19292
+ fn check(m: &HashMap<std::string::String, ()>) {
+ for k in m.keys() {
+ assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ }
+ }
+
+ let mut m = HashMap::new();
+
+ let mut rng = {
+ let seed = u64::from_le_bytes(*b"testseed");
+ SmallRng::seed_from_u64(seed)
+ };
+
+ // Populate the map with some items.
+ for _ in 0..50 {
+ let mut x = std::string::String::with_capacity(1);
+ x.push(rng.gen_range('a'..='z'));
+ m.insert(x, ());
+ }
+
+ for _ in 0..1000 {
+ let mut x = std::string::String::with_capacity(1);
+ x.push(rng.gen_range('a'..='z'));
+ match m.entry_ref(x.as_str()) {
+ EntryRef::Vacant(_) => {}
+ EntryRef::Occupied(e) => {
+ e.remove();
+ }
+ }
+
+ check(&m);
+ }
+ }
+
+ #[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
@@ -4341,11 +5657,11 @@ mod test_map {
let key = "hello there";
let value = "value goes here";
assert!(a.is_empty());
- a.insert(key.clone(), value.clone());
+ a.insert(key, value);
assert_eq!(a.len(), 1);
assert_eq!(a[key], value);
- match a.entry(key.clone()) {
+ match a.entry(key) {
Vacant(_) => panic!(),
Occupied(e) => assert_eq!(key, *e.key()),
}
@@ -4354,17 +5670,53 @@ mod test_map {
}
#[test]
+ fn test_occupied_entry_ref_key() {
+ let mut a = HashMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+ assert!(a.is_empty());
+ a.insert(key.to_owned(), value);
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+
+ match a.entry_ref(key) {
+ EntryRef::Vacant(_) => panic!(),
+ EntryRef::Occupied(e) => assert_eq!(key, e.key()),
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ }
+
+ #[test]
fn test_vacant_entry_key() {
let mut a = HashMap::new();
let key = "hello there";
let value = "value goes here";
assert!(a.is_empty());
- match a.entry(key.clone()) {
+ match a.entry(key) {
Occupied(_) => panic!(),
Vacant(e) => {
assert_eq!(key, *e.key());
- e.insert(value.clone());
+ e.insert(value);
+ }
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ }
+
+ #[test]
+ fn test_vacant_entry_ref_key() {
+ let mut a: HashMap<std::string::String, &str> = HashMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+
+ assert!(a.is_empty());
+ match a.entry_ref(key) {
+ EntryRef::Occupied(_) => panic!(),
+ EntryRef::Vacant(e) => {
+ assert_eq!(key, e.key());
+ e.insert(value);
}
}
assert_eq!(a.len(), 1);
@@ -4415,6 +5767,49 @@ mod test_map {
}
#[test]
+ fn test_occupied_entry_ref_replace_entry_with() {
+ let mut a: HashMap<std::string::String, &str> = HashMap::new();
+
+ let key = "a key";
+ let value = "an initial value";
+ let new_value = "a new value";
+
+ let entry = a.entry_ref(key).insert(value).replace_entry_with(|k, v| {
+ assert_eq!(k, key);
+ assert_eq!(v, value);
+ Some(new_value)
+ });
+
+ match entry {
+ EntryRef::Occupied(e) => {
+ assert_eq!(e.key(), key);
+ assert_eq!(e.get(), &new_value);
+ }
+ EntryRef::Vacant(_) => panic!(),
+ }
+
+ assert_eq!(a[key], new_value);
+ assert_eq!(a.len(), 1);
+
+ let entry = match a.entry_ref(key) {
+ EntryRef::Occupied(e) => e.replace_entry_with(|k, v| {
+ assert_eq!(k, key);
+ assert_eq!(v, new_value);
+ None
+ }),
+ EntryRef::Vacant(_) => panic!(),
+ };
+
+ match entry {
+ EntryRef::Vacant(e) => assert_eq!(e.key(), key),
+ EntryRef::Occupied(_) => panic!(),
+ }
+
+ assert!(!a.contains_key(key));
+ assert_eq!(a.len(), 0);
+ }
+
+ #[test]
fn test_entry_and_replace_entry_with() {
let mut a = HashMap::new();
@@ -4464,6 +5859,55 @@ mod test_map {
}
#[test]
+ fn test_entry_ref_and_replace_entry_with() {
+ let mut a = HashMap::new();
+
+ let key = "a key";
+ let value = "an initial value";
+ let new_value = "a new value";
+
+ let entry = a.entry_ref(key).and_replace_entry_with(|_, _| panic!());
+
+ match entry {
+ EntryRef::Vacant(e) => assert_eq!(e.key(), key),
+ EntryRef::Occupied(_) => panic!(),
+ }
+
+ a.insert(key.to_owned(), value);
+
+ let entry = a.entry_ref(key).and_replace_entry_with(|k, v| {
+ assert_eq!(k, key);
+ assert_eq!(v, value);
+ Some(new_value)
+ });
+
+ match entry {
+ EntryRef::Occupied(e) => {
+ assert_eq!(e.key(), key);
+ assert_eq!(e.get(), &new_value);
+ }
+ EntryRef::Vacant(_) => panic!(),
+ }
+
+ assert_eq!(a[key], new_value);
+ assert_eq!(a.len(), 1);
+
+ let entry = a.entry_ref(key).and_replace_entry_with(|k, v| {
+ assert_eq!(k, key);
+ assert_eq!(v, new_value);
+ None
+ });
+
+ match entry {
+ EntryRef::Vacant(e) => assert_eq!(e.key(), key),
+ EntryRef::Occupied(_) => panic!(),
+ }
+
+ assert!(!a.contains_key(key));
+ assert_eq!(a.len(), 0);
+ }
+
+ #[test]
fn test_raw_occupied_entry_replace_entry_with() {
let mut a = HashMap::new();
@@ -4581,24 +6025,56 @@ mod test_map {
let mut m = HashMap::new();
let mut rng = {
- let seed = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
- SmallRng::from_seed(seed)
+ let seed = u64::from_le_bytes(*b"testseed");
+ SmallRng::seed_from_u64(seed)
};
// Populate the map with some items.
for _ in 0..50 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
m.insert(x, ());
}
for _ in 0..1000 {
- let x = rng.gen_range(-10, 10);
+ let x = rng.gen_range(-10..10);
m.entry(x).and_replace_entry_with(|_, _| None);
check(&m);
}
}
#[test]
+ fn test_replace_entry_ref_with_doesnt_corrupt() {
+ #![allow(deprecated)] //rand
+ // Test for #19292
+ fn check(m: &HashMap<std::string::String, ()>) {
+ for k in m.keys() {
+ assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ }
+ }
+
+ let mut m = HashMap::new();
+
+ let mut rng = {
+ let seed = u64::from_le_bytes(*b"testseed");
+ SmallRng::seed_from_u64(seed)
+ };
+
+ // Populate the map with some items.
+ for _ in 0..50 {
+ let mut x = std::string::String::with_capacity(1);
+ x.push(rng.gen_range('a'..='z'));
+ m.insert(x, ());
+ }
+
+ for _ in 0..1000 {
+ let mut x = std::string::String::with_capacity(1);
+ x.push(rng.gen_range('a'..='z'));
+ m.entry_ref(x.as_str()).and_replace_entry_with(|_, _| None);
+ check(&m);
+ }
+ }
+
+ #[test]
fn test_retain() {
let mut map: HashMap<i32, i32> = (0..100).map(|x| (x, x * 10)).collect();
@@ -4629,21 +6105,27 @@ mod test_map {
#[test]
#[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613)
fn test_try_reserve() {
- let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
+ use crate::TryReserveError::{AllocError, CapacityOverflow};
const MAX_USIZE: usize = usize::MAX;
+ let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
+
if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 8) {
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) {
} else {
// This may succeed if there is enough free memory. Attempt to
- // allocate a second hashmap to ensure the allocation will fail.
+ // allocate a few more hashmaps to ensure the allocation will fail.
let mut empty_bytes2: HashMap<u8, u8> = HashMap::new();
- if let Err(AllocError { .. }) = empty_bytes2.try_reserve(MAX_USIZE / 8) {
+ let _ = empty_bytes2.try_reserve(MAX_USIZE / 16);
+ let mut empty_bytes3: HashMap<u8, u8> = HashMap::new();
+ let _ = empty_bytes3.try_reserve(MAX_USIZE / 16);
+ let mut empty_bytes4: HashMap<u8, u8> = HashMap::new();
+ if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) {
} else {
panic!("usize::MAX / 8 should trigger an OOM!");
}
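The reworked test distinguishes the two `TryReserveError` variants; downstream code can branch on them the same way. A small sketch (function name hypothetical), assuming `TryReserveError` as exported at the crate root:

```
use hashbrown::{HashMap, TryReserveError};

fn grow_or_report(map: &mut HashMap<u64, u64>, additional: usize) -> Result<(), String> {
    match map.try_reserve(additional) {
        Ok(()) => Ok(()),
        Err(TryReserveError::CapacityOverflow) => {
            Err("requested capacity overflows usize".to_string())
        }
        Err(TryReserveError::AllocError { layout }) => {
            Err(format!("allocator refused {} bytes", layout.size()))
        }
    }
}
```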
@@ -4654,9 +6136,9 @@ mod test_map {
fn test_raw_entry() {
use super::RawEntryMut::{Occupied, Vacant};
- let xs = [(1i32, 10i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+ let xs = [(1_i32, 10_i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
- let mut map: HashMap<_, _> = xs.iter().cloned().collect();
+ let mut map: HashMap<_, _> = xs.iter().copied().collect();
let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
super::make_insert_hash::<i32, _>(map.hasher(), &k)
@@ -4729,7 +6211,7 @@ mod test_map {
// Ensure all lookup methods produce equivalent results.
for k in 0..12 {
let hash = compute_hash(&map, k);
- let v = map.get(&k).cloned();
+ let v = map.get(&k).copied();
let kv = v.as_ref().map(|v| (&k, v));
assert_eq!(map.raw_entry().from_key(&k), kv);
@@ -4799,8 +6281,6 @@ mod test_map {
#[test]
#[cfg(feature = "raw")]
fn test_into_iter_refresh() {
- use core::hash::{BuildHasher, Hash, Hasher};
-
#[cfg(miri)]
const N: usize = 32;
#[cfg(not(miri))]
@@ -4808,13 +6288,13 @@ mod test_map {
let mut rng = rand::thread_rng();
for n in 0..N {
- let mut m = HashMap::new();
+ let mut map = HashMap::new();
for i in 0..n {
- assert!(m.insert(i, 2 * i).is_none());
+ assert!(map.insert(i, 2 * i).is_none());
}
- let hasher = m.hasher().clone();
+ let hash_builder = map.hasher().clone();
- let mut it = unsafe { m.table.iter() };
+ let mut it = unsafe { map.table.iter() };
assert_eq!(it.len(), n);
let mut i = 0;
@@ -4823,23 +6303,21 @@ mod test_map {
loop {
// occasionally remove some elements
if i < n && rng.gen_bool(0.1) {
- let mut hsh = hasher.build_hasher();
- i.hash(&mut hsh);
- let hash = hsh.finish();
+ let hash_value = super::make_insert_hash(&hash_builder, &i);
unsafe {
- let e = m.table.find(hash, |q| q.0.eq(&i));
+ let e = map.table.find(hash_value, |q| q.0.eq(&i));
if let Some(e) = e {
it.reflect_remove(&e);
- let t = m.table.remove(e);
+ let t = map.table.remove(e);
removed.push(t);
left -= 1;
} else {
assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed);
- let e = m.table.insert(
- hash,
+ let e = map.table.insert(
+ hash_value,
(i, 2 * i),
- super::make_hasher::<usize, _, usize, _>(&hasher),
+ super::make_hasher::<usize, _, usize, _>(&hash_builder),
);
it.reflect_insert(&e);
if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) {
@@ -4857,14 +6335,14 @@ mod test_map {
assert!(i < n);
let t = unsafe { e.unwrap().as_ref() };
assert!(!removed.contains(t));
- let (k, v) = t;
- assert_eq!(*v, 2 * k);
+ let (key, value) = t;
+ assert_eq!(*value, 2 * key);
i += 1;
}
assert!(i <= n);
// just for safety:
- assert_eq!(m.table.len(), left);
+ assert_eq!(map.table.len(), left);
}
}
@@ -4886,37 +6364,38 @@ mod test_map {
const EMPTY_MAP: HashMap<u32, std::string::String, MyHasher> =
HashMap::with_hasher(MyHasher);
- let mut map = EMPTY_MAP.clone();
+ let mut map = EMPTY_MAP;
map.insert(17, "seventeen".to_owned());
assert_eq!("seventeen", map[&17]);
}
#[test]
- #[cfg(feature = "nightly")]
fn test_get_each_mut() {
- use crate::UnavailableMutError::*;
-
let mut map = HashMap::new();
map.insert("foo".to_owned(), 0);
map.insert("bar".to_owned(), 10);
map.insert("baz".to_owned(), 20);
map.insert("qux".to_owned(), 30);
- let xs = map.get_each_mut(["foo", "dud", "foo", "qux"]);
- assert_eq!(
- xs,
- [Ok(&mut 0), Err(Absent), Err(Duplicate(0)), Ok(&mut 30)]
- );
+ let xs = map.get_many_mut(["foo", "qux"]);
+ assert_eq!(xs, Some([&mut 0, &mut 30]));
- let ys = map.get_each_key_value_mut(["bar", "baz", "baz", "dip"]);
+ let xs = map.get_many_mut(["foo", "dud"]);
+ assert_eq!(xs, None);
+
+ let xs = map.get_many_mut(["foo", "foo"]);
+ assert_eq!(xs, None);
+
+ let ys = map.get_many_key_value_mut(["bar", "baz"]);
assert_eq!(
ys,
- [
- Ok((&"bar".to_owned(), &mut 10)),
- Ok((&"baz".to_owned(), &mut 20)),
- Err(Duplicate(1)),
- Err(Absent),
- ]
+ Some([(&"bar".to_owned(), &mut 10), (&"baz".to_owned(), &mut 20),]),
);
+
+ let ys = map.get_many_key_value_mut(["bar", "dip"]);
+ assert_eq!(ys, None);
+
+ let ys = map.get_many_key_value_mut(["baz", "baz"]);
+ assert_eq!(ys, None);
}
}
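For comparison with the deleted `get_each_mut` assertions above, this is how the stabilized `get_many_mut` API reads at the `HashMap` level; a sketch, not part of the patch:

```
use hashbrown::HashMap;

fn main() {
    let mut scores: HashMap<String, i32> = HashMap::new();
    scores.insert("alice".to_string(), 1);
    scores.insert("bob".to_string(), 2);

    // Disjoint keys: two mutable borrows are handed out at once.
    if let Some([a, b]) = scores.get_many_mut(["alice", "bob"]) {
        core::mem::swap(a, b);
    }
    assert_eq!(scores["alice"], 2);

    // Duplicate (or absent) keys yield `None` instead of aliased borrows.
    assert!(scores.get_many_mut(["alice", "alice"]).is_none());
}
```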
diff --git a/src/raw/alloc.rs b/src/raw/alloc.rs
index de6c455..76fe1e9 100644
--- a/src/raw/alloc.rs
+++ b/src/raw/alloc.rs
@@ -33,6 +33,7 @@ mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
+ #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate
pub unsafe trait Allocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()>;
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
@@ -47,7 +48,7 @@ mod inner {
}
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
- dealloc(ptr.as_ptr(), layout)
+ dealloc(ptr.as_ptr(), layout);
}
}
impl Default for Global {
diff --git a/src/raw/bitmask.rs b/src/raw/bitmask.rs
index 99b2d53..7d4f9fc 100644
--- a/src/raw/bitmask.rs
+++ b/src/raw/bitmask.rs
@@ -106,7 +106,7 @@ impl IntoIterator for BitMask {
}
}
-/// Iterator over the contents of a `BitMask`, returning the indicies of set
+/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
pub struct BitMaskIter(BitMask);
diff --git a/src/raw/generic.rs b/src/raw/generic.rs
index ef066e8..b4d31e6 100644
--- a/src/raw/generic.rs
+++ b/src/raw/generic.rs
@@ -9,12 +9,14 @@ use core::{mem, ptr};
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
+ target_arch = "wasm32",
))]
type GroupWord = u64;
#[cfg(all(
target_pointer_width = "32",
not(target_arch = "aarch64"),
not(target_arch = "x86_64"),
+ not(target_arch = "wasm32"),
))]
type GroupWord = u32;
@@ -37,7 +39,7 @@ fn repeat(byte: u8) -> GroupWord {
#[derive(Copy, Clone)]
pub struct Group(GroupWord);
-// We perform all operations in the native endianess, and convert to
+// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
@@ -50,6 +52,7 @@ impl Group {
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
+ #[inline]
pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
@@ -103,7 +106,7 @@ impl Group {
#[inline]
pub fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
- // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
+ // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
let cmp = self.0 ^ repeat(byte);
BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
}
diff --git a/src/raw/mod.rs b/src/raw/mod.rs
index 3ae6980..5fe0a72 100644
--- a/src/raw/mod.rs
+++ b/src/raw/mod.rs
@@ -1,16 +1,13 @@
use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
-#[cfg(feature = "nightly")]
-use crate::UnavailableMutError;
-use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
-#[cfg(feature = "nightly")]
use core::mem::MaybeUninit;
use core::ptr::NonNull;
+use core::{hint, ptr};
cfg_if! {
// Use the SSE2 implementation if possible: it allows us to scan 16 buckets
@@ -59,7 +56,7 @@ fn cold() {}
#[inline]
fn likely(b: bool) -> bool {
if !b {
- cold()
+ cold();
}
b
}
@@ -67,18 +64,18 @@ fn likely(b: bool) -> bool {
#[inline]
fn unlikely(b: bool) -> bool {
if b {
- cold()
+ cold();
}
b
}
#[cfg(feature = "nightly")]
-#[cfg_attr(feature = "inline-more", inline)]
+#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
-#[cfg_attr(feature = "inline-more", inline)]
+#[inline]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
(to as usize - from as usize) / mem::size_of::<T>()
}
@@ -211,7 +208,7 @@ fn capacity_to_buckets(cap: usize) -> Option<usize> {
// Any overflows will have been caught by the checked_mul. Also, any
// rounding errors from the division above will be cleaned up by
- // next_power_of_two (which can't overflow because of the previous divison).
+ // next_power_of_two (which can't overflow because of the previous division).
Some(adjusted_cap.next_power_of_two())
}
@@ -292,14 +289,14 @@ pub struct Bucket<T> {
unsafe impl<T> Send for Bucket<T> {}
impl<T> Clone for Bucket<T> {
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn clone(&self) -> Self {
Self { ptr: self.ptr }
}
}
impl<T> Bucket<T> {
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
let ptr = if mem::size_of::<T>() == 0 {
// won't overflow because index must be less than length
@@ -311,7 +308,7 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
if mem::size_of::<T>() == 0 {
self.ptr.as_ptr() as usize - 1
@@ -319,7 +316,7 @@ impl<T> Bucket<T> {
offset_from(base.as_ptr(), self.ptr.as_ptr())
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub fn as_ptr(&self) -> *mut T {
if mem::size_of::<T>() == 0 {
// Just return an arbitrary ZST pointer which is properly aligned
@@ -328,7 +325,7 @@ impl<T> Bucket<T> {
unsafe { self.ptr.as_ptr().sub(1) }
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
unsafe fn next_n(&self, offset: usize) -> Self {
let ptr = if mem::size_of::<T>() == 0 {
(self.ptr.as_ptr() as usize + offset) as *mut T
@@ -343,23 +340,24 @@ impl<T> Bucket<T> {
pub unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn read(&self) -> T {
self.as_ptr().read()
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn as_ref<'a>(&self) -> &'a T {
&*self.as_ptr()
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
&mut *self.as_ptr()
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[cfg(feature = "raw")]
+ #[inline]
pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
}
@@ -398,7 +396,7 @@ impl<T> RawTable<T, Global> {
/// In effect this returns a table with exactly 1 bucket. However we can
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub const fn new() -> Self {
Self {
table: RawTableInner::new_in(Global),
@@ -427,7 +425,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// In effect this returns a table with exactly 1 bucket. However we can
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub fn new_in(alloc: A) -> Self {
Self {
table: RawTableInner::new_in(alloc),
@@ -492,33 +490,39 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
+ /// Returns a reference to the underlying allocator.
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ &self.table.alloc
+ }
+
/// Deallocates the table without dropping any entries.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn free_buckets(&mut self) {
- self.table.free_buckets(TableLayout::new::<T>())
+ self.table.free_buckets(TableLayout::new::<T>());
}
/// Returns pointer to one past last element of data table.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn data_end(&self) -> NonNull<T> {
NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
}
/// Returns pointer to start of data table.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
#[cfg(feature = "nightly")]
pub unsafe fn data_start(&self) -> *mut T {
self.data_end().as_ptr().wrapping_sub(self.buckets())
}
/// Returns the index of a bucket from a `Bucket`.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
bucket.to_base_index(self.data_end())
}
/// Returns a pointer to an element in the table.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.table.bucket_mask, 0);
debug_assert!(index < self.buckets());
@@ -530,7 +534,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
#[deprecated(since = "0.8.1", note = "use erase or remove instead")]
pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
let index = self.bucket_index(item);
- self.table.erase(index)
+ self.table.erase(index);
}
/// Erases an element from the table, dropping it in place.
@@ -550,7 +554,9 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
// Avoid `Option::map` because it bloats LLVM IR.
if let Some(bucket) = self.find(hash, eq) {
- unsafe { self.erase(bucket) };
+ unsafe {
+ self.erase(bucket);
+ }
true
} else {
false
@@ -579,7 +585,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Marks all table buckets as empty without dropping their contents.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear_no_drop(&mut self) {
- self.table.clear_no_drop()
+ self.table.clear_no_drop();
}
/// Removes all elements from the table without freeing the backing memory.
@@ -593,7 +599,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && self.len() != 0 {
+ if mem::needs_drop::<T>() && !self.is_empty() {
for item in self.iter() {
item.drop();
}
@@ -624,7 +630,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
if min_buckets < self.buckets() {
// Fast path if the table is empty
if self.table.items == 0 {
- *self = Self::with_capacity_in(min_size, self.table.alloc.clone())
+ *self = Self::with_capacity_in(min_size, self.table.alloc.clone());
} else {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
if self
@@ -676,102 +682,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
- // Avoid `Option::ok_or_else` because it bloats LLVM IR.
- let new_items = match self.table.items.checked_add(additional) {
- Some(new_items) => new_items,
- None => return Err(fallibility.capacity_overflow()),
- };
- let full_capacity = bucket_mask_to_capacity(self.table.bucket_mask);
- if new_items <= full_capacity / 2 {
- // Rehash in-place without re-allocating if we have plenty of spare
- // capacity that is locked up due to DELETED entries.
- self.rehash_in_place(hasher);
- Ok(())
- } else {
- // Otherwise, conservatively resize to at least the next size up
- // to avoid churning deletes into frequent rehashes.
- self.resize(
- usize::max(new_items, full_capacity + 1),
- hasher,
- fallibility,
- )
- }
- }
-
- /// Rehashes the contents of the table in place (i.e. without changing the
- /// allocation).
- ///
- /// If `hasher` panics then some the table's contents may be lost.
- fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
unsafe {
- // If the hash function panics then properly clean up any elements
- // that we haven't rehashed yet. We unfortunately can't preserve the
- // element since we lost their hash and have no way of recovering it
- // without risking another panic.
- self.table.prepare_rehash_in_place();
-
- let mut guard = guard(&mut self.table, move |self_| {
+ self.table.reserve_rehash_inner(
+ additional,
+ &|table, index| hasher(table.bucket::<T>(index).as_ref()),
+ fallibility,
+ TableLayout::new::<T>(),
if mem::needs_drop::<T>() {
- for i in 0..self_.buckets() {
- if *self_.ctrl(i) == DELETED {
- self_.set_ctrl(i, EMPTY);
- self_.bucket::<T>(i).drop();
- self_.items -= 1;
- }
- }
- }
- self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
- });
-
- // At this point, DELETED elements are elements that we haven't
- // rehashed yet. Find them and re-insert them at their ideal
- // position.
- 'outer: for i in 0..guard.buckets() {
- if *guard.ctrl(i) != DELETED {
- continue;
- }
-
- 'inner: loop {
- // Hash the current item
- let item = guard.bucket(i);
- let hash = hasher(item.as_ref());
-
- // Search for a suitable place to put it
- let new_i = guard.find_insert_slot(hash);
-
- // Probing works by scanning through all of the control
- // bytes in groups, which may not be aligned to the group
- // size. If both the new and old position fall within the
- // same unaligned group, then there is no benefit in moving
- // it and we can just continue to the next item.
- if likely(guard.is_in_same_group(i, new_i, hash)) {
- guard.set_ctrl_h2(i, hash);
- continue 'outer;
- }
-
- // We are moving the current item to a new position. Write
- // our H2 to the control byte of the new position.
- let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
- if prev_ctrl == EMPTY {
- guard.set_ctrl(i, EMPTY);
- // If the target slot is empty, simply move the current
- // element into the new slot and clear the old control
- // byte.
- guard.bucket(new_i).copy_from_nonoverlapping(&item);
- continue 'outer;
- } else {
- // If the target slot is occupied, swap the two elements
- // and then continue processing the element that we just
- // swapped into the old slot.
- debug_assert_eq!(prev_ctrl, DELETED);
- mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
- continue 'inner;
- }
- }
- }
-
- guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
- mem::forget(guard);
+ Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
+ } else {
+ None
+ },
+ )
}
}
@@ -784,30 +706,12 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
unsafe {
- let mut new_table =
- self.table
- .prepare_resize(TableLayout::new::<T>(), capacity, fallibility)?;
-
- // Copy all elements to the new table.
- for item in self.iter() {
- // This may panic.
- let hash = hasher(item.as_ref());
-
- // We can use a simpler version of insert() here since:
- // - there are no DELETED entries.
- // - we know there is enough space in the table.
- // - all elements are unique.
- let (index, _) = new_table.prepare_insert_slot(hash);
- new_table.bucket(index).copy_from_nonoverlapping(&item);
- }
-
- // We successfully copied all elements without panicking. Now replace
- // self with the new table. The old table will have its memory freed but
- // the items will not be dropped (since they have been moved into the
- // new table).
- mem::swap(&mut self.table, &mut new_table);
-
- Ok(())
+ self.table.resize_inner(
+ capacity,
+ &|table, index| hasher(table.bucket::<T>(index).as_ref()),
+ fallibility,
+ TableLayout::new::<T>(),
+ )
}
}
@@ -872,19 +776,17 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// This does not check if the given element already exists in the table.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
- pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
- unsafe {
- let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
- let bucket = self.table.bucket(index);
+ pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
+ let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
+ let bucket = self.table.bucket(index);
- // If we are replacing a DELETED entry then we don't need to update
- // the load counter.
- self.table.growth_left -= special_is_empty(old_ctrl) as usize;
+ // If we are replacing a DELETED entry then we don't need to update
+ // the load counter.
+ self.table.growth_left -= special_is_empty(old_ctrl) as usize;
- bucket.write(value);
- self.table.items += 1;
- bucket
- }
+ bucket.write(value);
+ self.table.items += 1;
+ bucket
}
/// Temporarily removes a bucket, applying the given function to the removed
@@ -917,14 +819,14 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
- unsafe {
- for bucket in self.iter_hash(hash) {
- let elm = bucket.as_ref();
- if likely(eq(elm)) {
- return Some(bucket);
- }
- }
- None
+ let result = self.table.find_inner(hash, &mut |index| unsafe {
+ eq(self.bucket(index).as_ref())
+ });
+
+ // Avoid `Option::map` because it bloats LLVM IR.
+ match result {
+ Some(index) => Some(unsafe { self.bucket(index) }),
+ None => None,
}
}
@@ -950,79 +852,84 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Attempts to get mutable references to `N` entries in the table at once.
///
- /// Returns an array of length `N` with the results of each query. For soundness,
- /// at most one mutable reference will be returned to any entry. An
- /// `Err(UnavailableMutError::Duplicate(i))` in the returned array indicates that a suitable
- /// entry exists, but a mutable reference to it already occurs at index `i` in the returned
- /// array.
+ /// Returns an array of length `N` with the results of each query.
+ ///
+ /// At most one mutable reference will be returned to any entry. `None` is returned if any
+ /// of the hashes are duplicates or if any of them fails to match an entry.
///
/// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
/// the `i`th key to be looked up.
- ///
- /// This method is available only if the `nightly` feature is enabled.
- #[cfg(feature = "nightly")]
- pub fn get_each_mut<const N: usize>(
+ pub fn get_many_mut<const N: usize>(
&mut self,
hashes: [u64; N],
- mut eq: impl FnMut(usize, &T) -> bool,
- ) -> [Result<&'_ mut T, UnavailableMutError>; N] {
- // Collect the requested buckets.
- // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
- let mut buckets: [MaybeUninit<Option<Bucket<T>>>; N] =
- unsafe { MaybeUninit::uninit().assume_init() };
- for i in 0..N {
- buckets[i] = MaybeUninit::new(self.find(hashes[i], |k| eq(i, k)));
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ unsafe {
+ let ptrs = self.get_many_mut_pointers(hashes, eq)?;
+
+ for (i, &cur) in ptrs.iter().enumerate() {
+ if ptrs[..i].iter().any(|&prev| ptr::eq::<T>(prev, cur)) {
+ return None;
+ }
+ }
+ // All buckets are distinct from all previous buckets so we're clear to return the result
+ // of the lookup.
+
+ // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
+ Some(mem::transmute_copy(&ptrs))
}
- let buckets: [Option<Bucket<T>>; N] = unsafe { MaybeUninit::array_assume_init(buckets) };
+ }
- // Walk through the buckets, checking for duplicates and building up the output array.
+ pub unsafe fn get_many_unchecked_mut<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ let ptrs = self.get_many_mut_pointers(hashes, eq)?;
+ Some(mem::transmute_copy(&ptrs))
+ }
+
+ unsafe fn get_many_mut_pointers<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ mut eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[*mut T; N]> {
// TODO use `MaybeUninit::uninit_array` here instead once that's stable.
- let mut out: [MaybeUninit<Result<&'_ mut T, UnavailableMutError>>; N] =
- unsafe { MaybeUninit::uninit().assume_init() };
- for i in 0..N {
- out[i] = MaybeUninit::new(
- #[allow(clippy::never_loop)]
- 'outer: loop {
- for j in 0..i {
- match (&buckets[j], &buckets[i]) {
- // These two buckets are the same, and we can't safely return a second
- // mutable reference to the same entry.
- (Some(prev), Some(cur)) if prev.as_ptr() == cur.as_ptr() => {
- break 'outer Err(UnavailableMutError::Duplicate(j));
- }
- _ => {}
- }
- }
- // This bucket is distinct from all previous buckets (or it doesn't exist), so
- // we're clear to return the result of the lookup.
- break match &buckets[i] {
- None => Err(UnavailableMutError::Absent),
- Some(bkt) => unsafe { Ok(bkt.as_mut()) },
- };
- },
- )
+ let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit();
+ let outs_ptr = outs.as_mut_ptr();
+
+ for (i, &hash) in hashes.iter().enumerate() {
+ let cur = self.find(hash, |k| eq(i, k))?;
+ *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut();
}
- unsafe { MaybeUninit::array_assume_init(out) }
+ // TODO use `MaybeUninit::array_assume_init` here instead once that's stable.
+ Some(outs.assume_init())
}
/// Returns the number of elements the map can hold without reallocating.
///
/// This number is a lower bound; the table might be able to hold
/// more, but is guaranteed to be able to hold at least this many.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub fn capacity(&self) -> usize {
self.table.items + self.table.growth_left
}
/// Returns the number of elements in the table.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub fn len(&self) -> usize {
self.table.items
}
+ /// Returns `true` if the table contains no elements.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
/// Returns the number of buckets in the table.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub fn buckets(&self) -> usize {
self.table.bucket_mask + 1
}
@@ -1031,7 +938,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
pub unsafe fn iter(&self) -> RawIter<T> {
let data = Bucket::from_base_index(self.data_end(), 0);
RawIter {
@@ -1042,12 +949,15 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Returns an iterator over occupied buckets that could match a given hash.
///
- /// In rare cases, the iterator may return a bucket with a different hash.
+ /// `RawTable` only stores 7 bits of the hash value, so this iterator may
+ /// return items that have a hash value different than the one provided. You
+ /// should always validate the returned values before using them.
///
/// It is up to the caller to ensure that the `RawTable` outlives the
/// `RawIterHash`. Because we cannot make the `next` method unsafe on the
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
+ #[cfg(feature = "raw")]
pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
RawIterHash::new(self, hash)
}
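Because only the 7-bit `h2` tag is compared, any consumer of `iter_hash` has to re-validate candidates, exactly as the rewritten doc comment stresses. A hedged sketch against the `raw`-feature API (`find_pair` is a hypothetical helper):

```
use hashbrown::raw::RawTable;

unsafe fn find_pair(table: &RawTable<(u64, u64)>, hash: u64, key: u64) -> Option<u64> {
    for bucket in table.iter_hash(hash) {
        let &(k, v) = bucket.as_ref();
        // The control byte holds only 7 hash bits, so false positives are
        // possible: validate the full key before trusting the bucket.
        if k == key {
            return Some(v);
        }
    }
    None
}
```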
@@ -1121,11 +1031,21 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
-unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}
+unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A>
+where
+ T: Send,
+ A: Send,
+{
+}
+unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A>
+where
+ T: Sync,
+ A: Sync,
+{
+}
impl<A> RawTableInner<A> {
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
const fn new_in(alloc: A) -> Self {
Self {
// Be careful to cast the entire slice to a raw pointer.
@@ -1154,6 +1074,15 @@ impl<A: Allocator + Clone> RawTableInner<A> {
None => return Err(fallibility.capacity_overflow()),
};
+ // We need an additional check to ensure that the allocation doesn't
+ // exceed `isize::MAX`. We can skip this check on 64-bit systems since
+ // such allocations will never succeed anyway.
+ //
+ // This mirrors what Vec does in the standard library.
+ if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
+ return Err(fallibility.capacity_overflow());
+ }
+
let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
Ok(block) => block.cast(),
Err(_) => return Err(fallibility.alloc_err(layout)),
@@ -1221,7 +1150,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// EMPTY entries. These will unfortunately trigger a
// match, but once masked may point to a full bucket that
// is already occupied. We detect this situation here and
- // perform a second scan starting at the begining of the
+ // perform a second scan starting at the beginning of the
// table. This second scan is guaranteed to find an empty
// slot (due to the load factor) before hitting the trailing
// control bytes (containing EMPTY).
@@ -1240,6 +1169,32 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
+ /// code generated, but it is eliminated by LLVM optimizations.
+ #[inline]
+ fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
+ let h2_hash = h2(hash);
+ let mut probe_seq = self.probe_seq(hash);
+
+ loop {
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
+
+ for bit in group.match_byte(h2_hash) {
+ let index = (probe_seq.pos + bit) & self.bucket_mask;
+
+ if likely(eq(index)) {
+ return Some(index);
+ }
+ }
+
+ if likely(group.match_empty().any_bit_set()) {
+ return None;
+ }
+
+ probe_seq.move_next(self.bucket_mask);
+ }
+ }
+
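`find_inner` routes the probe loop through `&mut dyn FnMut(usize) -> bool`, so it is monomorphized once per allocator rather than once per key/value/closure combination, while LLVM can still inline it at hot call sites. The same outlining trick in miniature, with hypothetical names:

```
// Generic shim: one tiny instantiation per `T`.
fn find_index<T>(items: &[T], mut pred: impl FnMut(&T) -> bool) -> Option<usize> {
    find_index_inner(items.len(), &mut |i| pred(&items[i]))
}

// Type-erased core: compiled once and shared by every `T`.
fn find_index_inner(len: usize, pred: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
    (0..len).find(|&i| pred(i))
}
```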
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_rehash_in_place(&mut self) {
@@ -1263,14 +1218,22 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.bucket_mask, 0);
debug_assert!(index < self.buckets());
Bucket::from_base_index(self.data_end(), index)
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
+ unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
+ debug_assert_ne!(self.bucket_mask, 0);
+ debug_assert!(index < self.buckets());
+ let base: *mut u8 = self.data_end().as_ptr();
+ base.sub((index + 1) * size_of)
+ }
+
+ #[inline]
unsafe fn data_end<T>(&self) -> NonNull<T> {
NonNull::new_unchecked(self.ctrl.as_ptr().cast())
}
@@ -1305,7 +1268,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[inline]
unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
- self.growth_left -= special_is_empty(old_ctrl) as usize;
+ self.growth_left -= usize::from(special_is_empty(old_ctrl));
self.set_ctrl_h2(index, hash);
self.items += 1;
}
@@ -1322,7 +1285,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// the end of the array.
#[inline]
unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
- self.set_ctrl(index, h2(hash))
+ self.set_ctrl(index, h2(hash));
}
#[inline]
@@ -1415,6 +1378,179 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}))
}
+ /// Reserves or rehashes to make room for `additional` more elements.
+ ///
+ /// This uses dynamic dispatch to reduce the amount of
+ /// code generated, but it is eliminated by LLVM optimizations when inlined.
+ #[allow(clippy::inline_always)]
+ #[inline(always)]
+ unsafe fn reserve_rehash_inner(
+ &mut self,
+ additional: usize,
+ hasher: &dyn Fn(&mut Self, usize) -> u64,
+ fallibility: Fallibility,
+ layout: TableLayout,
+ drop: Option<fn(*mut u8)>,
+ ) -> Result<(), TryReserveError> {
+ // Avoid `Option::ok_or_else` because it bloats LLVM IR.
+ let new_items = match self.items.checked_add(additional) {
+ Some(new_items) => new_items,
+ None => return Err(fallibility.capacity_overflow()),
+ };
+ let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
+ if new_items <= full_capacity / 2 {
+ // Rehash in-place without re-allocating if we have plenty of spare
+ // capacity that is locked up due to DELETED entries.
+ self.rehash_in_place(hasher, layout.size, drop);
+ Ok(())
+ } else {
+ // Otherwise, conservatively resize to at least the next size up
+ // to avoid churning deletes into frequent rehashes.
+ self.resize_inner(
+ usize::max(new_items, full_capacity + 1),
+ hasher,
+ fallibility,
+ layout,
+ )
+ }
+ }
+
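The `new_items <= full_capacity / 2` branch is easiest to see with numbers: a 16-bucket table has a full capacity of 14 (7/8 load factor), so up to 7 live-plus-new items can be handled by rehashing in place and reclaiming DELETED slots; anything more grows to at least `full_capacity + 1`. A toy rendering of that decision, for illustration only:

```
fn rehash_in_place_suffices(items: usize, additional: usize, full_capacity: usize) -> bool {
    // Mirrors the branch above: plenty of spare capacity locked up in
    // DELETED slots means we can rehash without reallocating.
    items + additional <= full_capacity / 2
}

fn main() {
    // 16 buckets -> full capacity 14 under the 7/8 load factor.
    assert!(rehash_in_place_suffices(3, 2, 14)); // 5 <= 7: in place
    assert!(!rehash_in_place_suffices(6, 4, 14)); // 10 > 7: resize to max(10, 15)
}
```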
+ /// Allocates a new table of a different size and moves the contents of the
+ /// current table into it.
+ ///
+ /// This uses dynamic dispatch to reduce the amount of
+ /// code generated, but it is eliminated by LLVM optimizations when inlined.
+ #[allow(clippy::inline_always)]
+ #[inline(always)]
+ unsafe fn resize_inner(
+ &mut self,
+ capacity: usize,
+ hasher: &dyn Fn(&mut Self, usize) -> u64,
+ fallibility: Fallibility,
+ layout: TableLayout,
+ ) -> Result<(), TryReserveError> {
+ let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
+
+ // Copy all elements to the new table.
+ for i in 0..self.buckets() {
+ if !is_full(*self.ctrl(i)) {
+ continue;
+ }
+
+ // This may panic.
+ let hash = hasher(self, i);
+
+ // We can use a simpler version of insert() here since:
+ // - there are no DELETED entries.
+ // - we know there is enough space in the table.
+ // - all elements are unique.
+ let (index, _) = new_table.prepare_insert_slot(hash);
+
+ ptr::copy_nonoverlapping(
+ self.bucket_ptr(i, layout.size),
+ new_table.bucket_ptr(index, layout.size),
+ layout.size,
+ );
+ }
+
+ // We successfully copied all elements without panicking. Now replace
+ // self with the new table. The old table will have its memory freed but
+ // the items will not be dropped (since they have been moved into the
+ // new table).
+ mem::swap(self, &mut new_table);
+
+ Ok(())
+ }
+
+ /// Rehashes the contents of the table in place (i.e. without changing the
+ /// allocation).
+ ///
+ /// If `hasher` panics then some of the table's contents may be lost.
+ ///
+ /// This uses dynamic dispatch to reduce the amount of
+ /// code generated, but it is eliminated by LLVM optimizations when inlined.
+ #[allow(clippy::inline_always)]
+ #[cfg_attr(feature = "inline-more", inline(always))]
+ #[cfg_attr(not(feature = "inline-more"), inline)]
+ unsafe fn rehash_in_place(
+ &mut self,
+ hasher: &dyn Fn(&mut Self, usize) -> u64,
+ size_of: usize,
+ drop: Option<fn(*mut u8)>,
+ ) {
+ // If the hash function panics then properly clean up any elements
+ // that we haven't rehashed yet. We unfortunately can't preserve the
+ // elements since we lost their hashes and have no way of recovering
+ // them without risking another panic.
+ self.prepare_rehash_in_place();
+
+ let mut guard = guard(self, move |self_| {
+ if let Some(drop) = drop {
+ for i in 0..self_.buckets() {
+ if *self_.ctrl(i) == DELETED {
+ self_.set_ctrl(i, EMPTY);
+ drop(self_.bucket_ptr(i, size_of));
+ self_.items -= 1;
+ }
+ }
+ }
+ self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
+ });
+
+ // At this point, DELETED elements are elements that we haven't
+ // rehashed yet. Find them and re-insert them at their ideal
+ // position.
+ 'outer: for i in 0..guard.buckets() {
+ if *guard.ctrl(i) != DELETED {
+ continue;
+ }
+
+ let i_p = guard.bucket_ptr(i, size_of);
+
+ 'inner: loop {
+ // Hash the current item
+ let hash = hasher(*guard, i);
+
+ // Search for a suitable place to put it
+ let new_i = guard.find_insert_slot(hash);
+ let new_i_p = guard.bucket_ptr(new_i, size_of);
+
+ // Probing works by scanning through all of the control
+ // bytes in groups, which may not be aligned to the group
+ // size. If both the new and old position fall within the
+ // same unaligned group, then there is no benefit in moving
+ // it and we can just continue to the next item.
+ if likely(guard.is_in_same_group(i, new_i, hash)) {
+ guard.set_ctrl_h2(i, hash);
+ continue 'outer;
+ }
+
+ // We are moving the current item to a new position. Write
+ // our H2 to the control byte of the new position.
+ let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
+ if prev_ctrl == EMPTY {
+ guard.set_ctrl(i, EMPTY);
+ // If the target slot is empty, simply move the current
+ // element into the new slot and clear the old control
+ // byte.
+ ptr::copy_nonoverlapping(i_p, new_i_p, size_of);
+ continue 'outer;
+ } else {
+ // If the target slot is occupied, swap the two elements
+ // and then continue processing the element that we just
+ // swapped into the old slot.
+ debug_assert_eq!(prev_ctrl, DELETED);
+ ptr::swap_nonoverlapping(i_p, new_i_p, size_of);
+ continue 'inner;
+ }
+ }
+ }
+
+ guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
+
+ mem::forget(guard);
+ }
+
#[inline]
unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
@@ -1454,7 +1590,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
//
// Note that in this context `leading_zeros` refers to the bytes at the
// end of a group, while `trailing_zeros` refers to the bytes at the
- // begining of a group.
+ // beginning of a group.
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
DELETED
} else {
@@ -1524,7 +1660,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
self.clone_from_spec(source, |self_| {
// We need to leave the table in an empty state.
- self_.clear_no_drop()
+ self_.clear_no_drop();
});
}
}
@@ -1536,8 +1672,8 @@ trait RawTableClone {
unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
- #[cfg_attr(feature = "inline-more", inline)]
default_fn! {
+ #[cfg_attr(feature = "inline-more", inline)]
unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
self.clone_from_impl(source, on_panic);
}
@@ -1574,7 +1710,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
// to make sure we drop only the elements that have been
// cloned so far.
let mut guard = guard((0, &mut *self), |(index, self_)| {
- if mem::needs_drop::<T>() && self_.len() != 0 {
+ if mem::needs_drop::<T>() && !self_.is_empty() {
for i in 0..=*index {
if is_full(*self_.table.ctrl(i)) {
self_.bucket(i).drop();
@@ -1650,7 +1786,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
}
impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn default() -> Self {
Self::new_in(Default::default())
}
@@ -1824,13 +1960,17 @@ impl<T> Iterator for RawIterRange<T> {
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
// We don't have an item count, so just guess based on the range size.
- (
- 0,
- Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
- )
+ let remaining_buckets = if self.end > self.next_ctrl {
+ unsafe { offset_from(self.end, self.next_ctrl) }
+ } else {
+ 0
+ };
+
+ // Add a group width to include the group we are currently processing.
+ (0, Some(Group::WIDTH + remaining_buckets))
}
}
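Editor's note: this `size_hint` fix addresses the underflow listed in the changelog — `next_ctrl` may legitimately advance up to one group past `end`, so the old unconditional `offset_from(end, next_ctrl)` could wrap. A sketch of the guarded computation, with `end`/`next_ctrl` standing in for the iterator's control-byte cursors:

```rust
// Sketch: clamp the pointer distance to zero instead of underflowing.
// Assumes both pointers point into the same control-byte allocation.
fn remaining_buckets(end: *const u8, next_ctrl: *const u8) -> usize {
    if end > next_ctrl {
        unsafe { end.offset_from(next_ctrl) as usize }
    } else {
        // `next_ctrl` overshoots `end` by up to one group width once the
        // final group has been loaded; report zero rather than wrapping.
        0
    }
}
```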
@@ -1871,7 +2011,7 @@ impl<T> RawIter<T> {
/// For the iterator to remain valid, this method must be called once
/// for each insert before `next` is called again.
///
- /// This method does not guarantee that an insertion of a bucket witha greater
+ /// This method does not guarantee that an insertion of a bucket with a greater
/// index than the last one yielded will be reflected in the iterator.
///
/// This method should be called _after_ the given insert is made.
@@ -1923,7 +2063,7 @@ impl<T> RawIter<T> {
// If it did, we're done.
// - Otherwise, update the iterator cached group so that it won't
// yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
- // We'll also need ot update the item count accordingly.
+ // We'll also need to update the item count accordingly.
if let Some(index) = self.iter.current_group.lowest_set_bit() {
let next_bucket = self.iter.data.next_n(index);
if b.as_ptr() > next_bucket.as_ptr() {
@@ -2006,7 +2146,7 @@ impl<T> Iterator for RawIter<T> {
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.items, Some(self.items))
}
@@ -2030,8 +2170,18 @@ impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {}
-unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {}
+unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A>
+where
+ T: Send,
+ A: Send,
+{
+}
+unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A>
+where
+ T: Sync,
+ A: Sync,
+{
+}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
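Editor's note: tightening `Send`/`Sync` to also require `A: Send`/`A: Sync` closes a soundness gap — the iterator owns its allocator handle, so its thread-safety must depend on both parts. The general pattern, shown on an illustrative type (not hashbrown code):

```rust
// Illustration only: a type that owns elements *and* an allocator handle.
struct OwnsBoth<T, A> {
    items: Vec<T>,
    alloc: A,
}

// Correct manual impls forward the bound to every owned field; omitting
// `A: Send` would let a `!Send` allocator handle cross threads.
unsafe impl<T: Send, A: Send> Send for OwnsBoth<T, A> {}
unsafe impl<T: Sync, A: Sync> Sync for OwnsBoth<T, A> {}
```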
@@ -2072,7 +2222,7 @@ impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
unsafe { Some(self.iter.next()?.read()) }
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
@@ -2103,8 +2253,18 @@ impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
}
}
-unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> where T: Send {}
-unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> where T: Sync {}
+unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A>
+where
+ T: Send,
+ A: Send,
+{
+}
+unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A>
+where
+ T: Sync,
+ A: Sync,
+{
+}
impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -2136,7 +2296,7 @@ impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
}
}
- #[cfg_attr(feature = "inline-more", inline)]
+ #[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
@@ -2147,7 +2307,9 @@ impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
-/// In rare cases, the iterator may return a bucket with a different hash.
+/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
+/// items whose hash value differs from the one provided. You should
+/// always validate the returned values before using them.
pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
inner: RawIterHashInner<'a, A>,
_marker: PhantomData<T>,
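Editor's note: since only the 7-bit H2 fragment of the hash is matched during iteration, callers of `iter_hash` must re-verify candidates themselves. A hedged sketch of that caller-side pattern (assumes the crate's unstable `raw` feature; `find_exact` is an illustrative helper, not part of the API):

```rust
use hashbrown::raw::{Bucket, RawTable};

/// Illustrative helper: walk all buckets whose control byte matches the
/// 7-bit hash fragment, then confirm with a full equality check.
unsafe fn find_exact<T: Eq>(table: &RawTable<T>, hash: u64, key: &T) -> Option<Bucket<T>> {
    for bucket in table.iter_hash(hash) {
        // `iter_hash` can yield false positives; validate before use.
        if bucket.as_ref() == key {
            return Some(bucket);
        }
    }
    None
}
```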
@@ -2170,6 +2332,7 @@ struct RawIterHashInner<'a, A: Allocator + Clone> {
impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
+ #[cfg(feature = "raw")]
fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
RawIterHash {
inner: RawIterHashInner::new(&table.table, hash),
@@ -2179,6 +2342,7 @@ impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
}
impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
#[cfg_attr(feature = "inline-more", inline)]
+ #[cfg(feature = "raw")]
fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
unsafe {
let h2_hash = h2(hash);
@@ -2235,6 +2399,20 @@ impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
mod test_map {
use super::*;
+ fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
+ unsafe {
+ table.table.rehash_in_place(
+ &|table, index| hasher(table.bucket::<T>(index).as_ref()),
+ mem::size_of::<T>(),
+ if mem::needs_drop::<T>() {
+ Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
+ } else {
+ None
+ },
+ );
+ }
+ }
+
#[test]
fn rehash() {
let mut table = RawTable::new();
@@ -2250,7 +2428,7 @@ mod test_map {
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
}
- table.rehash_in_place(hasher);
+ rehash_in_place(&mut table, hasher);
for i in 0..100 {
unsafe {
diff --git a/src/raw/sse2.rs b/src/raw/sse2.rs
index eed9684..a0bf6da 100644
--- a/src/raw/sse2.rs
+++ b/src/raw/sse2.rs
@@ -28,6 +28,7 @@ impl Group {
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
+ #[inline]
#[allow(clippy::items_after_statements)]
pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
diff --git a/src/rustc_entry.rs b/src/rustc_entry.rs
index 1793c4a..465db47 100644
--- a/src/rustc_entry.rs
+++ b/src/rustc_entry.rs
@@ -574,8 +574,10 @@ impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(self, value: V) -> &'a mut V {
- let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
- unsafe { &mut bucket.as_mut().1 }
+ unsafe {
+ let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
+ &mut bucket.as_mut().1
+ }
}
/// Sets the value of the entry with the RustcVacantEntry's key,
@@ -596,7 +598,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
- let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
+ let bucket = unsafe { self.table.insert_no_grow(self.hash, (self.key, value)) };
RustcOccupiedEntry {
key: None,
elem: bucket,
diff --git a/src/scopeguard.rs b/src/scopeguard.rs
index 4e9bf04..ccdc0c5 100644
--- a/src/scopeguard.rs
+++ b/src/scopeguard.rs
@@ -44,6 +44,6 @@ where
{
#[inline]
fn drop(&mut self) {
- (self.dropfn)(&mut self.value)
+ (self.dropfn)(&mut self.value);
}
}
diff --git a/src/set.rs b/src/set.rs
index d59183b..bb83054 100644
--- a/src/set.rs
+++ b/src/set.rs
@@ -378,7 +378,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
- self.map.clear()
+ self.map.clear();
}
}
@@ -454,6 +454,12 @@ impl<T, S, A> HashSet<T, S, A>
where
A: Allocator + Clone,
{
+ /// Returns a reference to the underlying allocator.
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.map.allocator()
+ }
+
/// Creates a new empty hash set which will use the given hasher to hash
/// keys.
///
@@ -553,7 +559,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize) {
- self.map.reserve(additional)
+ self.map.reserve(additional);
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -595,7 +601,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to_fit(&mut self) {
- self.map.shrink_to_fit()
+ self.map.shrink_to_fit();
}
/// Shrinks the capacity of the set with a lower limit. It will drop
@@ -621,7 +627,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_capacity: usize) {
- self.map.shrink_to(min_capacity)
+ self.map.shrink_to(min_capacity);
}
/// Visits the values representing the difference,
@@ -985,6 +991,30 @@ where
self.map.insert(value, ()).is_none()
}
+ /// Insert a value into the set without checking if the value already exists in the set.
+ ///
+ /// Returns a reference to the value just inserted.
+ ///
+ /// This operation is safe if the value does not already exist in the set.
+ ///
+ /// However, if the value already exists in the set, the behavior is unspecified:
+ /// this operation may panic or loop forever, and any following operation on the set
+ /// may panic, loop forever, or return an arbitrary result.
+ ///
+ /// That said, this operation (and any following operations) is guaranteed
+ /// not to violate memory safety.
+ ///
+ /// This operation is faster than a regular insert because it does not perform
+ /// a lookup before the insertion.
+ ///
+ /// This operation is useful during the initial population of the set.
+ /// For example, when constructing a set from another set, we know
+ /// that the values are unique.
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn insert_unique_unchecked(&mut self, value: T) -> &T {
+ self.map.insert_unique_unchecked(value, ()).0
+ }
+
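Editor's note: the doc block above states the contract but carries no example; a minimal usage sketch, valid only because the source values are drawn from another set and are therefore already unique:

```rust
use hashbrown::HashSet;

let src: HashSet<u32> = [1, 2, 3].into_iter().collect();
let mut dst = HashSet::with_capacity(src.len());
for v in &src {
    // Upholds the contract: `src` is a set, so every value is unique.
    dst.insert_unique_unchecked(*v);
}
assert_eq!(dst.len(), 3);
```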
/// Adds a value to the set, replacing the existing value, if any, that is equal to the given
/// one. Returns the replaced value.
///
@@ -1098,8 +1128,7 @@ where
impl<T, S, A> fmt::Debug for HashSet<T, S, A>
where
- T: Eq + Hash + fmt::Debug,
- S: BuildHasher,
+ T: fmt::Debug,
A: Allocator + Clone,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -1130,6 +1159,27 @@ where
}
}
+// The default hasher is used to match the std implementation signature
+#[cfg(feature = "ahash")]
+impl<T, A, const N: usize> From<[T; N]> for HashSet<T, DefaultHashBuilder, A>
+where
+ T: Eq + Hash,
+ A: Default + Allocator + Clone,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashSet;
+ ///
+ /// let set1 = HashSet::from([1, 2, 3, 4]);
+ /// let set2: HashSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ arr.into_iter().collect()
+ }
+}
+
impl<T, S, A> Extend<T> for HashSet<T, S, A>
where
T: Eq + Hash,
@@ -1162,7 +1212,7 @@ where
{
#[cfg_attr(feature = "inline-more", inline)]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- self.extend(iter.into_iter().cloned());
+ self.extend(iter.into_iter().copied());
}
#[inline]
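Editor's note: the blanket `cloned()` → `copied()` changes here and in the tests below are behavior-preserving for `Copy` element types; `copied()` simply states the intent and refuses to compile if the element type ever stops being `Copy`. For instance:

```rust
// Identical results for a `Copy` element type.
let xs = [1, 2, 3];
let a: Vec<i32> = xs.iter().cloned().collect();
let b: Vec<i32> = xs.iter().copied().collect();
assert_eq!(a, b);
```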
@@ -1963,7 +2013,7 @@ mod test_set {
let expected = [3, 5, 11, 77];
for x in a.intersection(&b) {
assert!(expected.contains(x));
- i += 1
+ i += 1;
}
assert_eq!(i, expected.len());
}
@@ -1986,7 +2036,7 @@ mod test_set {
let expected = [1, 5, 11];
for x in a.difference(&b) {
assert!(expected.contains(x));
- i += 1
+ i += 1;
}
assert_eq!(i, expected.len());
}
@@ -2012,7 +2062,7 @@ mod test_set {
let expected = [-2, 1, 5, 11, 14, 22];
for x in a.symmetric_difference(&b) {
assert!(expected.contains(x));
- i += 1
+ i += 1;
}
assert_eq!(i, expected.len());
}
@@ -2042,7 +2092,7 @@ mod test_set {
let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
for x in a.union(&b) {
assert!(expected.contains(x));
- i += 1
+ i += 1;
}
assert_eq!(i, expected.len());
}
@@ -2068,7 +2118,7 @@ mod test_set {
fn test_from_iter() {
let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9];
- let set: HashSet<_> = xs.iter().cloned().collect();
+ let set: HashSet<_> = xs.iter().copied().collect();
for x in &xs {
assert!(set.contains(x));
@@ -2230,7 +2280,7 @@ mod test_set {
#[test]
fn test_retain() {
let xs = [1, 2, 3, 4, 5, 6];
- let mut set: HashSet<i32> = xs.iter().cloned().collect();
+ let mut set: HashSet<i32> = xs.iter().copied().collect();
set.retain(|&k| k % 2 == 0);
assert_eq!(set.len(), 3);
assert!(set.contains(&2));
@@ -2272,7 +2322,7 @@ mod test_set {
const EMPTY_SET: HashSet<u32, MyHasher> = HashSet::with_hasher(MyHasher);
- let mut set = EMPTY_SET.clone();
+ let mut set = EMPTY_SET;
set.insert(19);
assert!(set.contains(&19));
}
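Editor's note: dropping the `.clone()` on `EMPTY_SET` works because a `const` item is inlined at each use site, so every mention already produces a fresh owned value. The same behavior, shown on a plain `const` (illustrative, not hashbrown code):

```rust
const EMPTY: Vec<u32> = Vec::new(); // `Vec::new` is a `const fn`
let mut a = EMPTY; // fresh value, independent of any other use
let b = EMPTY;     // another fresh value
a.push(1);
assert!(b.is_empty());
```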
diff --git a/tests/rayon.rs b/tests/rayon.rs
index 39b4770..8c603c5 100644
--- a/tests/rayon.rs
+++ b/tests/rayon.rs
@@ -269,20 +269,20 @@ fn map_seq_par_equivalence_existing_empty_extend_empty() {
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
- map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
- map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
+ map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied());
+ map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_empty_extend() {
- let expected = MAP_EXTENSION.iter().cloned().collect::<HashMap<_, _>>();
+ let expected = MAP_EXTENSION.iter().copied().collect::<HashMap<_, _>>();
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
- map_seq.extend(MAP_EXTENSION.iter().cloned());
- map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
+ map_seq.extend(MAP_EXTENSION.iter().copied());
+ map_par.par_extend(MAP_EXTENSION.par_iter().copied());
assert_eq3!(map_seq, map_par, expected);
}
@@ -293,8 +293,8 @@ fn map_seq_par_equivalence_existing_extend_empty() {
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
- map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
- map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
+ map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied());
+ map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied());
assert_eq3!(map_seq, map_par, expected);
}
@@ -305,8 +305,8 @@ fn map_seq_par_equivalence_existing_extend() {
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
- map_seq.extend(MAP_EXTENSION.iter().cloned());
- map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
+ map_seq.extend(MAP_EXTENSION.iter().copied());
+ map_par.par_extend(MAP_EXTENSION.par_iter().copied());
assert_eq3!(map_seq, map_par, expected);
}
@@ -423,20 +423,20 @@ fn set_seq_par_equivalence_existing_empty_extend_empty() {
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
- set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
- set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
+ set_seq.extend(SET_EXTENSION_EMPTY.iter().copied());
+ set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_empty_extend() {
- let expected = SET_EXTENSION.iter().cloned().collect::<HashSet<_>>();
+ let expected = SET_EXTENSION.iter().copied().collect::<HashSet<_>>();
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
- set_seq.extend(SET_EXTENSION.iter().cloned());
- set_par.par_extend(SET_EXTENSION.par_iter().cloned());
+ set_seq.extend(SET_EXTENSION.iter().copied());
+ set_par.par_extend(SET_EXTENSION.par_iter().copied());
assert_eq3!(set_seq, set_par, expected);
}
@@ -447,8 +447,8 @@ fn set_seq_par_equivalence_existing_extend_empty() {
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
- set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
- set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
+ set_seq.extend(SET_EXTENSION_EMPTY.iter().copied());
+ set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied());
assert_eq3!(set_seq, set_par, expected);
}
@@ -459,37 +459,37 @@ fn set_seq_par_equivalence_existing_extend() {
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
- set_seq.extend(SET_EXTENSION.iter().cloned());
- set_par.par_extend(SET_EXTENSION.par_iter().cloned());
+ set_seq.extend(SET_EXTENSION.iter().copied());
+ set_par.par_extend(SET_EXTENSION.par_iter().copied());
assert_eq3!(set_seq, set_par, expected);
}
lazy_static! {
- static ref SET_A: HashSet<char> = ['a', 'b', 'c', 'd'].iter().cloned().collect();
- static ref SET_B: HashSet<char> = ['a', 'b', 'e', 'f'].iter().cloned().collect();
- static ref SET_DIFF_AB: HashSet<char> = ['c', 'd'].iter().cloned().collect();
- static ref SET_DIFF_BA: HashSet<char> = ['e', 'f'].iter().cloned().collect();
- static ref SET_SYMM_DIFF_AB: HashSet<char> = ['c', 'd', 'e', 'f'].iter().cloned().collect();
- static ref SET_INTERSECTION_AB: HashSet<char> = ['a', 'b'].iter().cloned().collect();
+ static ref SET_A: HashSet<char> = ['a', 'b', 'c', 'd'].iter().copied().collect();
+ static ref SET_B: HashSet<char> = ['a', 'b', 'e', 'f'].iter().copied().collect();
+ static ref SET_DIFF_AB: HashSet<char> = ['c', 'd'].iter().copied().collect();
+ static ref SET_DIFF_BA: HashSet<char> = ['e', 'f'].iter().copied().collect();
+ static ref SET_SYMM_DIFF_AB: HashSet<char> = ['c', 'd', 'e', 'f'].iter().copied().collect();
+ static ref SET_INTERSECTION_AB: HashSet<char> = ['a', 'b'].iter().copied().collect();
static ref SET_UNION_AB: HashSet<char> =
- ['a', 'b', 'c', 'd', 'e', 'f'].iter().cloned().collect();
+ ['a', 'b', 'c', 'd', 'e', 'f'].iter().copied().collect();
}
#[test]
fn set_seq_par_equivalence_difference() {
- let diff_ab_seq = SET_A.difference(&*SET_B).cloned().collect::<HashSet<_>>();
+ let diff_ab_seq = SET_A.difference(&*SET_B).copied().collect::<HashSet<_>>();
let diff_ab_par = SET_A
.par_difference(&*SET_B)
- .cloned()
+ .copied()
.collect::<HashSet<_>>();
assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB);
- let diff_ba_seq = SET_B.difference(&*SET_A).cloned().collect::<HashSet<_>>();
+ let diff_ba_seq = SET_B.difference(&*SET_A).copied().collect::<HashSet<_>>();
let diff_ba_par = SET_B
.par_difference(&*SET_A)
- .cloned()
+ .copied()
.collect::<HashSet<_>>();
assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA);
@@ -499,11 +499,11 @@ fn set_seq_par_equivalence_difference() {
fn set_seq_par_equivalence_symmetric_difference() {
let symm_diff_ab_seq = SET_A
.symmetric_difference(&*SET_B)
- .cloned()
+ .copied()
.collect::<HashSet<_>>();
let symm_diff_ab_par = SET_A
.par_symmetric_difference(&*SET_B)
- .cloned()
+ .copied()
.collect::<HashSet<_>>();
assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB);
@@ -511,10 +511,10 @@ fn set_seq_par_equivalence_symmetric_difference() {
#[test]
fn set_seq_par_equivalence_intersection() {
- let intersection_ab_seq = SET_A.intersection(&*SET_B).cloned().collect::<HashSet<_>>();
+ let intersection_ab_seq = SET_A.intersection(&*SET_B).copied().collect::<HashSet<_>>();
let intersection_ab_par = SET_A
.par_intersection(&*SET_B)
- .cloned()
+ .copied()
.collect::<HashSet<_>>();
assert_eq3!(
@@ -526,8 +526,8 @@ fn set_seq_par_equivalence_intersection() {
#[test]
fn set_seq_par_equivalence_union() {
- let union_ab_seq = SET_A.union(&*SET_B).cloned().collect::<HashSet<_>>();
- let union_ab_par = SET_A.par_union(&*SET_B).cloned().collect::<HashSet<_>>();
+ let union_ab_seq = SET_A.union(&*SET_B).copied().collect::<HashSet<_>>();
+ let union_ab_par = SET_A.par_union(&*SET_B).copied().collect::<HashSet<_>>();
assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB);
}
diff --git a/tests/set.rs b/tests/set.rs
index 3fc0717..5ae1ec9 100644
--- a/tests/set.rs
+++ b/tests/set.rs
@@ -2,29 +2,33 @@
use hashbrown::HashSet;
use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};
+use std::iter;
#[test]
fn test_hashset_insert_remove() {
let mut m: HashSet<Vec<char>> = HashSet::new();
- //let num: u32 = 4096;
- //let tx: Vec<Vec<u8>> = (0..num).map(|i| (i..(16 + i)).collect()).collect();
- let seed: [u8; 16] = [
- 130, 220, 246, 217, 111, 124, 221, 189, 190, 234, 121, 93, 67, 95, 100, 43,
- ];
+ let seed = u64::from_le_bytes(*b"testseed");
- let rng = &mut SmallRng::from_seed(seed);
- let tx: Vec<Vec<char>> = (0..4096)
- .map(|_| (rng.sample_iter(&Alphanumeric).take(32).collect()))
- .collect();
+ let rng = &mut SmallRng::seed_from_u64(seed);
+ let tx: Vec<Vec<char>> = iter::repeat_with(|| {
+ rng.sample_iter(&Alphanumeric)
+ .take(32)
+ .map(char::from)
+ .collect()
+ })
+ .take(4096)
+ .collect();
+ // more readable with explicit `true` / `false`
+ #[allow(clippy::bool_assert_comparison)]
for _ in 0..32 {
- for i in 0..4096 {
- assert_eq!(m.contains(&tx[i].clone()), false);
- assert_eq!(m.insert(tx[i].clone()), true);
+ for x in &tx {
+ assert_eq!(m.contains(x), false);
+ assert_eq!(m.insert(x.clone()), true);
}
- for i in 0..4096 {
- println!("removing {} {:?}", i, tx[i]);
- assert_eq!(m.remove(&tx[i]), true);
+ for (i, x) in tx.iter().enumerate() {
+ println!("removing {} {:?}", i, x);
+ assert_eq!(m.remove(x), true);
}
}
}
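Editor's note: one detail worth flagging in the reworked test above is how the new seed is built — an 8-byte ASCII literal packed into a `u64`, which keeps the seed both deterministic and human-readable:

```rust
// `*b"testseed"` is a `[u8; 8]`; `from_le_bytes` packs it into one u64.
let seed = u64::from_le_bytes(*b"testseed");
assert_eq!(seed.to_le_bytes(), *b"testseed");
```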