Diffstat (limited to 'abseil-cpp/absl/container/internal/raw_hash_set.h')
-rw-r--r--    abseil-cpp/absl/container/internal/raw_hash_set.h    2159
1 files changed, 1571 insertions, 588 deletions
diff --git a/abseil-cpp/absl/container/internal/raw_hash_set.h b/abseil-cpp/absl/container/internal/raw_hash_set.h
index ec13a2f..5f89d8e 100644
--- a/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -53,75 +53,208 @@
//
// IMPLEMENTATION DETAILS
//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+// struct BackingArray {
+// // The number of elements we can insert before growing the capacity.
+// size_t growth_left;
+// // Control bytes for the "real" slots.
+// ctrl_t ctrl[capacity];
+// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+// // stop and serves no other purpose.
+// ctrl_t sentinel;
+// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+// // that if a probe sequence picks a value near the end of `ctrl`,
+// // `Group` will have valid control bytes to look at.
+// ctrl_t clones[kWidth - 1];
+// // The actual slot data.
+// slot_type slots[capacity];
+// };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either a special value for empty slots, deleted slots
+// (sometimes called *tombstones*), and a special end-of-table marker used by
+// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
+// corresponding slot.
+//
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
+//
+// # Hashing
+//
+// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point
+// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
+// objects that cannot possibly be the one we are looking for.
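+//
+// As a quick sketch (matching `H1()` and `H2()` defined further below, where
+// the salt comes from `PerTableSalt()`):
+//
+//   size_t h1 = (hash >> 7) ^ per_table_salt;  // probe starting point
+//   h2_t   h2 = hash & 0x7F;                   // 7 bits kept in the ctrl byte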
+//
+// # Table operations.
+//
+// The key operations are `insert`, `find`, and `erase`.
+//
+// Since `insert` and `erase` are implemented in terms of `find`, we describe
+// `find` first. To `find` a value `x`, we compute `hash(x)`. From
+// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
+// group of slots in some interesting order.
//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
+// group, we stop and return an error. Each candidate slot `y` is compared with
+// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
+// next probe index. Tombstones effectively behave like full slots that never
+// match the value we're looking for.
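+//
+// A condensed sketch of this loop, using the helpers defined further below
+// (`probe()`, `Group`, `H2()`); `eq`, `slot`, and `iterator_at` are
+// illustrative names only:
+//
+//   auto seq = probe(ctrl, capacity, hash);
+//   while (true) {
+//     Group g{ctrl + seq.offset()};
+//     for (uint32_t i : g.Match(H2(hash))) {
+//       if (eq(slot(seq.offset(i)), x)) return iterator_at(seq.offset(i));
+//     }
+//     if (g.MaskEmpty()) return end();
+//     seq.next();
+//   }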
//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// The `H2` bits ensure that when we compare a slot to an object with `==`, we
+// are likely to have actually found the object. That is, the chance is low
+// that `==` is called and returns `false`. Thus, when we search for an
+// object, we are unlikely to call `==` many times. This likelihood can be
+// analyzed as follows (assuming that H2 is a random enough hash function).
//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// Let's assume that there are `k` "wrong" objects that must be examined in a
+// probe sequence. For example, when doing a `find` on an object that is in the
+// table, `k` is the number of objects between the start of the probe sequence
+// and the final found object (not including the final found object). The
+// expected number of objects with an H2 match is then `k/128`. Measurements
+// and analysis indicate that even at high load factors, `k` is less than 32,
+// meaning that the number of "false positive" comparisons we must perform is
+// less than 1/8 per `find`.
+//
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
//
-// P(0) = H1 % N
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
//
-// P(i) = (P(i - 1) + i) % N
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its
+// contents. If we can prove that the slot would not appear in a probe
+// sequence, we can mark the slot as empty instead. We can prove this by
+// observing that if a group has any empty slots, it has never been full
+// (assuming we never create an empty slot in a group with no empties, which
+// this heuristic guarantees we never do), and `find` would stop at this group
+// anyway (since it does not probe beyond groups with empties).
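+//
+// A rough sketch (the real logic lives in `erase_meta_only`, whose
+// type-erased form `EraseMetaOnly()` is declared at the bottom of this file;
+// `group_has_empty` is an illustrative name):
+//
+//   destroy(slot);
+//   SetCtrl(common, index,
+//           group_has_empty ? ctrl_t::kEmpty : ctrl_t::kDeleted, slot_size);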
+//
+// `erase` is `erase_at` composed with `find`: if we have a value `x`, we can
+// perform a `find`, and then `erase_at` the resulting slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#include <algorithm>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <limits>
#include <memory>
+#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
-#include "absl/base/internal/bits.h"
+#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
+#include "absl/base/prefetch.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
-#include "absl/container/internal/have_sse.h"
-#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the backing
+// array and iterators. In the backing array, we store the generation between
+// the control bytes and the slots. When iterators are dereferenced, we assert
+// that the container has not been mutated in a way that could cause iterator
+// invalidation since the iterator was initialized.
+#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
+#endif
+
+// We use uint8_t so we don't need to worry about padding.
+using GenerationType = uint8_t;
+
+// A sentinel value for empty generations. Using 0 makes it easy to constexpr
+// initialize an array of this value.
+constexpr GenerationType SentinelEmptyGeneration() { return 0; }
+
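+// Advances to the next generation, skipping the empty-generation sentinel;
+// e.g., `NextGeneration(255)` wraps to 1.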
+constexpr GenerationType NextGeneration(GenerationType generation) {
+ return ++generation == SentinelEmptyGeneration() ? ++generation : generation;
+}
+
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+constexpr bool SwisstableGenerationsEnabled() { return true; }
+constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
+#else
+constexpr bool SwisstableGenerationsEnabled() { return false; }
+constexpr size_t NumGenerationBytes() { return 0; }
+#endif
+
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
std::true_type /* propagate_on_container_swap */) {
@@ -132,14 +265,40 @@ template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// are mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
+//
+// It turns out that this probe sequence visits every group exactly once if the
+// number of groups is a power of two, since (i^2+i)/2 is a bijection in
+// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
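+//
+// For example, with `Width == 16` the sequence of visited offsets is
+// `hash, hash + 16, hash + 48, hash + 96, ...` (all modulo `mask + 1`).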
template <size_t Width>
class probe_seq {
public:
+ // Creates a new probe sequence using `hash` as the initial value of the
+ // sequence and `mask` (usually the capacity of the table) as the mask to
+ // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
+
+ // The offset within the table, i.e., the value `p(i)` above.
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
@@ -148,7 +307,7 @@ class probe_seq {
offset_ += index_;
offset_ &= mask_;
}
- // 0-based probe index. The i-th probe in the probe sequence.
+ // 0-based probe index, a multiple of `Width`.
size_t index() const { return index_; }
private:
@@ -172,9 +331,9 @@ struct IsDecomposable : std::false_type {};
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
- absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
+ absl::void_t<decltype(Policy::apply(
+ RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -189,69 +348,85 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
}
template <typename T>
-int TrailingZeros(T x) {
- return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
- static_cast<uint64_t>(x))
- : base_internal::CountTrailingZerosNonZero32(
- static_cast<uint32_t>(x));
+uint32_t TrailingZeros(T x) {
+ ABSL_ASSUME(x != 0);
+ return static_cast<uint32_t>(countr_zero(x));
}
-template <typename T>
-int LeadingZeros(T x) {
- return sizeof(T) == 8
- ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
- : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
-}
+// An abstract bitmask, such as that emitted by a SIMD instruction.
+//
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an
+// abstract bit. To add iteration on top of that, the implementation must
+// guarantee that no more than one real bit is set in an abstract bit.
+template <class T, int SignificantBits, int Shift = 0>
+class NonIterableBitMask {
+ public:
+ explicit NonIterableBitMask(T mask) : mask_(mask) {}
+
+ explicit operator bool() const { return this->mask_ != 0; }
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+ // Returns the index of the lowest *abstract* bit set in `self`.
+ uint32_t LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ // Returns the index of the highest *abstract* bit set in `self`.
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
+ }
+
+ // Returns the number of trailing zero *abstract* bits.
+ uint32_t TrailingZeros() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ // Returns the number of leading zero *abstract* bits.
+ uint32_t LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
+ }
+
+ T mask_;
+};
+
+// A mask that can be iterated over.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
//
// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+ using Base = NonIterableBitMask<T, SignificantBits, Shift>;
static_assert(std::is_unsigned<T>::value, "");
static_assert(Shift == 0 || Shift == 3, "");
public:
- // These are useful for unit tests (gunit).
+ explicit BitMask(T mask) : Base(mask) {}
+ // BitMask is an iterator over the indices of its abstract bits.
using value_type = int;
using iterator = BitMask;
using const_iterator = BitMask;
- explicit BitMask(T mask) : mask_(mask) {}
BitMask& operator++() {
- mask_ &= (mask_ - 1);
+ this->mask_ &= (this->mask_ - 1);
return *this;
}
- explicit operator bool() const { return mask_ != 0; }
- int operator*() const { return LowestBitSet(); }
- int LowestBitSet() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
- int HighestBitSet() const {
- return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
- 1) >>
- Shift;
- }
+
+ uint32_t operator*() const { return Base::LowestBitSet(); }
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
- int TrailingZeros() const {
- return container_internal::TrailingZeros(mask_) >> Shift;
- }
-
- int LeadingZeros() const {
- constexpr int total_significant_bits = SignificantBits << Shift;
- constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
- }
-
private:
friend bool operator==(const BitMask& a, const BitMask& b) {
return a.mask_ == b.mask_;
@@ -259,75 +434,137 @@ class BitMask {
friend bool operator!=(const BitMask& a, const BitMask& b) {
return a.mask_ != b.mask_;
}
-
- T mask_;
};
-using ctrl_t = signed char;
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
// below for details.
-enum Ctrl : ctrl_t {
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
kSentinel = -1, // 0b11111111
};
static_assert(
- kEmpty & kDeleted & kSentinel & 0x80,
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
"Special markers need to have the MSB to make checking for them efficient");
-static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
- "kEmpty and kDeleted must be smaller than kSentinel to make the "
- "SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(kSentinel == -1,
- "kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
-static_assert(kEmpty == -128,
- "kEmpty must be -128 to make the SIMD check for its "
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
"existence efficient (psignb xmm, xmm)");
-static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
- "kEmpty and kDeleted must share an unset bit that is not shared "
- "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
- "efficient");
-static_assert(kDeleted == -2,
- "kDeleted must be -2 to make the implementation of "
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MaskEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
+// See definition comment for why this is size 32.
+ABSL_DLL extern const ctrl_t kEmptyGroup[32];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
- alignas(16) static constexpr ctrl_t empty_group[] = {
- kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
- kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
- return const_cast<ctrl_t*>(empty_group);
+ // Const must be cast away here; no uses of this function will actually write
+ // to it, because it is only used for empty tables.
+ return const_cast<ctrl_t*>(kEmptyGroup + 16);
+}
+
+// Returns a pointer to a generation to use for an empty hashtable.
+GenerationType* EmptyGeneration();
+
+// Returns whether `generation` is a generation for an empty hashtable that
+// could be returned by EmptyGeneration().
+inline bool IsEmptyGeneration(const GenerationType* generation) {
+ return *generation == SentinelEmptyGeneration();
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-// Returns a hash seed.
+// Returns a per-table hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
+ return (hash >> 7) ^ PerTableSalt(ctrl);
}
-inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
-inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= 0; }
-inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+// Helpers for checking the state of a control byte.
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
+
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -354,40 +591,42 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of slots that match hash.
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
- auto match = _mm_set1_epi8(hash);
+ auto match = _mm_set1_epi8(static_cast<char>(hash));
return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
}
// Returns a bitmask representing the positions of empty slots.
- BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
- // This only works because kEmpty is -128.
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+ // This only works because ctrl_t::kEmpty is -128.
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
- return Match(static_cast<h2_t>(kEmpty));
+ auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
- BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+ return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
- return TrailingZeros(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -401,6 +640,67 @@ struct GroupSse2Impl {
};
#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupAArch64Impl(const ctrl_t* pos) {
+ ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+ }
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ uint8x8_t dup = vdup_n_u8(hash);
+ auto mask = vceq_u8(ctrl, dup);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>(
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
+    // produced bitfield. We then count the number of trailing zeros.
+ // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+ // so we should be fine.
+ return static_cast<uint32_t>(countr_zero(mask)) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t slsbs = 0x0202020202020202ULL;
+ constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
+ auto x = slsbs & (mask >> 6);
+ auto res = (x + midbs) | msbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint8x8_t ctrl;
+};
+#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
+
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -414,7 +714,7 @@ struct GroupPortableImpl {
//
// Caveat: there are false positives but:
// - they only occur if there is a real match
- // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
// - they will be handled gracefully by subsequent checks in code
//
// Example:
@@ -427,19 +727,24 @@ struct GroupPortableImpl {
return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+ msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+ msbs);
}
uint32_t CountLeadingEmptyOrDeleted() const {
- constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
- return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+    // kDeleted. We lower all other bits and count the number of trailing
+    // zeros.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
+ 3);
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -453,44 +758,334 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
+#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
#else
using Group = GroupPortableImpl;
#endif
-template <class Policy, class Hash, class Eq, class Alloc>
-class raw_hash_set;
+// When there is an insertion with no reserved growth, we rehash with
+// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
+// constant divided by capacity ensures that inserting N elements is still O(N)
+// in the average case. Using the constant 16 means that we expect to rehash ~8
+// times more often than when generations are disabled. We are adding expected
+// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
+// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
+inline size_t RehashProbabilityConstant() { return 16; }
+
+class CommonFieldsGenerationInfoEnabled {
+ // A sentinel value for reserved_growth_ indicating that we just ran out of
+ // reserved growth on the last insertion. When reserve is called and then
+ // insertions take place, reserved_growth_'s state machine is N, ..., 1,
+ // kReservedGrowthJustRanOut, 0.
+ static constexpr size_t kReservedGrowthJustRanOut =
+ (std::numeric_limits<size_t>::max)();
+
+ public:
+ CommonFieldsGenerationInfoEnabled() = default;
+ CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
+ : reserved_growth_(that.reserved_growth_),
+ reservation_size_(that.reservation_size_),
+ generation_(that.generation_) {
+ that.reserved_growth_ = 0;
+ that.reservation_size_ = 0;
+ that.generation_ = EmptyGeneration();
+ }
+ CommonFieldsGenerationInfoEnabled& operator=(
+ CommonFieldsGenerationInfoEnabled&&) = default;
+
+  // Whether we should rehash on insert in order to detect bugs caused by
+  // using invalid references. We rehash on the first insertion after
+  // reserved_growth_ reaches 0 after a call to reserve. We also do a rehash
+  // with low probability whenever reserved_growth_ is zero.
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
+ size_t capacity) const;
+ void maybe_increment_generation_on_insert() {
+ if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
+
+ if (reserved_growth_ > 0) {
+ if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
+ } else {
+ *generation_ = NextGeneration(*generation_);
+ }
+ }
+ void reset_reserved_growth(size_t reservation, size_t size) {
+ reserved_growth_ = reservation - size;
+ }
+ size_t reserved_growth() const { return reserved_growth_; }
+ void set_reserved_growth(size_t r) { reserved_growth_ = r; }
+ size_t reservation_size() const { return reservation_size_; }
+ void set_reservation_size(size_t r) { reservation_size_ = r; }
+ GenerationType generation() const { return *generation_; }
+ void set_generation(GenerationType g) { *generation_ = g; }
+ GenerationType* generation_ptr() const { return generation_; }
+ void set_generation_ptr(GenerationType* g) { generation_ = g; }
+
+ private:
+ // The number of insertions remaining that are guaranteed to not rehash due to
+ // a prior call to reserve. Note: we store reserved growth in addition to
+ // reservation size because calls to erase() decrease size_ but don't decrease
+ // reserved growth.
+ size_t reserved_growth_ = 0;
+ // The maximum argument to reserve() since the container was cleared. We need
+ // to keep track of this, in addition to reserved growth, because we reset
+ // reserved growth to this when erase(begin(), end()) is called.
+ size_t reservation_size_ = 0;
+ // Pointer to the generation counter, which is used to validate iterators and
+ // is stored in the backing array between the control bytes and the slots.
+ // Note that we can't store the generation inside the container itself and
+ // keep a pointer to the container in the iterators because iterators must
+ // remain valid when the container is moved.
+ // Note: we could derive this pointer from the control pointer, but it makes
+ // the code more complicated, and there's a benefit in having the sizes of
+ // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
+ // which is that tests are less likely to rely on the size remaining the same.
+ GenerationType* generation_ = EmptyGeneration();
+};
+
+class CommonFieldsGenerationInfoDisabled {
+ public:
+ CommonFieldsGenerationInfoDisabled() = default;
+ CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
+ default;
+ CommonFieldsGenerationInfoDisabled& operator=(
+ CommonFieldsGenerationInfoDisabled&&) = default;
+
+ bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
+ return false;
+ }
+ void maybe_increment_generation_on_insert() {}
+ void reset_reserved_growth(size_t, size_t) {}
+ size_t reserved_growth() const { return 0; }
+ void set_reserved_growth(size_t) {}
+ size_t reservation_size() const { return 0; }
+ void set_reservation_size(size_t) {}
+ GenerationType generation() const { return 0; }
+ void set_generation(GenerationType) {}
+ GenerationType* generation_ptr() const { return nullptr; }
+ void set_generation_ptr(GenerationType*) {}
+};
+
+class HashSetIteratorGenerationInfoEnabled {
+ public:
+ HashSetIteratorGenerationInfoEnabled() = default;
+ explicit HashSetIteratorGenerationInfoEnabled(
+ const GenerationType* generation_ptr)
+ : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
+
+ GenerationType generation() const { return generation_; }
+ void reset_generation() { generation_ = *generation_ptr_; }
+ const GenerationType* generation_ptr() const { return generation_ptr_; }
+ void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
+
+ private:
+ const GenerationType* generation_ptr_ = EmptyGeneration();
+ GenerationType generation_ = *generation_ptr_;
+};
+
+class HashSetIteratorGenerationInfoDisabled {
+ public:
+ HashSetIteratorGenerationInfoDisabled() = default;
+ explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
+
+ GenerationType generation() const { return 0; }
+ void reset_generation() {}
+ const GenerationType* generation_ptr() const { return nullptr; }
+ void set_generation_ptr(const GenerationType*) {}
+};
+
+#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
+using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
+#else
+using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
+using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
+#endif
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-// PRECONDITION:
-// IsValidCapacity(capacity)
-// ctrl[capacity] == kSentinel
-// ctrl[i] != kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
- ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == kSentinel);
+// Computes the offset of the control bytes from the start of the backing
+// allocation. growth_left is stored at the beginning of the backing array.
+inline size_t ControlOffset() { return sizeof(size_t); }
+
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) of the generation counter (if it exists).
+inline size_t GenerationOffset(size_t capacity) {
assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
- Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return ControlOffset() + num_control_bytes;
+}
+
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
+ (~slot_align + 1);
+}
+
+// Given the capacity of a table, computes the total size of the backing
+// array.
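+//
+// As a worked example (illustrative; assuming a 64-bit build, `capacity ==
+// 15`, `Group::kWidth == 16`, no generation bytes, and 8-byte slots aligned
+// to 8 bytes): growth_left sits at offset 0, the 15 + 1 + 15 = 31 control
+// bytes start at offset 8, the slots start at offset 40, and
+// `AllocSize(15, 8, 8) == 40 + 15 * 8 == 160`.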
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
+
+// CommonFields hold the fields in raw_hash_set that do not depend
+// on template parameters. This allows us to conveniently pass all
+// of this state to helper functions as a single argument.
+class CommonFields : public CommonFieldsGenerationInfo {
+ public:
+ CommonFields() = default;
+
+ // Not copyable
+ CommonFields(const CommonFields&) = delete;
+ CommonFields& operator=(const CommonFields&) = delete;
+
+ // Movable
+ CommonFields(CommonFields&& that)
+ : CommonFieldsGenerationInfo(
+ std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
+        // Explicitly copying fields into "this" and then resetting "that"
+        // fields generates less code than calling absl::exchange per field.
+ control_(that.control()),
+ slots_(that.slot_array()),
+ capacity_(that.capacity()),
+ compressed_tuple_(that.size(), std::move(that.infoz())) {
+ that.set_control(EmptyGroup());
+ that.set_slots(nullptr);
+ that.set_capacity(0);
+ that.set_size(0);
+ }
+ CommonFields& operator=(CommonFields&&) = default;
+
+ ctrl_t* control() const { return control_; }
+ void set_control(ctrl_t* c) { control_ = c; }
+ void* backing_array_start() const {
+ // growth_left is stored before control bytes.
+ assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
+ return control() - sizeof(size_t);
+ }
+
+ // Note: we can't use slots() because Qt defines "slots" as a macro.
+ void* slot_array() const { return slots_; }
+ void set_slots(void* s) { slots_ = s; }
+
+ // The number of filled slots.
+ size_t size() const { return compressed_tuple_.template get<0>(); }
+ void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
+
+ // The total number of available slots.
+ size_t capacity() const { return capacity_; }
+ void set_capacity(size_t c) {
+ assert(c == 0 || IsValidCapacity(c));
+ capacity_ = c;
+ }
+
+ // The number of slots we can still fill without needing to rehash.
+ // This is stored in the heap allocation before the control bytes.
+ size_t growth_left() const {
+ return *reinterpret_cast<size_t*>(backing_array_start());
}
- // Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
- ctrl[capacity] = kSentinel;
+ void set_growth_left(size_t gl) {
+ *reinterpret_cast<size_t*>(backing_array_start()) = gl;
+ }
+
+ HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
+ const HashtablezInfoHandle& infoz() const {
+ return compressed_tuple_.template get<1>();
+ }
+
+ bool should_rehash_for_bug_detection_on_insert() const {
+ return CommonFieldsGenerationInfo::
+ should_rehash_for_bug_detection_on_insert(control(), capacity());
+ }
+ void reset_reserved_growth(size_t reservation) {
+ CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
+ }
+
+ // The size of the backing array allocation.
+ size_t alloc_size(size_t slot_size, size_t slot_align) const {
+ return AllocSize(capacity(), slot_size, slot_align);
+ }
+
+ // Returns the number of control bytes set to kDeleted. For testing only.
+ size_t TombstonesCount() const {
+ return static_cast<size_t>(
+ std::count(control(), control() + capacity(), ctrl_t::kDeleted));
+ }
+
+ private:
+ // TODO(b/259599413): Investigate removing some of these fields:
+ // - control/slots can be derived from each other
+ // - we can use 6 bits for capacity since it's always a power of two minus 1
+
+ // The control bytes (and, also, a pointer near to the base of the backing
+ // array).
+ //
+ // This contains `capacity + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ //
+ // Note that growth_left is stored immediately before this pointer.
+ ctrl_t* control_ = EmptyGroup();
+
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `control`. May be null for empty tables.
+ void* slots_ = nullptr;
+
+ size_t capacity_ = 0;
+
+ // Bundle together size and HashtablezInfoHandle to ensure EBO for
+ // HashtablezInfoHandle when sampling is turned off.
+ absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
+ compressed_tuple_{0u, HashtablezInfoHandle{}};
+};
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
+// Returns the next valid capacity after `n`.
+inline size_t NextCapacity(size_t n) {
+ assert(IsValidCapacity(n) || n == 0);
+ return n * 2 + 1;
}
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Applies the following mapping to every byte in the control array:
+// * kDeleted -> kEmpty
+// * kEmpty -> kEmpty
+// * _ -> kDeleted
+// PRECONDITION:
+// IsValidCapacity(capacity)
+// ctrl[capacity] == ctrl_t::kSentinel
+// ctrl[i] != ctrl_t::kSentinel for all i < capacity
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
+
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
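+// For example, on a 64-bit build, `NormalizeCapacity(5) == 7` and
+// `NormalizeCapacity(16) == 31`.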
inline size_t NormalizeCapacity(size_t n) {
- return n ? ~size_t{} >> LeadingZeros(n) : 1;
+ return n ? ~size_t{} >> countl_zero(n) : 1;
}
-// We use 7/8th as maximum load factor.
-// For 16-wide groups, that gives an average of two empty slots per group.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -500,8 +1095,12 @@ inline size_t CapacityToGrowth(size_t capacity) {
}
return capacity - capacity / 8;
}
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and required NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on this.
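+//
+// As an illustrative round trip with `Group::kWidth == 16`:
+// `CapacityToGrowth(15) == 14` and `GrowthToLowerboundCapacity(14) == 15`.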
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -511,18 +1110,371 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
-inline void AssertIsFull(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+ size_t bucket_count) {
+ if (bucket_count != 0) {
+ return bucket_count;
+ }
+ using InputIterCategory =
+ typename std::iterator_traits<InputIter>::iterator_category;
+ if (std::is_base_of<std::random_access_iterator_tag,
+ InputIterCategory>::value) {
+ return GrowthToLowerboundCapacity(
+ static_cast<size_t>(std::distance(first, last)));
+ }
+ return 0;
+}
+
+constexpr bool SwisstableDebugEnabled() {
+#if defined(ABSL_SWISSTABLE_ENABLE_GENERATIONS) || \
+ ABSL_OPTION_HARDENED == 1 || !defined(NDEBUG)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
+ const GenerationType* generation_ptr,
+ const char* operation) {
+ if (!SwisstableDebugEnabled()) return;
+ if (ctrl == nullptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) + " called on end() iterator.");
+ }
+ if (ctrl == EmptyGroup()) {
+ ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
+ " called on default-constructed iterator.");
+ }
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element was likely erased.");
+ }
+ } else {
+ if (!IsFull(*ctrl)) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ std::string(operation) +
+ " called on invalid iterator. The element might have been erased "
+ "or the table might have rehashed. Consider running with "
+ "--config=asan to diagnose rehashing issues.");
+ }
+ }
+}
+
+// Note that for comparisons, null/end iterators are valid.
+inline void AssertIsValidForComparison(const ctrl_t* ctrl,
+ GenerationType generation,
+ const GenerationType* generation_ptr) {
+ if (!SwisstableDebugEnabled()) return;
+ const bool ctrl_is_valid_for_comparison =
+ ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
+ if (SwisstableGenerationsEnabled()) {
+ if (generation != *generation_ptr) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. The table could have "
+ "rehashed since this iterator was initialized.");
+ }
+ if (!ctrl_is_valid_for_comparison) {
+ ABSL_INTERNAL_LOG(
+ FATAL, "Invalid iterator comparison. The element was likely erased.");
+ }
+ } else {
+ ABSL_HARDENING_ASSERT(
+ ctrl_is_valid_for_comparison &&
+ "Invalid iterator comparison. The element might have been erased or "
+ "the table might have rehashed. Consider running with --config=asan to "
+ "diagnose rehashing issues.");
+ }
+}
+
+// If the two iterators come from the same container, then their pointers will
+// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
+ const ctrl_t* ctrl_b,
+ const void* const& slot_a,
+ const void* const& slot_b) {
+ // If either control byte is null, then we can't tell.
+ if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
+ const void* low_slot = slot_a;
+ const void* hi_slot = slot_b;
+ if (ctrl_a > ctrl_b) {
+ std::swap(ctrl_a, ctrl_b);
+ std::swap(low_slot, hi_slot);
+ }
+ return ctrl_b < low_slot && low_slot <= hi_slot;
+}
+
+// Asserts that two iterators come from the same container.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
+ const void* const& slot_a,
+ const void* const& slot_b,
+ const GenerationType* generation_ptr_a,
+ const GenerationType* generation_ptr_b) {
+ if (!SwisstableDebugEnabled()) return;
+ const bool a_is_default = ctrl_a == EmptyGroup();
+ const bool b_is_default = ctrl_b == EmptyGroup();
+ if (a_is_default != b_is_default) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ "Invalid iterator comparison. Comparing default-constructed iterator "
+ "with non-default-constructed iterator.");
+ }
+ if (a_is_default && b_is_default) return;
+
+ if (SwisstableGenerationsEnabled()) {
+ if (generation_ptr_a == generation_ptr_b) return;
+ const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
+ const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
+ if (a_is_empty != b_is_empty) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator from "
+ "a non-empty hashtable with an iterator from an empty "
+ "hashtable.");
+ }
+ if (a_is_empty && b_is_empty) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterators from "
+ "different empty hashtables.");
+ }
+ const bool a_is_end = ctrl_a == nullptr;
+ const bool b_is_end = ctrl_b == nullptr;
+ if (a_is_end || b_is_end) {
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator with "
+ "an end() iterator from a different hashtable.");
+ }
+ ABSL_INTERNAL_LOG(FATAL,
+ "Invalid iterator comparison. Comparing non-end() "
+ "iterators from different hashtables.");
+ } else {
+ ABSL_HARDENING_ASSERT(
+ AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+ "Invalid iterator comparison. The iterators may be from different "
+ "containers or the container might have rehashed. Consider running "
+ "with --config=asan to diagnose rehashing issues.");
+ }
+}
+
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+};
+
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
+//
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// `find_first_non_full()`, where we never try
+// `ShouldInsertBackwards()` for small tables.
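+//
+// For example, with `Group::kWidth == 16`, the "small" capacities are 1, 3,
+// and 7.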
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+
+// Begins a probing operation on `common.control`, using `hash`.
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
+ size_t hash) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
+ return probe(common.control(), common.capacity(), hash);
+}
+
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
+//
+// Behavior when the entire table is full is undefined.
+//
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
+template <typename = void>
+inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
+ auto seq = probe(common, hash);
+ const ctrl_t* ctrl = common.control();
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MaskEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+ // In debug build we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(common.capacity()) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
+ assert(seq.index() <= common.capacity() && "full table!");
+ }
+}
+
+// An extern template for the inline function above keeps the possibility of
+// inlining. When the compiler decides not to inline it, no symbols will be
+// added to the corresponding translation unit.
+extern template FindInfo find_first_non_full(const CommonFields&, size_t);
+
+// Non-inlined version of find_first_non_full for use in less
+// performance critical routines.
+FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
+
+inline void ResetGrowthLeft(CommonFields& common) {
+ common.set_growth_left(CapacityToGrowth(common.capacity()) - common.size());
}
-inline void AssertIsValid(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+// Sets `ctrl` to `{kEmpty, kEmpty, ..., kEmpty}` with `kSentinel` at index
+// `capacity`, marking the entire array as empty.
+inline void ResetCtrl(CommonFields& common, size_t slot_size) {
+ const size_t capacity = common.capacity();
+ ctrl_t* ctrl = common.control();
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
+ ResetGrowthLeft(common);
}
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
+inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
+ size_t slot_size) {
+ const size_t capacity = common.capacity();
+ assert(i < capacity);
+
+ auto* slot_i = static_cast<const char*>(common.slot_array()) + i * slot_size;
+ if (IsFull(h)) {
+ SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+ } else {
+ SanitizerPoisonMemoryRegion(slot_i, slot_size);
+ }
+
+ ctrl_t* ctrl = common.control();
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+}
+
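+// For example (a worked instance of the mirroring arithmetic above, assuming
+// 16-byte groups so that NumClonedBytes() == 15): with capacity == 15,
+// SetCtrl(common, 0, h, slot_size) writes both ctrl[0] and its clone
+// ctrl[16]; with capacity == 31, SetCtrl(common, 20, h, slot_size) computes
+// ((20 - 15) & 31) + (15 & 31) == 20, so only ctrl[20] itself is written.
+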
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
+inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
+ size_t slot_size) {
+ SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
+}
+
+// growth_left (which is a size_t) is stored with the backing array.
+constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
+ return (std::max)(align_of_slot, alignof(size_t));
+}
+
+template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
+ assert(c.capacity());
+ // Folks with custom allocators often make unwarranted assumptions about the
+  // behavior of their classes vis-a-vis trivial destructibility and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ const size_t sample_size =
+ (std::is_same<Alloc, std::allocator<char>>::value &&
+ c.slot_array() == nullptr)
+ ? SizeOfSlot
+ : 0;
+
+ const size_t cap = c.capacity();
+ const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
+ // growth_left (which is a size_t) is stored with the backing array.
+ char* mem = static_cast<char*>(
+ Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
+ const GenerationType old_generation = c.generation();
+ c.set_generation_ptr(
+ reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
+ c.set_generation(NextGeneration(old_generation));
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
+ c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
+ ResetCtrl(c, SizeOfSlot);
+ if (sample_size) {
+ c.infoz() = Sample(sample_size);
+ }
+ c.infoz().RecordStorageChanged(c.size(), cap);
+}
+
+// PolicyFunctions bundles together some information for a particular
+// raw_hash_set<T, ...> instantiation. This information is passed to
+// type-erased functions that want to do small amounts of type-specific
+// work.
+struct PolicyFunctions {
+ size_t slot_size;
+
+ // Returns the hash of the pointed-to slot.
+ size_t (*hash_slot)(void* set, void* slot);
+
+ // Transfer the contents of src_slot to dst_slot.
+ void (*transfer)(void* set, void* dst_slot, void* src_slot);
+
+ // Deallocate the backing store from common.
+ void (*dealloc)(CommonFields& common, const PolicyFunctions& policy);
+};
+
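+// For illustration, a type-erased routine can manipulate a slot without
+// knowing the element type. A minimal sketch (the helper name below is
+// illustrative only, not part of this file):
+//
+//   void RelocateSlot(void* set, void* dst_slot, void* src_slot,
+//                     const PolicyFunctions& policy) {
+//     policy.transfer(set, dst_slot, src_slot);  // moves *src_slot to *dst_slot
+//   }
+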
+// ClearBackingArray clears the backing array, either modifying it in place,
+// or creating a new one based on the value of "reuse".
+// REQUIRES: c.capacity > 0
+void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+ bool reuse);
+
+// Type-erased version of raw_hash_set::erase_meta_only.
+void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
+
+// Function to place in PolicyFunctions::dealloc for raw_hash_sets
+// that are using std::allocator. This allows us to share the same
+// function body for raw_hash_set instantiations that have the
+// same slot alignment.
+template <size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
+ const PolicyFunctions& policy) {
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ policy.slot_size * common.capacity());
+
+ std::allocator<char> alloc;
+ Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+ &alloc, common.backing_array_start(),
+ common.alloc_size(policy.slot_size, AlignOfSlot));
+}
+
+// For trivially relocatable types we use memcpy directly. This allows us to
+// share the same function body for raw_hash_set instantiations that have the
+// same slot size as long as they are relocatable.
+template <size_t SizeOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
+ memcpy(dst, src, SizeOfSlot);
+}
+
+// Type-erased version of raw_hash_set::drop_deletes_without_resize.
+void DropDeletesWithoutResize(CommonFields& common,
+ const PolicyFunctions& policy, void* tmp_space);
+
+// A SwissTable.
+//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -579,13 +1531,6 @@ class raw_hash_set {
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
- using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
-
- static Layout MakeLayout(size_t capacity) {
- assert(IsValidCapacity(capacity));
- return Layout(capacity + Group::kWidth + 1, capacity);
- }
-
using AllocTraits = absl::allocator_traits<allocator_type>;
using SlotAlloc = typename absl::allocator_traits<
allocator_type>::template rebind_alloc<slot_type>;
@@ -628,7 +1573,7 @@ class raw_hash_set {
static_assert(std::is_same<const_pointer, const value_type*>::value,
"Allocators with custom pointer types are not supported");
- class iterator {
+ class iterator : private HashSetIteratorGenerationInfo {
friend class raw_hash_set;
public:
@@ -644,16 +1589,19 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- AssertIsFull(ctrl_);
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
- pointer operator->() const { return &operator*(); }
+ pointer operator->() const {
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator->");
+ return &operator*();
+ }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- AssertIsFull(ctrl_);
+ AssertIsFull(ctrl_, generation(), generation_ptr(), "operator++");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -667,8 +1615,10 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- AssertIsValid(a.ctrl_);
- AssertIsValid(b.ctrl_);
+ AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
+ AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
+ AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_,
+ a.generation_ptr(), b.generation_ptr());
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -676,22 +1626,35 @@ class raw_hash_set {
}
private:
- iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ iterator(ctrl_t* ctrl, slot_type* slot,
+ const GenerationType* generation_ptr)
+ : HashSetIteratorGenerationInfo(generation_ptr),
+ ctrl_(ctrl),
+ slot_(slot) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
- ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ ABSL_ASSUME(ctrl != nullptr);
}
+ // For end() iterators.
+ explicit iterator(const GenerationType* generation_ptr)
+ : HashSetIteratorGenerationInfo(generation_ptr), ctrl_(nullptr) {}
+    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
+    // until they reach one.
+ //
+ // If a sentinel is reached, we null `ctrl_` out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
- if (ABSL_PREDICT_FALSE(*ctrl_ == kSentinel)) ctrl_ = nullptr;
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
- ctrl_t* ctrl_ = nullptr;
+ // We use EmptyGroup() for default-constructed iterators so that they can
+ // be distinguished from end iterators, which have nullptr ctrl_.
+ ctrl_t* ctrl_ = EmptyGroup();
// To avoid uninitialized member warnings, put slot_ in an anonymous union.
// The member is not initialized on singleton and end iterators.
union {
@@ -709,9 +1672,9 @@ class raw_hash_set {
using pointer = typename raw_hash_set::const_pointer;
using difference_type = typename raw_hash_set::difference_type;
- const_iterator() {}
+ const_iterator() = default;
// Implicit construction from iterator.
- const_iterator(iterator i) : inner_(std::move(i)) {}
+ const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
reference operator*() const { return *inner_; }
pointer operator->() const { return inner_.operator->(); }
@@ -730,8 +1693,10 @@ class raw_hash_set {
}
private:
- const_iterator(const ctrl_t* ctrl, const slot_type* slot)
- : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+ const_iterator(const ctrl_t* ctrl, const slot_type* slot,
+ const GenerationType* gen)
+ : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
+ }
iterator inner_;
};
@@ -739,18 +1704,20 @@ class raw_hash_set {
using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
using insert_return_type = InsertReturnType<iterator, node_type>;
+ // Note: can't use `= default` due to non-default noexcept (causes
+ // problems for some compilers). NOLINTNEXTLINE
raw_hash_set() noexcept(
- std::is_nothrow_default_constructible<hasher>::value&&
- std::is_nothrow_default_constructible<key_equal>::value&&
- std::is_nothrow_default_constructible<allocator_type>::value) {}
-
- explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
- const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+ std::is_nothrow_default_constructible<hasher>::value &&
+ std::is_nothrow_default_constructible<key_equal>::value &&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+ ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
+ size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
- capacity_ = NormalizeCapacity(bucket_count);
- reset_growth_left();
+ common().set_capacity(NormalizeCapacity(bucket_count));
initialize_slots();
}
}
@@ -769,7 +1736,8 @@ class raw_hash_set {
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : raw_hash_set(bucket_count, hash, eq, alloc) {
+ : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+ hash, eq, alloc) {
insert(first, last);
}
@@ -851,50 +1819,37 @@ class raw_hash_set {
raw_hash_set(const raw_hash_set& that, const allocator_type& a)
: raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
- reserve(that.size());
+ const size_t size = that.size();
+ if (size == 0) return;
+ reserve(size);
// Because the table is guaranteed to be empty, we can do something faster
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full(hash);
- set_ctrl(target.offset, H2(hash));
+ auto target = find_first_non_full_outofline(common(), hash);
+ SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
emplace_at(target.offset, v);
- infoz_.RecordInsert(hash, target.probe_length);
+ common().maybe_increment_generation_on_insert();
+ infoz().RecordInsert(hash, target.probe_length);
}
- size_ = that.size();
- growth_left() -= that.size();
- }
-
- raw_hash_set(raw_hash_set&& that) noexcept(
- std::is_nothrow_copy_constructible<hasher>::value&&
- std::is_nothrow_copy_constructible<key_equal>::value&&
- std::is_nothrow_copy_constructible<allocator_type>::value)
- : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
- slots_(absl::exchange(that.slots_, nullptr)),
- size_(absl::exchange(that.size_, 0)),
- capacity_(absl::exchange(that.capacity_, 0)),
- infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
- // Hash, equality and allocator are copied instead of moved because
- // `that` must be left valid. If Hash is std::function<Key>, moving it
- // would create a nullptr functor that cannot be called.
- settings_(that.settings_) {
- // growth_left was copied above, reset the one from `that`.
- that.growth_left() = 0;
+ common().set_size(size);
+ set_growth_left(growth_left() - size);
}
+ ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
+ std::is_nothrow_copy_constructible<hasher>::value &&
+ std::is_nothrow_copy_constructible<key_equal>::value &&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
+ : // Hash, equality and allocator are copied instead of moved because
+ // `that` must be left valid. If Hash is std::function<Key>, moving it
+ // would create a nullptr functor that cannot be called.
+ settings_(absl::exchange(that.common(), CommonFields{}),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
- : ctrl_(EmptyGroup()),
- slots_(nullptr),
- size_(0),
- capacity_(0),
- settings_(0, that.hash_ref(), that.eq_ref(), a) {
+ : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
if (a == that.alloc_ref()) {
- std::swap(ctrl_, that.ctrl_);
- std::swap(slots_, that.slots_);
- std::swap(size_, that.size_);
- std::swap(capacity_, that.capacity_);
- std::swap(growth_left(), that.growth_left());
- std::swap(infoz_, that.infoz_);
+ std::swap(common(), that.common());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -913,35 +1868,54 @@ class raw_hash_set {
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
- absl::allocator_traits<allocator_type>::is_always_equal::value&&
- std::is_nothrow_move_assignable<hasher>::value&&
- std::is_nothrow_move_assignable<key_equal>::value) {
+ absl::allocator_traits<allocator_type>::is_always_equal::value &&
+ std::is_nothrow_move_assignable<hasher>::value &&
+ std::is_nothrow_move_assignable<key_equal>::value) {
// TODO(sbenza): We should only use the operations from the noexcept clause
// to make sure we actually adhere to that contract.
+ // NOLINTNEXTLINE: not returning *this for performance.
return move_assign(
std::move(that),
typename AllocTraits::propagate_on_container_move_assignment());
}
- ~raw_hash_set() { destroy_slots(); }
+ ~raw_hash_set() {
+ const size_t cap = capacity();
+ if (!cap) return;
+ destroy_slots();
+
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), common().backing_array_start(),
+ AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
- iterator begin() {
+ infoz().Unregister();
+ }
+
+ iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = iterator_at(0);
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return {}; }
+ iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
- const_iterator begin() const {
+ const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return {}; }
- const_iterator cbegin() const { return begin(); }
- const_iterator cend() const { return end(); }
+ const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return iterator(common().generation_ptr());
+ }
+ const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return begin();
+ }
+ const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
bool empty() const { return !size(); }
- size_t size() const { return size_; }
- size_t capacity() const { return capacity_; }
+ size_t size() const { return common().size(); }
+ size_t capacity() const { return common().capacity(); }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -952,20 +1926,26 @@ class raw_hash_set {
// compared to destruction of the elements of the container. So we pick the
// largest bucket_count() threshold for which iteration is still fast and
// past that we simply deallocate the array.
- if (capacity_ > 127) {
+ const size_t cap = capacity();
+ if (cap == 0) {
+ // Already guaranteed to be empty; so nothing to do.
+ } else {
destroy_slots();
- } else if (capacity_) {
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/cap < 128);
+ }
+ common().set_reserved_growth(0);
+ common().set_reservation_size(0);
+ }
+
+ inline void destroy_slots() {
+ const size_t cap = capacity();
+ const ctrl_t* ctrl = control();
+ slot_type* slot = slot_array();
+ for (size_t i = 0; i != cap; ++i) {
+ if (IsFull(ctrl[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slot + i);
}
- size_ = 0;
- reset_ctrl();
- reset_growth_left();
}
- assert(empty());
- infoz_.RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -975,11 +1955,10 @@ class raw_hash_set {
// m.insert(std::make_pair("abc", 42));
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
// bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- std::pair<iterator, bool> insert(T&& value) {
+ std::pair<iterator, bool> insert(T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<T>(value));
}
@@ -994,13 +1973,11 @@ class raw_hash_set {
// const char* p = "hello";
// s.insert(p);
//
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- std::pair<iterator, bool> insert(const T& value) {
+ std::pair<iterator, bool> insert(const T& value)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(value);
}
@@ -1009,7 +1986,8 @@ class raw_hash_set {
//
// flat_hash_map<std::string, int> s;
// s.insert({"abc", 42});
- std::pair<iterator, bool> insert(init_type&& value) {
+ std::pair<iterator, bool> insert(init_type&& value)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::move(value));
}
@@ -1018,27 +1996,26 @@ class raw_hash_set {
template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
- iterator insert(const_iterator, T&& value) {
+ iterator insert(const_iterator, T&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::forward<T>(value)).first;
}
- // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
- // RequiresInsertable<T> with RequiresInsertable<const T&>.
- // We are hitting this bug: https://godbolt.org/g/1Vht4f.
template <
- class T, RequiresInsertable<T> = 0,
+ class T, RequiresInsertable<const T&> = 0,
typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
- iterator insert(const_iterator, const T& value) {
+ iterator insert(const_iterator,
+ const T& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(value).first;
}
- iterator insert(const_iterator, init_type&& value) {
+ iterator insert(const_iterator,
+ init_type&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return insert(std::move(value)).first;
}
template <class InputIt>
void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) insert(*first);
+ for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1050,7 +2027,7 @@ class raw_hash_set {
insert(ilist.begin(), ilist.end());
}
- insert_return_type insert(node_type&& node) {
+ insert_return_type insert(node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
if (!node) return {end(), false, node_type()};
const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
auto res = PolicyTraits::apply(
@@ -1064,8 +2041,11 @@ class raw_hash_set {
}
}
- iterator insert(const_iterator, node_type&& node) {
- return insert(std::move(node)).first;
+ iterator insert(const_iterator,
+ node_type&& node) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
}
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1079,7 +2059,8 @@ class raw_hash_set {
// m.emplace("abc", "xyz");
template <class... Args, typename std::enable_if<
IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
return PolicyTraits::apply(EmplaceDecomposable{*this},
std::forward<Args>(args)...);
}
@@ -1089,7 +2070,8 @@ class raw_hash_set {
// destroys.
template <class... Args, typename std::enable_if<
!IsDecomposable<Args...>::value, int>::type = 0>
- std::pair<iterator, bool> emplace(Args&&... args) {
+ std::pair<iterator, bool> emplace(Args&&... args)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
@@ -1099,14 +2081,16 @@ class raw_hash_set {
}
template <class... Args>
- iterator emplace_hint(const_iterator, Args&&... args) {
+ iterator emplace_hint(const_iterator,
+ Args&&... args) ABSL_ATTRIBUTE_LIFETIME_BOUND {
return emplace(std::forward<Args>(args)...).first;
}
// Extension API: support for lazy emplace.
//
// Looks up key in the table. If found, returns the iterator to the element.
- // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`,
+ // and returns an iterator to the new element.
//
// `f` must abide by several restrictions:
// - it MUST call `raw_hash_set::constructor` with arguments as if a
@@ -1149,10 +2133,11 @@ class raw_hash_set {
};
template <class K = key_type, class F>
- iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ iterator lazy_emplace(const key_arg<K>& key,
+ F&& f) ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto res = find_or_prepare_insert(key);
if (res.second) {
- slot_type* slot = slots_ + res.first;
+ slot_type* slot = slot_array() + res.first;
std::forward<F>(f)(constructor(&alloc_ref(), &slot));
assert(!slot);
}
@@ -1194,12 +2179,25 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- AssertIsFull(it.ctrl_);
+ AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
- iterator erase(const_iterator first, const_iterator last) {
+ iterator erase(const_iterator first,
+ const_iterator last) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ // We check for empty first because ClearBackingArray requires that
+ // capacity() > 0 as a precondition.
+ if (empty()) return end();
+ if (first == begin() && last == end()) {
+ // TODO(ezb): we access control bytes in destroy_slots so it could make
+ // sense to combine destroy_slots and ClearBackingArray to avoid cache
+ // misses when the table is large. Note that we also do this in clear().
+ destroy_slots();
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/true);
+ common().set_reserved_growth(common().reservation_size());
+ return end();
+ }
while (first != last) {
erase(first++);
}
@@ -1228,7 +2226,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_);
+ AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+ position.inner_.generation_ptr(), "extract()");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1248,35 +2247,45 @@ class raw_hash_set {
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
- swap(ctrl_, that.ctrl_);
- swap(slots_, that.slots_);
- swap(size_, that.size_);
- swap(capacity_, that.capacity_);
- swap(growth_left(), that.growth_left());
+ swap(common(), that.common());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- swap(infoz_, that.infoz_);
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
- if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) {
- destroy_slots();
- infoz_.RecordStorageChanged(0, 0);
+ if (n == 0 && capacity() == 0) return;
+ if (n == 0 && size() == 0) {
+ ClearBackingArray(common(), GetPolicyFunctions(), /*reuse=*/false);
return;
}
+
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > capacity_) {
+ if (n == 0 || m > capacity()) {
resize(m);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
- void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+ void reserve(size_t n) {
+ if (n > size() + growth_left()) {
+ size_t m = GrowthToLowerboundCapacity(n);
+ resize(NormalizeCapacity(m));
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
+ }
+ common().reset_reserved_growth(n);
+ common().set_reservation_size(n);
+ }
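+
+  // For example (a usage sketch): starting from an empty table,
+  //   t.reserve(100);
+  // ensures that the next 100 insertions do not need to reallocate or rehash.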
// Extension API: support for heterogeneous keys.
//
@@ -1300,11 +2309,13 @@ class raw_hash_set {
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
(void)key;
-#if defined(__GNUC__)
- auto seq = probe(hash_ref()(key));
- __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
- __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif // __GNUC__
+ // Avoid probing if we won't be able to prefetch the addresses received.
+#ifdef ABSL_HAVE_PREFETCH
+ prefetch_heap_block();
+ auto seq = probe(common(), hash_ref()(key));
+ PrefetchToLocalCache(control() + seq.offset());
+ PrefetchToLocalCache(slot_array() + seq.offset());
+#endif // ABSL_HAVE_PREFETCH
}
// The API of find() has two extensions.
@@ -1315,32 +2326,39 @@ class raw_hash_set {
// 2. The type of the key argument doesn't have to be key_type. This is so
// called heterogeneous key support.
template <class K = key_type>
- iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(hash);
+ iterator find(const key_arg<K>& key,
+ size_t hash) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ auto seq = probe(common(), hash);
+ slot_type* slot_ptr = slot_array();
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ Group g{ctrl + seq.offset()};
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
+ PolicyTraits::element(slot_ptr + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
}
template <class K = key_type>
- iterator find(const key_arg<K>& key) {
+ iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key, size_t hash) const {
+ const_iterator find(const key_arg<K>& key,
+ size_t hash) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return const_cast<raw_hash_set*>(this)->find(key, hash);
}
template <class K = key_type>
- const_iterator find(const key_arg<K>& key) const {
+ const_iterator find(const key_arg<K>& key) const
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1350,22 +2368,23 @@ class raw_hash_set {
}
template <class K = key_type>
- std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
+ ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
template <class K = key_type>
std::pair<const_iterator, const_iterator> equal_range(
- const key_arg<K>& key) const {
+ const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = find(key);
if (it != end()) return {it, std::next(it)};
return {it, it};
}
- size_t bucket_count() const { return capacity_; }
+ size_t bucket_count() const { return capacity(); }
float load_factor() const {
- return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
}
float max_load_factor() const { return 1.0f; }
void max_load_factor(float) {
@@ -1390,6 +2409,14 @@ class raw_hash_set {
return !(a == b);
}
+ template <typename H>
+ friend typename std::enable_if<H::template is_hashable<value_type>::value,
+ H>::type
+ AbslHashValue(H h, const raw_hash_set& s) {
+ return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
+ s.size());
+ }
+
friend void swap(raw_hash_set& a,
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
@@ -1444,7 +2471,8 @@ class raw_hash_set {
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
- PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
+ &slot);
} else if (do_destroy) {
PolicyTraits::destroy(&s.alloc_ref(), &slot);
}
@@ -1455,235 +2483,145 @@ class raw_hash_set {
slot_type&& slot;
};
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
+ // Erases, but does not destroy, the value pointed to by `it`.
+ //
+ // This merely updates the pertinent control byte. This can be used in
+ // conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
- assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
- --size_;
- const size_t index = it.inner_.ctrl_ - ctrl_;
- const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
-
- // We count how many consecutive non empties we have to the right and to the
- // left of `it`. If the sum is >= kWidth then there is at least one probe
- // window that might have seen a full group.
- bool was_never_full =
- empty_before && empty_after &&
- static_cast<size_t>(empty_after.TrailingZeros() +
- empty_before.LeadingZeros()) < Group::kWidth;
-
- set_ctrl(index, was_never_full ? kEmpty : kDeleted);
- growth_left() += was_never_full;
- infoz_.RecordErase();
- }
-
- void initialize_slots() {
- assert(capacity_);
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructability and what
- // calls they will or wont make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- //
+ EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
+ }
+
+  // Allocates a backing array for the table and initializes its control
+  // bytes.
+ // This reads `capacity` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // This does not free the currently held array; `capacity` must be nonzero.
+ inline void initialize_slots() {
// People are often sloppy with the exact type of their allocator (sometimes
// it has an extra const or is missing the pair, but rebinds made it work
- // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
- // bound more carefully.
- if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
- slots_ == nullptr) {
- infoz_ = Sample();
- }
-
- auto layout = MakeLayout(capacity_);
- char* mem = static_cast<char*>(
- Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
- ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
- slots_ = layout.template Pointer<1>(mem);
- reset_ctrl();
- reset_growth_left();
- infoz_.RecordStorageChanged(size_, capacity_);
- }
-
- void destroy_slots() {
- if (!capacity_) return;
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
- }
- auto layout = MakeLayout(capacity_);
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
- ctrl_ = EmptyGroup();
- slots_ = nullptr;
- size_ = 0;
- capacity_ = 0;
- growth_left() = 0;
+ // anyway).
+ using CharAlloc =
+ typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
+ InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
+ common(), CharAlloc(alloc_ref()));
}
- void resize(size_t new_capacity) {
+ ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
assert(IsValidCapacity(new_capacity));
- auto* old_ctrl = ctrl_;
- auto* old_slots = slots_;
- const size_t old_capacity = capacity_;
- capacity_ = new_capacity;
+ auto* old_ctrl = control();
+ auto* old_slots = slot_array();
+ const size_t old_capacity = common().capacity();
+ common().set_capacity(new_capacity);
initialize_slots();
+ auto* new_slots = slot_array();
size_t total_probe_length = 0;
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(hash);
+ auto target = find_first_non_full(common(), hash);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
- set_ctrl(new_i, H2(hash));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+ PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
}
}
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- auto layout = MakeLayout(old_capacity);
- Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
- layout.AllocSize());
- }
- infoz_.RecordRehash(total_probe_length);
- }
-
- void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
- assert(IsValidCapacity(capacity_));
- assert(!is_small());
- // Algorithm:
- // - mark all DELETED slots as EMPTY
- // - mark all FULL slots as DELETED
- // - for each slot marked as DELETED
- // hash = Hash(element)
- // target = find_first_non_full(hash)
- // if target is in the same group
- // mark slot as FULL
- // else if target is EMPTY
- // transfer element to target
- // mark slot as EMPTY
- // mark target as FULL
- // else if target is DELETED
- // swap current element with target element
- // mark target as FULL
- // repeat procedure for current slot with moved from element (target)
- ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
- alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- size_t total_probe_length = 0;
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
- for (size_t i = 0; i != capacity_; ++i) {
- if (!IsDeleted(ctrl_[i])) continue;
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(slots_ + i));
- auto target = find_first_non_full(hash);
- size_t new_i = target.offset;
- total_probe_length += target.probe_length;
-
- // Verify if the old and new i fall within the same group wrt the hash.
- // If they do, we don't need to move the object as it falls already in the
- // best probe we can.
- const auto probe_index = [&](size_t pos) {
- return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
- };
-
- // Element doesn't move.
- if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- set_ctrl(i, H2(hash));
- continue;
- }
- if (IsEmpty(ctrl_[new_i])) {
- // Transfer element to the empty spot.
- // set_ctrl poisons/unpoisons the slots so we have to call it at the
- // right time.
- set_ctrl(new_i, H2(hash));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
- set_ctrl(i, kEmpty);
- } else {
- assert(IsDeleted(ctrl_[new_i]));
- set_ctrl(new_i, H2(hash));
- // Until we are done rehashing, DELETED marks previously FULL slots.
- // Swap i and new_i elements.
- PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
- --i; // repeat
- }
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), old_ctrl - ControlOffset(),
+ AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
- reset_growth_left();
- infoz_.RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
+ }
+
+ // Prunes control bytes to remove as many tombstones as possible.
+ //
+ // See the comment on `rehash_and_grow_if_necessary()`.
+ inline void drop_deletes_without_resize() {
+ // Stack-allocate space for swapping elements.
+ alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
+ DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
}
+ // Called whenever the table *might* need to conditionally grow.
+ //
+ // This function is an optimization opportunity to perform a rehash even when
+ // growth is unnecessary, because vacating tombstones is beneficial for
+  // performance in the long run.
void rehash_and_grow_if_necessary() {
- if (capacity_ == 0) {
- resize(1);
- } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+ const size_t cap = capacity();
+ if (cap > Group::kWidth &&
+ // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= cap * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity.
+ // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+      // tombstones. In the worst case, it seems to take approximately 4
+      // insert/erase pairs to create a single tombstone, so if we are
+      // rehashing because of tombstones, we can afford to rehash-in-place as
+      // long as we are reclaiming at least 1/8 the capacity without doing more
+      // than 2X the work. (Where "work" is defined to be size() for rehashing
+      // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+      // place is faster per operation than inserting or even doubling the size
+      // of the table, so we can actually afford to reclaim even less space
+      // from a resize-in-place. The decision is to rehash in place if we can
+      // reclaim at least about 1/8th of the usable capacity (specifically
+      // 3/32 of the total capacity, i.e. 3/28 of the usable capacity), which
+      // means that the total cost of rehashing will be a small fraction of
+      // the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity).
+ //
+      // Note that although the worst-case number of rehashes jumped up from
+      // 15 to 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+      //        | OLD (recover >= 7/16) | NEW (recover >= 3/32)
+ // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes
+ // 448 | 145284 0.44 18 | 140118 0.44 19
+ // 493 | 152546 0.24 11 | 151417 0.48 28
+ // 538 | 151439 0.26 11 | 151152 0.53 38
+ // 583 | 151765 0.28 11 | 150572 0.57 50
+ // 628 | 150241 0.31 11 | 150853 0.61 66
+ // 672 | 149602 0.33 12 | 150110 0.66 90
+ // 717 | 149998 0.35 12 | 149531 0.70 129
+ // 762 | 149836 0.37 13 | 148559 0.74 190
+ // 807 | 149736 0.39 14 | 151107 0.39 14
+ // 852 | 150204 0.42 15 | 151019 0.42 15
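+      //
+      // As a concrete instance of the 25/32 rule above: with capacity() ==
+      // 1023, this in-place path is taken while size() * 32 <= 1023 * 25,
+      // i.e. while size() <= 799.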
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
- resize(capacity_ * 2 + 1);
+ resize(NextCapacity(cap));
}
}
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(hash);
+ auto seq = probe(common(), hash);
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
- elem))
+ Group g{ctrl + seq.offset()};
+ for (uint32_t i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(
+ PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
return true;
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
return false;
}
- // Probes the raw_hash_set with the probe sequence for hash and returns the
- // pointer to the first empty or deleted slot.
- // NOTE: this function must work with tables having both kEmpty and kDelete
- // in one group. Such tables appears during drop_deletes_without_resize.
- //
- // This function is very useful when insertions happen and:
- // - the input is already a set
- // - there are enough slots
- // - the element with the hash is not in the table
- struct FindInfo {
- size_t offset;
- size_t probe_length;
- };
- FindInfo find_first_non_full(size_t hash) {
- auto seq = probe(hash);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
- if (mask) {
-#if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
-#endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
- }
- seq.next();
- assert(seq.index() < capacity_ && "full table!");
- }
- }
-
// TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that));
@@ -1697,36 +2635,54 @@ class raw_hash_set {
}
protected:
+  // Attempts to find `key` in the table; if it isn't found, returns the index
+  // of a slot into which the value can be inserted, with the control byte
+  // already set to `key`'s H2.
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ prefetch_heap_block();
auto hash = hash_ref()(key);
- auto seq = probe(hash);
+ auto seq = probe(common(), hash);
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ Group g{ctrl + seq.offset()};
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
+ PolicyTraits::element(slot_array() + seq.offset(i)))))
return {seq.offset(i), false};
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
return {prepare_insert(hash), true};
}
+ // Given the hash of a value not currently in the table, finds the next
+ // viable slot index to insert it at.
+ //
+ // REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- auto target = find_first_non_full(hash);
- if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
- !IsDeleted(ctrl_[target.offset]))) {
+ const bool rehash_for_bug_detection =
+ common().should_rehash_for_bug_detection_on_insert();
+ if (rehash_for_bug_detection) {
+ // Move to a different heap allocation in order to detect bugs.
+ const size_t cap = capacity();
+ resize(growth_left() > 0 ? cap : NextCapacity(cap));
+ }
+ auto target = find_first_non_full(common(), hash);
+ if (!rehash_for_bug_detection &&
+ ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(control()[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(hash);
+ target = find_first_non_full(common(), hash);
}
- ++size_;
- growth_left() -= IsEmpty(ctrl_[target.offset]);
- set_ctrl(target.offset, H2(hash));
- infoz_.RecordInsert(hash, target.probe_length);
+ common().set_size(common().size() + 1);
+ set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
+ SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
+ common().maybe_increment_generation_on_insert();
+ infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -1740,7 +2696,7 @@ class raw_hash_set {
// POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
template <class... Args>
void emplace_at(size_t i, Args&&... args) {
- PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ PolicyTraits::construct(&alloc_ref(), slot_array() + i,
std::forward<Args>(args)...);
assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
@@ -1748,60 +2704,46 @@ class raw_hash_set {
"constructed value does not match the lookup key");
}
- iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
- const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+ iterator iterator_at(size_t i) ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return {control() + i, slot_array() + i, common().generation_ptr()};
+ }
+ const_iterator iterator_at(size_t i) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+ return {control() + i, slot_array() + i, common().generation_ptr()};
+ }
private:
friend struct RawHashSetTestOnlyAccess;
- probe_seq<Group::kWidth> probe(size_t hash) const {
- return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
- }
-
- // Reset all ctrl bytes back to kEmpty, except the sentinel.
- void reset_ctrl() {
- std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
- ctrl_[capacity_] = kSentinel;
- SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- }
-
- void reset_growth_left() {
- growth_left() = CapacityToGrowth(capacity()) - size_;
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // This is stored separately due to tombstones: we do not include tombstones
+ // in the growth capacity, because we'd like to rehash when the table is
+ // otherwise filled with tombstones: otherwise, probe sequences might get
+ // unacceptably long without triggering a rehash. Callers can also force a
+ // rehash via the standard `rehash(0)`, which will recompute this value as a
+ // side-effect.
+ //
+ // See `CapacityToGrowth()`.
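+  //
+  // For example, CapacityToGrowth(15) == 15 - 15/8 == 14, so a table with
+  // capacity 15 holds at most 14 elements before it must grow.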
+ size_t growth_left() const { return common().growth_left(); }
+ void set_growth_left(size_t gl) { return common().set_growth_left(gl); }
+
+ // Prefetch the heap-allocated memory region to resolve potential TLB and
+  // cache misses. This is intended to overlap with the computation of the
+  // key's hash.
+ void prefetch_heap_block() const {
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+ __builtin_prefetch(control(), 0, 1);
+#endif
}
- // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
- // the end too.
- void set_ctrl(size_t i, ctrl_t h) {
- assert(i < capacity_);
+ CommonFields& common() { return settings_.template get<0>(); }
+ const CommonFields& common() const { return settings_.template get<0>(); }
- if (IsFull(h)) {
- SanitizerUnpoisonObject(slots_ + i);
- } else {
- SanitizerPoisonObject(slots_ + i);
- }
-
- ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + 1 +
- ((Group::kWidth - 1) & capacity_)] = h;
+ ctrl_t* control() const { return common().control(); }
+ slot_type* slot_array() const {
+ return static_cast<slot_type*>(common().slot_array());
}
-
- size_t& growth_left() { return settings_.template get<0>(); }
-
- // The representation of the object has two modes:
- // - small: For capacities < kWidth-1
- // - large: For the rest.
- //
- // Differences:
- // - In small mode we are able to use the whole capacity. The extra control
- // bytes give us at least one "empty" control byte to stop the iteration.
- // This is important to make 1 a valid capacity.
- //
- // - In small mode only the first `capacity()` control bytes after the
- // sentinel are valid. The rest contain dummy kEmpty values that do not
- // represent a real slot. This is important to take into account on
- // find_first_non_full(), where we never try ShouldInsertBackwards() for
- // small tables.
- bool is_small() const { return capacity_ < Group::kWidth - 1; }
+ HashtablezInfoHandle& infoz() { return common().infoz(); }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -1812,28 +2754,66 @@ class raw_hash_set {
return settings_.template get<3>();
}
- // TODO(alkis): Investigate removing some of these fields:
- // - ctrl/slots can be derived from each other
- // - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
- HashtablezInfoHandle infoz_;
- absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
- key_equal, allocator_type>
- settings_{0, hasher{}, key_equal{}, allocator_type{}};
+ // Make type-specific functions for this type's PolicyFunctions struct.
+ static size_t hash_slot_fn(void* set, void* slot) {
+ auto* h = static_cast<raw_hash_set*>(set);
+ return PolicyTraits::apply(
+ HashElement{h->hash_ref()},
+ PolicyTraits::element(static_cast<slot_type*>(slot)));
+ }
+ static void transfer_slot_fn(void* set, void* dst, void* src) {
+ auto* h = static_cast<raw_hash_set*>(set);
+ PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
+ static_cast<slot_type*>(src));
+ }
+ // Note: dealloc_fn will only be used if we have a non-standard allocator.
+ static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
+ auto* set = reinterpret_cast<raw_hash_set*>(&common);
+
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(common.slot_array(),
+ sizeof(slot_type) * common.capacity());
+
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &set->alloc_ref(), common.backing_array_start(),
+ common.alloc_size(sizeof(slot_type), alignof(slot_type)));
+ }
+
+ static const PolicyFunctions& GetPolicyFunctions() {
+ static constexpr PolicyFunctions value = {
+ sizeof(slot_type),
+ &raw_hash_set::hash_slot_fn,
+ PolicyTraits::transfer_uses_memcpy()
+ ? TransferRelocatable<sizeof(slot_type)>
+ : &raw_hash_set::transfer_slot_fn,
+ (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
+ ? &DeallocateStandard<alignof(slot_type)>
+ : &raw_hash_set::dealloc_fn),
+ };
+ return value;
+ }
+
+ // Bundle together CommonFields plus other objects which might be empty.
+ // CompressedTuple will ensure that sizeof is not affected by any of the empty
+ // fields that occur after CommonFields.
+ absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
+ allocator_type>
+ settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
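+//
+// For example (a usage sketch; `absl::flat_hash_set` is one of the containers
+// built on top of `raw_hash_set`, and its `absl::erase_if` overload forwards
+// here):
+//
+//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
+//   auto is_even = [](int v) { return v % 2 == 0; };
+//   size_t erased = absl::container_internal::EraseIf(is_even, &s);  // == 2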
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+typename raw_hash_set<P, H, E, A>::size_type EraseIf(
+ Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+ const auto initial_size = c->size();
for (auto it = c->begin(), last = c->end(); it != last;) {
- auto copy_it = it++;
- if (pred(*copy_it)) {
- c->erase(copy_it);
+ if (pred(*it)) {
+ c->erase(it++);
+ } else {
+ ++it;
}
}
+ return initial_size - c->size();
}
namespace hashtable_debug_internal {
@@ -1846,36 +2826,37 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = set.probe(hash);
+ auto seq = probe(set.common(), hash);
+ const ctrl_t* ctrl = set.control();
while (true) {
- container_internal::Group g{set.ctrl_ + seq.offset()};
- for (int i : g.Match(container_internal::H2(hash))) {
+ container_internal::Group g{ctrl + seq.offset()};
+ for (uint32_t i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
- Traits::element(set.slots_ + seq.offset(i))))
+ Traits::element(set.slot_array() + seq.offset(i))))
return num_probes;
++num_probes;
}
- if (g.MatchEmpty()) return num_probes;
+ if (g.MaskEmpty()) return num_probes;
seq.next();
++num_probes;
}
}
static size_t AllocatedByteSize(const Set& c) {
- size_t capacity = c.capacity_;
+ size_t capacity = c.capacity();
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
- size_t m = layout.AllocSize();
+ size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * c.size();
} else {
+ const ctrl_t* ctrl = c.control();
for (size_t i = 0; i != capacity; ++i) {
- if (container_internal::IsFull(c.ctrl_[i])) {
- m += Traits::space_used(c.slots_ + i);
+ if (container_internal::IsFull(ctrl[i])) {
+ m += Traits::space_used(c.slot_array() + i);
}
}
}
@@ -1885,8 +2866,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
- size_t m = layout.AllocSize();
+ size_t m =
+ AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * size;
@@ -1900,4 +2881,6 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
ABSL_NAMESPACE_END
} // namespace absl
+#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
+
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_