Diffstat (limited to 'abseil-cpp/absl/container/internal/hashtablez_sampler.cc')
-rw-r--r--  abseil-cpp/absl/container/internal/hashtablez_sampler.cc | 249
1 file changed, 132 insertions(+), 117 deletions(-)
diff --git a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index e4484fb..79a0973 100644
--- a/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -14,6 +14,7 @@
#include "absl/container/internal/hashtablez_sampler.h"
+#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
@@ -21,49 +22,57 @@
#include <limits>
#include "absl/base/attributes.h"
-#include "absl/base/internal/exponential_biased.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/time/clock.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
namespace {
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
+void TriggerHashtablezConfigListener() {
+ auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+ if (listener != nullptr) listener();
+}
+
} // namespace
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
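
The per-thread sampling state grows from a bare countdown into a
{next_sample, sample_stride} pair, so the geometric stride that selected a
table survives long enough to be recorded on the sample as its weight. A
minimal sketch of the fast-path contract; SamplingStateSketch and
ShouldSampleNextTable are hypothetical names, not part of this patch:

    struct SamplingStateSketch {
      int64_t next_sample;    // insertions left until the next sample
      int64_t sample_stride;  // geometric stride that chose the current sample
    };

    // Hypothetical fast path: decrement per table construction; the slow
    // path (SampleSlow below) refills next_sample and records the stride.
    inline bool ShouldSampleNextTable(SamplingStateSketch& s) {
      return --s.next_sample <= 0;
    }
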
-HashtablezSampler& HashtablezSampler::Global() {
+HashtablezSampler& GlobalHashtablezSampler() {
static auto* sampler = new HashtablezSampler();
return *sampler;
}
-HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
- DisposeCallback f) {
- return dispose_.exchange(f, std::memory_order_relaxed);
-}
-
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+ size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -72,100 +81,17 @@ void HashtablezInfo::PrepareForSampling() {
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+ hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
+ weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
- dead = nullptr;
-}
-
-HashtablezSampler::HashtablezSampler()
- : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
- absl::MutexLock l(&graveyard_.init_mu);
- graveyard_.dead = &graveyard_;
-}
-
-HashtablezSampler::~HashtablezSampler() {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- HashtablezInfo* next = s->next;
- delete s;
- s = next;
- }
-}
-
-void HashtablezSampler::PushNew(HashtablezInfo* sample) {
- sample->next = all_.load(std::memory_order_relaxed);
- while (!all_.compare_exchange_weak(sample->next, sample,
- std::memory_order_release,
- std::memory_order_relaxed)) {
- }
-}
-
-void HashtablezSampler::PushDead(HashtablezInfo* sample) {
- if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
- dispose(*sample);
- }
-
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
- absl::MutexLock sample_lock(&sample->init_mu);
- sample->dead = graveyard_.dead;
- graveyard_.dead = sample;
-}
-
-HashtablezInfo* HashtablezSampler::PopDead() {
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-
- // The list is circular, so eventually it collapses down to
- // graveyard_.dead == &graveyard_
- // when it is empty.
- HashtablezInfo* sample = graveyard_.dead;
- if (sample == &graveyard_) return nullptr;
-
- absl::MutexLock sample_lock(&sample->init_mu);
- graveyard_.dead = sample->dead;
- sample->PrepareForSampling();
- return sample;
-}
-
-HashtablezInfo* HashtablezSampler::Register() {
- int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
- if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
- dropped_samples_.fetch_add(1, std::memory_order_relaxed);
- return nullptr;
- }
-
- HashtablezInfo* sample = PopDead();
- if (sample == nullptr) {
- // Resurrection failed. Hire a new warlock.
- sample = new HashtablezInfo();
- PushNew(sample);
- }
-
- return sample;
-}
-
-void HashtablezSampler::Unregister(HashtablezInfo* sample) {
- PushDead(sample);
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int64_t HashtablezSampler::Iterate(
- const std::function<void(const HashtablezInfo& stack)>& f) {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- absl::MutexLock l(&s->init_mu);
- if (s->dead == nullptr) {
- f(*s);
- }
- s = s->next;
- }
-
- return dropped_samples_.load(std::memory_order_relaxed);
+ inline_element_size = inline_element_size_value;
}
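
Everything deleted above (the intrusive all_ list, the graveyard free list,
and Register/Unregister/Iterate) is not lost: it moves into the generic
absl::profiling_internal::SampleRecorder template pulled in by the new
sample_recorder.h include, which HashtablezSampler now instantiates. A usage
sketch against the surviving surface; DumpLiveTables is a hypothetical helper:

    #include "absl/container/internal/hashtablez_sampler.h"

    void DumpLiveTables() {
      using absl::container_internal::GlobalHashtablezSampler;
      using absl::container_internal::HashtablezInfo;
      // Iterate visits every live sample under its lock; the return value
      // (the dropped-sample count) is ignored here.
      GlobalHashtablezSampler().Iterate([](const HashtablezInfo& info) {
        // e.g. read info.size, info.capacity, info.max_probe_length.
      });
    }
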
static bool ShouldForceSampling() {
@@ -180,27 +106,40 @@ static bool ShouldForceSampling() {
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) {
- state = AbslContainerInternalSampleEverything() ? kForce : kDontForce;
+ state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+ ? kForce
+ : kDontForce;
global_state.store(state, std::memory_order_relaxed);
}
return state == kForce;
}
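
The renamed lookup goes through ABSL_INTERNAL_C_SYMBOL so the weak hook keeps
a stable, mangling-proof name. If I read the header right, an embedder can
force-sample every table by supplying a strong definition for the weak symbol
declared in hashtablez_sampler.h; a sketch, not part of this patch:

    // Strong override of the weak hook; the default definition returns false.
    extern "C" bool ABSL_INTERNAL_C_SYMBOL(
        AbslContainerInternalSampleEverything)() {
      return true;
    }
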
-HashtablezInfo* SampleSlow(int64_t* next_sample) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
- return HashtablezSampler::Global().Register();
+ next_sample.next_sample = 1;
+ const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+ HashtablezInfo* result =
+ GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+ return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
+ next_sample = {
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max(),
+ };
return nullptr;
#else
- bool first = *next_sample < 0;
- *next_sample = g_exponential_biased_generator.GetStride(
+ bool first = next_sample.next_sample < 0;
+
+ const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ next_sample.next_sample = next_stride;
+ const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
// Small values of interval are equivalent to just sampling next time.
- ABSL_ASSERT(*next_sample >= 1);
+ ABSL_ASSERT(next_stride >= 1);
// g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
@@ -210,16 +149,53 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
- if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
- return SampleSlow(next_sample);
+ if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample, inline_element_size);
}
- return HashtablezSampler::Global().Register();
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
#endif
}
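
SampleSlow now threads the stride through: the stride that just expired is
exchanged out and handed to Register() as the sample's weight, while a fresh
geometric stride (mean g_hashtablez_sample_parameter) is installed for the
next countdown. That weight lets consumers de-bias the sample set; a sketch,
with EstimateSampledTableCount as a hypothetical helper:

    // A table selected with stride W stands in for roughly W tables, so
    // summing weights estimates the total population of hashtables.
    int64_t EstimateSampledTableCount() {
      int64_t total = 0;
      absl::container_internal::GlobalHashtablezSampler().Iterate(
          [&](const absl::container_internal::HashtablezInfo& info) {
            total += info.weight;  // stride recorded by PrepareForSampling
          });
      return total;
    }
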
void UnsampleSlow(HashtablezInfo* info) {
- HashtablezSampler::Global().Unregister(info);
+ GlobalHashtablezSampler().Unregister(info);
+}
+
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
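
The divisor mirrors the SwissTable group width: probe lengths are stored in
whole scan groups, 16 slots when SSE2 control bytes are available and 8
otherwise. The equivalent arithmetic as a standalone sketch:

    // Probe distances are counted in scan groups, not slots.
    constexpr size_t kProbeGroupWidth =
    #ifdef ABSL_INTERNAL_HAVE_SSE2
        16;
    #else
        8;
    #endif

    size_t ProbeLengthInGroups(size_t distance_in_slots) {
      return distance_in_slots / kProbeGroupWidth;
    }
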
+
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ }
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -227,7 +203,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
@@ -235,6 +211,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
info->max_probe_length.store(
std::max(info->max_probe_length.load(std::memory_order_relaxed),
probe_length),
@@ -243,11 +220,41 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->size.fetch_add(1, std::memory_order_relaxed);
}
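
The new hashes_bitwise_xor joins the existing AND/OR accumulators as a cheap
hash-quality fingerprint: over many inserts a healthy hash drives the AND
toward all-zeros and the OR toward all-ones, so any bit that still agrees
across every observed hash is suspect. An illustrative check, not part of
Abseil:

    // Returns true if some hash bit was constant across all sampled
    // insertions (1 in every hash, or 0 in every hash).
    bool HasStuckHashBits(size_t bits_and, size_t bits_or) {
      return (bits_and | ~bits_or) != 0;
    }
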
+void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+ g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+ return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
void SetHashtablezEnabled(bool enabled) {
+ SetHashtablezEnabledInternal(enabled);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
+int32_t GetHashtablezSampleParameter() {
+ return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
void SetHashtablezSampleParameter(int32_t rate) {
+ SetHashtablezSampleParameterInternal(rate);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
if (rate > 0) {
g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
} else {
@@ -256,12 +263,20 @@ void SetHashtablezSampleParameter(int32_t rate) {
}
}
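
Each public setter is now split into an Internal half (the old behavior) plus
a notification, so a process-wide listener can observe configuration changes.
A registration sketch using only APIs added in this patch;
OnHashtablezConfigChanged is a hypothetical callback:

    void OnHashtablezConfigChanged() {
      // Re-read whatever the embedder mirrors (flags, stats pages, ...).
      bool enabled = absl::container_internal::IsHashtablezEnabled();
      int32_t rate = absl::container_internal::GetHashtablezSampleParameter();
      (void)enabled;
      (void)rate;
    }

    void InstallHashtablezListener() {
      absl::container_internal::SetHashtablezConfigListener(
          &OnHashtablezConfigChanged);
    }
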
-void SetHashtablezMaxSamples(int32_t max) {
+size_t GetHashtablezMaxSamples() {
+ return GlobalHashtablezSampler().GetMaxSamples();
+}
+
+void SetHashtablezMaxSamples(size_t max) {
+ SetHashtablezMaxSamplesInternal(max);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(size_t max) {
if (max > 0) {
- g_hashtablez_max_samples.store(max, std::memory_order_release);
+ GlobalHashtablezSampler().SetMaxSamples(max);
} else {
- ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
- static_cast<long long>(max)); // NOLINT(runtime/int)
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: 0");
}
}
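
Putting the new surface together, a minimal end-to-end configuration sketch;
the values are illustrative (1 << 10 and 1 << 20 match the defaults visible
above):

    void ConfigureHashtablez() {
      using namespace absl::container_internal;
      SetHashtablezEnabled(true);             // turn sampling on
      SetHashtablezSampleParameter(1 << 10);  // ~1 sampled table per 1024
      SetHashtablezMaxSamples(1 << 20);       // cap retained samples
    }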