Diffstat (limited to 'abseil-cpp/absl/base/spinlock_test_common.cc')
-rw-r--r--  abseil-cpp/absl/base/spinlock_test_common.cc  41
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/abseil-cpp/absl/base/spinlock_test_common.cc b/abseil-cpp/absl/base/spinlock_test_common.cc
index dee266e..52ecf58 100644
--- a/abseil-cpp/absl/base/spinlock_test_common.cc
+++ b/abseil-cpp/absl/base/spinlock_test_common.cc
@@ -34,7 +34,7 @@
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/notification.h"
-constexpr int32_t kNumThreads = 10;
+constexpr uint32_t kNumThreads = 10;
constexpr int32_t kIters = 1000;
namespace absl {
@@ -48,14 +48,14 @@ struct SpinLockTest {
int64_t wait_end_time) {
return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
}
- static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+ static int64_t DecodeWaitCycles(uint32_t lock_value) {
return SpinLock::DecodeWaitCycles(lock_value);
}
};
namespace {
-static constexpr int kArrayLength = 10;
+static constexpr size_t kArrayLength = 10;
static uint32_t values[kArrayLength];
ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
@@ -79,11 +79,11 @@ static uint32_t Hash32(uint32_t a, uint32_t c) {
return c;
}
-static void TestFunction(int thread_salt, SpinLock* spinlock) {
+static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) {
for (int i = 0; i < kIters; i++) {
SpinLockHolder h(spinlock);
- for (int j = 0; j < kArrayLength; j++) {
- const int index = (j + thread_salt) % kArrayLength;
+ for (size_t j = 0; j < kArrayLength; j++) {
+ const size_t index = (j + thread_salt) % kArrayLength;
values[index] = Hash32(values[index], thread_salt);
std::this_thread::yield();
}
@@ -92,7 +92,8 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) {
static void ThreadedTest(SpinLock* spinlock) {
std::vector<std::thread> threads;
- for (int i = 0; i < kNumThreads; ++i) {
+ threads.reserve(kNumThreads);
+ for (uint32_t i = 0; i < kNumThreads; ++i) {
threads.push_back(std::thread(TestFunction, i, spinlock));
}
for (auto& thread : threads) {
@@ -100,7 +101,7 @@ static void ThreadedTest(SpinLock* spinlock) {
}
SpinLockHolder h(spinlock);
- for (int i = 1; i < kArrayLength; i++) {
+ for (size_t i = 1; i < kArrayLength; i++) {
EXPECT_EQ(values[0], values[i]);
}
}
@@ -132,28 +133,28 @@ TEST(SpinLock, WaitCyclesEncoding) {
// but the lower kProfileTimestampShift will be dropped.
const int kMaxCyclesShift =
32 - kLockwordReservedShift + kProfileTimestampShift;
- const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+ const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
// These bits should be zero after encoding.
const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
// These bits are dropped when wait cycles are encoded.
- const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+ const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
// Test a bunch of random values
std::default_random_engine generator;
// Shift to avoid overflow below.
- std::uniform_int_distribution<uint64_t> time_distribution(
- 0, std::numeric_limits<uint64_t>::max() >> 4);
- std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+ std::uniform_int_distribution<int64_t> time_distribution(
+ 0, std::numeric_limits<int64_t>::max() >> 3);
+ std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
for (int i = 0; i < 100; i++) {
int64_t start_time = time_distribution(generator);
int64_t cycles = cycle_distribution(generator);
int64_t end_time = start_time + cycles;
uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
- EXPECT_EQ(0, lock_value & kLockwordReservedMask);
- uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+ EXPECT_EQ(0u, lock_value & kLockwordReservedMask);
+ int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
EXPECT_EQ(0, decoded & kProfileTimestampMask);
EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
}
@@ -177,21 +178,21 @@ TEST(SpinLock, WaitCyclesEncoding) {
// Test clamping
uint32_t max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
- uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
- uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+ int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+ int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
const int64_t step = (1 << kProfileTimestampShift);
uint32_t after_max_value =
SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
- uint64_t after_max_value_decoded =
+ int64_t after_max_value_decoded =
SpinLockTest::DecodeWaitCycles(after_max_value);
EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
start_time, start_time + kMaxCycles - step);
- uint64_t before_max_value_decoded =
- SpinLockTest::DecodeWaitCycles(before_max_value);
+ int64_t before_max_value_decoded =
+ SpinLockTest::DecodeWaitCycles(before_max_value);
EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
}
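
For context, below is a minimal, self-contained sketch of the encode/decode round trip that the WaitCyclesEncoding test above exercises. This is not the Abseil implementation: the values chosen for kLockwordReservedShift and kProfileTimestampShift are illustrative placeholders (the real constants live in SpinLock and are not shown in this diff), and EncodeWaitCycles/DecodeWaitCycles are reimplemented here only to show the masking and clamping behaviour that the assertions check.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative shift values; the real constants are private to SpinLock.
constexpr int kLockwordReservedShift = 3;
constexpr int kProfileTimestampShift = 7;
constexpr int kMaxCyclesShift =
    32 - kLockwordReservedShift + kProfileTimestampShift;
constexpr int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;

// Encode: drop the low kProfileTimestampShift bits of the elapsed cycles,
// clamp to the representable range, and shift the result above the
// reserved lockword bits (which therefore stay zero).
uint32_t EncodeWaitCycles(int64_t start, int64_t end) {
  int64_t cycles = std::min(end - start, kMaxCycles);
  return static_cast<uint32_t>(
      (cycles >> kProfileTimestampShift) << kLockwordReservedShift);
}

// Decode: reverse the shifts; the dropped low bits come back as zero.
int64_t DecodeWaitCycles(uint32_t lock_value) {
  return static_cast<int64_t>(lock_value >> kLockwordReservedShift)
         << kProfileTimestampShift;
}

int main() {
  int64_t start = 1000, cycles = 123456;
  uint32_t encoded = EncodeWaitCycles(start, start + cycles);
  // Reserved low bits are zero, and the round trip preserves the wait
  // cycles up to the dropped timestamp bits, matching the test's EXPECT_EQs.
  std::cout << (encoded & ((1u << kLockwordReservedShift) - 1)) << "\n";  // 0
  std::cout << DecodeWaitCycles(encoded) << "\n";  // 123456 & ~127 = 123392
}

Under these assumptions, the decoded value is always a signed, non-negative cycle count that fits comfortably in int64_t, which is why the test can switch its locals and DecodeWaitCycles to int64_t without changing any of the expected results.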