author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-07-07 04:43:25 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-07-07 04:43:25 +0000
commit     6f2763c2eea5f4be7f981f3376a3355427936926 (patch)
tree       2d03a520f6d625a02107773a53521ec016838942
parent     414da74a4943deed7526a5610cf6fcf666d10dae (diff)
parent     03a46c72c5a836d644d28b2d1a32f87efd0cf814 (diff)
Snap for 10453563 from 03a46c72c5a836d644d28b2d1a32f87efd0cf814 to mainline-conscrypt-release
Tags: aml_con_341614000, aml_con_341511080, aml_con_341410300, aml_con_341310090, aml_con_341110000, android14-mainline-conscrypt-release
Change-Id: I103be36a8ff8c6d9a7fa110877fe65012d4f6c2a
-rw-r--r--  Android.bp  96
-rw-r--r--  CMake/AbseilDll.cmake  24
-rw-r--r--  CMake/AbseilHelpers.cmake  13
-rw-r--r--  CMake/README.md  13
-rwxr-xr-x  CMake/install_test_project/test.sh  4
-rw-r--r--  CMakeLists.txt  35
-rw-r--r--  METADATA  5
-rw-r--r--  WORKSPACE  31
-rw-r--r--  absl/BUILD.bazel  39
-rw-r--r--  absl/algorithm/BUILD.bazel  1
-rw-r--r--  absl/algorithm/CMakeLists.txt  1
-rw-r--r--  absl/algorithm/algorithm_test.cc  9
-rw-r--r--  absl/algorithm/container.h  2
-rw-r--r--  absl/base/BUILD.bazel  37
-rw-r--r--  absl/base/CMakeLists.txt  43
-rw-r--r--  absl/base/attributes.h  71
-rw-r--r--  absl/base/casts.h  129
-rw-r--r--  absl/base/config.h  225
-rw-r--r--  absl/base/exception_safety_testing_test.cc  3
-rw-r--r--  absl/base/internal/cycleclock.cc  54
-rw-r--r--  absl/base/internal/cycleclock.h  69
-rw-r--r--  absl/base/internal/direct_mmap.h  6
-rw-r--r--  absl/base/internal/endian.h  79
-rw-r--r--  absl/base/internal/fast_type_id.h  2
-rw-r--r--  absl/base/internal/invoke.h  54
-rw-r--r--  absl/base/internal/low_level_alloc_test.cc  11
-rw-r--r--  absl/base/internal/prefetch.h  138
-rw-r--r--  absl/base/internal/prefetch_test.cc  43
-rw-r--r--  absl/base/internal/raw_logging.cc  51
-rw-r--r--  absl/base/internal/raw_logging.h  25
-rw-r--r--  absl/base/internal/spinlock.cc  3
-rw-r--r--  absl/base/internal/spinlock.h  8
-rw-r--r--  absl/base/internal/spinlock_linux.inc  7
-rw-r--r--  absl/base/internal/sysinfo.cc  1
-rw-r--r--  absl/base/internal/sysinfo_test.cc  23
-rw-r--r--  absl/base/internal/thread_identity.cc  3
-rw-r--r--  absl/base/internal/unscaledcycleclock.cc  13
-rw-r--r--  absl/base/internal/unscaledcycleclock.h  15
-rw-r--r--  absl/base/invoke_test.cc  102
-rw-r--r--  absl/base/log_severity.cc  28
-rw-r--r--  absl/base/log_severity.h  51
-rw-r--r--  absl/base/log_severity_test.cc  43
-rw-r--r--  absl/base/optimization.h  30
-rw-r--r--  absl/base/options.h  10
-rw-r--r--  absl/base/thread_annotations.h  4
-rw-r--r--  absl/cleanup/CMakeLists.txt  1
-rw-r--r--  absl/container/BUILD.bazel  69
-rw-r--r--  absl/container/CMakeLists.txt  61
-rw-r--r--  absl/container/btree_benchmark.cc  64
-rw-r--r--  absl/container/btree_map.h  76
-rw-r--r--  absl/container/btree_set.h  103
-rw-r--r--  absl/container/btree_test.cc  382
-rw-r--r--  absl/container/fixed_array.h  2
-rw-r--r--  absl/container/flat_hash_map.h  15
-rw-r--r--  absl/container/flat_hash_map_test.cc  15
-rw-r--r--  absl/container/flat_hash_set.h  24
-rw-r--r--  absl/container/flat_hash_set_test.cc  10
-rw-r--r--  absl/container/inlined_vector.h  29
-rw-r--r--  absl/container/inlined_vector_test.cc  17
-rw-r--r--  absl/container/internal/btree.h  1071
-rw-r--r--  absl/container/internal/btree_container.h  40
-rw-r--r--  absl/container/internal/common.h  11
-rw-r--r--  absl/container/internal/compressed_tuple_test.cc  10
-rw-r--r--  absl/container/internal/container_memory.h  38
-rw-r--r--  absl/container/internal/counting_allocator.h  8
-rw-r--r--  absl/container/internal/hash_function_defaults_test.cc  2
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc  88
-rw-r--r--  absl/container/internal/hashtablez_sampler.h  40
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc  64
-rw-r--r--  absl/container/internal/have_sse.h  50
-rw-r--r--  absl/container/internal/inlined_vector.h  97
-rw-r--r--  absl/container/internal/node_slot_policy.h (renamed from absl/container/internal/node_hash_policy.h)  8
-rw-r--r--  absl/container/internal/node_slot_policy_test.cc (renamed from absl/container/internal/node_hash_policy_test.cc)  4
-rw-r--r--  absl/container/internal/raw_hash_set.cc  4
-rw-r--r--  absl/container/internal/raw_hash_set.h  699
-rw-r--r--  absl/container/internal/raw_hash_set_benchmark.cc  28
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  41
-rw-r--r--  absl/container/internal/unordered_map_constructor_test.h  2
-rw-r--r--  absl/container/internal/unordered_map_lookup_test.h  4
-rw-r--r--  absl/container/internal/unordered_map_modifiers_test.h  11
-rw-r--r--  absl/container/internal/unordered_set_constructor_test.h  2
-rw-r--r--  absl/container/internal/unordered_set_lookup_test.h  2
-rw-r--r--  absl/container/internal/unordered_set_modifiers_test.h  8
-rw-r--r--  absl/container/node_hash_map.h  19
-rw-r--r--  absl/container/node_hash_map_test.cc  15
-rw-r--r--  absl/container/node_hash_set.h  15
-rw-r--r--  absl/container/node_hash_set_test.cc  10
-rw-r--r--  absl/copts/AbseilConfigureCopts.cmake  54
-rw-r--r--  absl/debugging/BUILD.bazel  115
-rw-r--r--  absl/debugging/CMakeLists.txt  81
-rw-r--r--  absl/debugging/failure_signal_handler.cc  6
-rw-r--r--  absl/debugging/internal/address_is_readable.cc  133
-rw-r--r--  absl/debugging/internal/elf_mem_image.cc  4
-rw-r--r--  absl/debugging/internal/elf_mem_image.h  5
-rw-r--r--  absl/debugging/internal/examine_stack.cc  222
-rw-r--r--  absl/debugging/internal/examine_stack.h  34
-rw-r--r--  absl/debugging/internal/stacktrace_aarch64-inl.inc  11
-rw-r--r--  absl/debugging/internal/stacktrace_arm-inl.inc  11
-rw-r--r--  absl/debugging/internal/stacktrace_config.h  3
-rw-r--r--  absl/debugging/internal/stacktrace_powerpc-inl.inc  11
-rw-r--r--  absl/debugging/internal/stacktrace_riscv-inl.inc  38
-rw-r--r--  absl/debugging/internal/stacktrace_x86-inl.inc  11
-rw-r--r--  absl/debugging/internal/vdso_support.cc  17
-rw-r--r--  absl/debugging/leak_check.cc  44
-rw-r--r--  absl/debugging/leak_check.h  19
-rw-r--r--  absl/debugging/leak_check_test.cc  17
-rw-r--r--  absl/debugging/stacktrace_benchmark.cc  55
-rw-r--r--  absl/debugging/symbolize.cc  7
-rw-r--r--  absl/debugging/symbolize_elf.inc  53
-rw-r--r--  absl/debugging/symbolize_test.cc  21
-rw-r--r--  absl/flags/BUILD.bazel  32
-rw-r--r--  absl/flags/CMakeLists.txt  4
-rw-r--r--  absl/flags/config.h  8
-rw-r--r--  absl/flags/declare.h  10
-rw-r--r--  absl/flags/flag.h  5
-rw-r--r--  absl/flags/flag_test.cc  193
-rw-r--r--  absl/flags/internal/flag.cc  5
-rw-r--r--  absl/flags/internal/flag.h  10
-rw-r--r--  absl/flags/internal/usage_test.cc  24
-rw-r--r--  absl/flags/marshalling.h  94
-rw-r--r--  absl/flags/marshalling_test.cc  166
-rw-r--r--  absl/functional/BUILD.bazel  39
-rw-r--r--  absl/functional/CMakeLists.txt  36
-rw-r--r--  absl/functional/any_invocable.h  313
-rw-r--r--  absl/functional/any_invocable_test.cc  1696
-rw-r--r--  absl/functional/bind_front.h  11
-rw-r--r--  absl/functional/function_ref.h  3
-rw-r--r--  absl/functional/function_ref_test.cc  1
-rw-r--r--  absl/functional/function_type_benchmark.cc (renamed from absl/functional/function_ref_benchmark.cc)  40
-rw-r--r--  absl/functional/internal/any_invocable.h  857
-rw-r--r--  absl/hash/BUILD.bazel  6
-rw-r--r--  absl/hash/CMakeLists.txt  8
-rw-r--r--  absl/hash/hash.h  90
-rw-r--r--  absl/hash/hash_benchmark.cc  77
-rw-r--r--  absl/hash/hash_test.cc  362
-rw-r--r--  absl/hash/internal/city_test.cc  2
-rw-r--r--  absl/hash/internal/hash.h  241
-rw-r--r--  absl/hash/internal/spy_hash_state.h  35
-rw-r--r--  absl/memory/BUILD.bazel  3
-rw-r--r--  absl/memory/memory_test.cc  21
-rw-r--r--  absl/numeric/BUILD.bazel  14
-rw-r--r--  absl/numeric/bits.h  3
-rw-r--r--  absl/numeric/bits_benchmark.cc  73
-rw-r--r--  absl/numeric/int128.cc  6
-rw-r--r--  absl/numeric/int128.h  4
-rw-r--r--  absl/profiling/BUILD.bazel  3
-rw-r--r--  absl/profiling/internal/exponential_biased_test.cc  2
-rw-r--r--  absl/profiling/internal/sample_recorder.h  27
-rw-r--r--  absl/profiling/internal/sample_recorder_test.cc  39
-rw-r--r--  absl/random/BUILD.bazel  19
-rw-r--r--  absl/random/CMakeLists.txt  6
-rw-r--r--  absl/random/bernoulli_distribution.h  8
-rw-r--r--  absl/random/beta_distribution_test.cc  34
-rw-r--r--  absl/random/bit_gen_ref.h  4
-rw-r--r--  absl/random/distributions.h  6
-rw-r--r--  absl/random/distributions_test.cc  1
-rw-r--r--  absl/random/exponential_distribution_test.cc  6
-rw-r--r--  absl/random/gaussian_distribution_test.cc  2
-rw-r--r--  absl/random/generators_test.cc  6
-rw-r--r--  absl/random/internal/BUILD.bazel  13
-rw-r--r--  absl/random/internal/chi_square.cc  3
-rw-r--r--  absl/random/internal/distribution_caller.h  3
-rw-r--r--  absl/random/internal/explicit_seed_seq_test.cc  26
-rw-r--r--  absl/random/internal/fast_uniform_bits.h  3
-rw-r--r--  absl/random/internal/generate_real.h  4
-rw-r--r--  absl/random/internal/mock_helpers.h  1
-rw-r--r--  absl/random/internal/nonsecure_base.h  107
-rw-r--r--  absl/random/internal/nonsecure_base_test.cc  66
-rw-r--r--  absl/random/internal/pcg_engine.h  2
-rw-r--r--  absl/random/internal/randen.h  16
-rw-r--r--  absl/random/internal/randen_detect.cc  8
-rw-r--r--  absl/random/internal/randen_engine.h  71
-rw-r--r--  absl/random/internal/salted_seed_seq.h  50
-rw-r--r--  absl/random/internal/traits.h  58
-rw-r--r--  absl/random/internal/uniform_helper.h  10
-rw-r--r--  absl/random/internal/wide_multiply.h  81
-rw-r--r--  absl/random/internal/wide_multiply_test.cc  110
-rw-r--r--  absl/random/log_uniform_int_distribution.h  19
-rw-r--r--  absl/random/log_uniform_int_distribution_test.cc  2
-rw-r--r--  absl/random/mocking_bit_gen.h  2
-rw-r--r--  absl/random/poisson_distribution.h  11
-rw-r--r--  absl/random/poisson_distribution_test.cc  2
-rw-r--r--  absl/random/seed_sequences.h  1
-rw-r--r--  absl/random/uniform_int_distribution.h  4
-rw-r--r--  absl/random/uniform_real_distribution.h  2
-rw-r--r--  absl/random/uniform_real_distribution_test.cc  121
-rw-r--r--  absl/random/zipf_distribution.h  7
-rw-r--r--  absl/random/zipf_distribution_test.cc  2
-rw-r--r--  absl/status/BUILD.bazel  5
-rw-r--r--  absl/status/CMakeLists.txt  11
-rw-r--r--  absl/status/internal/status_internal.h  17
-rw-r--r--  absl/status/status.cc  176
-rw-r--r--  absl/status/status.h  34
-rw-r--r--  absl/status/status_test.cc  20
-rw-r--r--  absl/status/statusor.h  12
-rw-r--r--  absl/strings/BUILD.bazel  85
-rw-r--r--  absl/strings/CMakeLists.txt  57
-rw-r--r--  absl/strings/ascii.h  8
-rw-r--r--  absl/strings/cord.cc  1107
-rw-r--r--  absl/strings/cord.h  283
-rw-r--r--  absl/strings/cord_analysis.cc  188
-rw-r--r--  absl/strings/cord_analysis.h  44
-rw-r--r--  absl/strings/cord_buffer.cc (renamed from absl/debugging/leak_check_disable.cc)  26
-rw-r--r--  absl/strings/cord_buffer.h  572
-rw-r--r--  absl/strings/cord_buffer_test.cc  320
-rw-r--r--  absl/strings/cord_ring_reader_test.cc  2
-rw-r--r--  absl/strings/cord_ring_test.cc  41
-rw-r--r--  absl/strings/cord_test.cc  1242
-rw-r--r--  absl/strings/internal/cord_data_edge.h  63
-rw-r--r--  absl/strings/internal/cord_data_edge_test.cc  130
-rw-r--r--  absl/strings/internal/cord_internal.cc  52
-rw-r--r--  absl/strings/internal/cord_internal.h  211
-rw-r--r--  absl/strings/internal/cord_internal_test.cc  116
-rw-r--r--  absl/strings/internal/cord_rep_btree.cc  198
-rw-r--r--  absl/strings/internal/cord_rep_btree.h  103
-rw-r--r--  absl/strings/internal/cord_rep_btree_navigator.cc  4
-rw-r--r--  absl/strings/internal/cord_rep_btree_navigator.h  6
-rw-r--r--  absl/strings/internal/cord_rep_btree_navigator_test.cc  21
-rw-r--r--  absl/strings/internal/cord_rep_btree_reader.cc  5
-rw-r--r--  absl/strings/internal/cord_rep_btree_reader.h  9
-rw-r--r--  absl/strings/internal/cord_rep_btree_test.cc  212
-rw-r--r--  absl/strings/internal/cord_rep_consume.cc  81
-rw-r--r--  absl/strings/internal/cord_rep_consume_test.cc  173
-rw-r--r--  absl/strings/internal/cord_rep_crc.cc  54
-rw-r--r--  absl/strings/internal/cord_rep_crc.h  102
-rw-r--r--  absl/strings/internal/cord_rep_crc_test.cc  115
-rw-r--r--  absl/strings/internal/cord_rep_flat.h  75
-rw-r--r--  absl/strings/internal/cord_rep_ring.cc  24
-rw-r--r--  absl/strings/internal/cord_rep_test_util.h  15
-rw-r--r--  absl/strings/internal/cordz_info.cc  49
-rw-r--r--  absl/strings/internal/cordz_info_statistics_test.cc  106
-rw-r--r--  absl/strings/internal/cordz_sample_token_test.cc  2
-rw-r--r--  absl/strings/internal/cordz_statistics.h  1
-rw-r--r--  absl/strings/internal/cordz_update_tracker.h  6
-rw-r--r--  absl/strings/internal/cordz_update_tracker_test.cc  6
-rw-r--r--  absl/strings/internal/escaping.cc  11
-rw-r--r--  absl/strings/internal/ostringstream.cc  2
-rw-r--r--  absl/strings/internal/str_format/arg.cc  2
-rw-r--r--  absl/strings/internal/str_format/arg.h  2
-rw-r--r--  absl/strings/internal/str_format/bind.h  45
-rw-r--r--  absl/strings/internal/str_format/checker.h  9
-rw-r--r--  absl/strings/internal/str_format/convert_test.cc  40
-rw-r--r--  absl/strings/internal/str_format/extension.cc  9
-rw-r--r--  absl/strings/internal/str_format/extension.h  7
-rw-r--r--  absl/strings/internal/str_format/output.h  3
-rw-r--r--  absl/strings/internal/str_format/parser.h  6
-rw-r--r--  absl/strings/internal/str_join_internal.h  15
-rw-r--r--  absl/strings/internal/string_constant.h  12
-rw-r--r--  absl/strings/internal/utf8.cc  18
-rw-r--r--  absl/strings/numbers.cc  16
-rw-r--r--  absl/strings/numbers.h  49
-rw-r--r--  absl/strings/str_cat.h  33
-rw-r--r--  absl/strings/str_cat_test.cc  5
-rw-r--r--  absl/strings/str_format_test.cc  2
-rw-r--r--  absl/strings/str_join.h  24
-rw-r--r--  absl/strings/str_join_test.cc  134
-rw-r--r--  absl/strings/str_split.h  3
-rw-r--r--  absl/strings/string_view.cc  15
-rw-r--r--  absl/strings/string_view.h  5
-rw-r--r--  absl/strings/string_view_test.cc  2
-rw-r--r--  absl/strings/strip.h  14
-rw-r--r--  absl/strings/substitute.h  12
-rw-r--r--  absl/strings/substitute_test.cc  48
-rw-r--r--  absl/synchronization/BUILD.bazel  24
-rw-r--r--  absl/synchronization/CMakeLists.txt  4
-rw-r--r--  absl/synchronization/internal/create_thread_identity.cc  15
-rw-r--r--  absl/synchronization/internal/create_thread_identity.h  4
-rw-r--r--  absl/synchronization/internal/per_thread_sem.cc  4
-rw-r--r--  absl/synchronization/internal/per_thread_sem.h  7
-rw-r--r--  absl/synchronization/internal/per_thread_sem_test.cc  9
-rw-r--r--  absl/synchronization/internal/waiter.cc  25
-rw-r--r--  absl/synchronization/internal/waiter.h  14
-rw-r--r--  absl/synchronization/mutex.cc  115
-rw-r--r--  absl/synchronization/mutex.h  26
-rw-r--r--  absl/synchronization/mutex_test.cc  26
-rw-r--r--  absl/synchronization/notification.h  5
-rw-r--r--  absl/time/BUILD.bazel  4
-rw-r--r--  absl/time/CMakeLists.txt  1
-rw-r--r--  absl/time/clock.cc  2
-rw-r--r--  absl/time/duration.cc  9
-rw-r--r--  absl/time/duration_test.cc  35
-rw-r--r--  absl/time/internal/cctz/BUILD.bazel  8
-rw-r--r--  absl/time/internal/cctz/include/cctz/civil_time_detail.h  18
-rw-r--r--  absl/time/internal/cctz/src/time_zone_format_test.cc  16
-rw-r--r--  absl/time/internal/cctz/src/time_zone_lookup_test.cc  48
-rw-r--r--  absl/time/internal/cctz/testdata/README.zoneinfo  7
-rw-r--r--  absl/time/internal/cctz/testdata/version  2
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas  bin 1209 -> 1209 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/America/Santiago  bin 1282 -> 1282 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza  bin 1230 -> 1240 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron  bin 1248 -> 1258 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental  bin 1282 -> 1282 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev  bin 549 -> 558 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol  bin 865 -> 865 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod  bin 530 -> 539 bytes
-rw-r--r--  absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye  bin 560 -> 569 bytes
-rw-r--r--  absl/time/internal/test_util.cc  10
-rw-r--r--  absl/time/internal/zoneinfo.inc  943
-rw-r--r--  absl/time/time.h  28
-rw-r--r--  absl/time/time_test.cc  10
-rw-r--r--  absl/types/BUILD.bazel  1
-rw-r--r--  absl/types/CMakeLists.txt  2
-rw-r--r--  absl/types/any.h  23
-rw-r--r--  absl/types/any_test.cc  33
-rw-r--r--  absl/types/internal/conformance_profile.h  2
-rw-r--r--  absl/types/internal/optional.h  8
-rw-r--r--  absl/types/internal/variant.h  6
-rw-r--r--  absl/types/optional.h  31
-rw-r--r--  absl/types/optional_test.cc  74
-rw-r--r--  absl/types/span.h  3
-rw-r--r--  ci/cmake_common.sh  2
-rwxr-xr-x  ci/linux_clang-latest_libcxx_asan_bazel.sh  5
-rwxr-xr-x  ci/linux_clang-latest_libcxx_bazel.sh  5
-rwxr-xr-x  ci/linux_clang-latest_libcxx_tsan_bazel.sh  5
-rwxr-xr-x  ci/linux_clang-latest_libstdcxx_bazel.sh  1
-rw-r--r--  ci/linux_docker_containers.sh  6
-rwxr-xr-x  ci/linux_gcc-floor_libstdcxx_bazel.sh  1
-rwxr-xr-x  ci/linux_gcc-latest_libstdcxx_bazel.sh  1
-rwxr-xr-x  ci/linux_gcc-latest_libstdcxx_cmake.sh  2
-rwxr-xr-x  ci/linux_gcc_alpine_cmake.sh  2
-rwxr-xr-x  ci/macos_xcode_bazel.sh  13
-rwxr-xr-x  ci/macos_xcode_cmake.sh  2
322 files changed, 14380 insertions, 5344 deletions
diff --git a/Android.bp b/Android.bp
index 612c65cc..13dedf13 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,31 +1,12 @@
package {
- default_applicable_licenses: ["external_abseil-cpp_license"],
-}
-
-// Added automatically by a large-scale-change that took the approach of
-// 'apply every license found to every target'. While this makes sure we respect
-// every license restriction, it may not be entirely correct.
-//
-// e.g. GPL in an MIT project might only apply to the contrib/ directory.
-//
-// Please consider splitting the single license below into multiple licenses,
-// taking care not to lose any license_kind information, and overriding the
-// default license using the 'licenses: [...]' property on targets as needed.
-//
-// For unused files, consider creating a 'fileGroup' with "//visibility:private"
-// to attach the license to, and including a comment whether the files may be
-// used in the current project.
-// See: http://go/android-license-faq
+ default_applicable_licenses: ["libabsl_license"],
+}
+
license {
- name: "external_abseil-cpp_license",
+ name: "libabsl_license",
visibility: [":__subpackages__"],
- license_kinds: [
- "SPDX-license-identifier-Apache-2.0",
- "legacy_unencumbered",
- ],
- license_text: [
- "LICENSE",
- ],
+ license_kinds: ["SPDX-license-identifier-Apache-2.0"],
+ license_text: ["LICENSE"],
}
cc_library_headers {
@@ -35,12 +16,21 @@ cc_library_headers {
export_include_dirs: ["."],
}
+cc_defaults {
+ name: "libabsl_library_defaults",
+ header_libs: ["libabsl_headers"],
+ export_header_lib_headers: ["libabsl_headers"],
+ whole_static_libs: ["libabsl_base"],
+}
+
cc_library_host_static {
name: "libabsl_base",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/base/internal/cycleclock.cc",
"absl/base/internal/low_level_alloc.cc",
"absl/base/internal/raw_logging.cc",
+ "absl/base/internal/scoped_set_env.cc",
"absl/base/internal/spinlock.cc",
"absl/base/internal/spinlock_wait.cc",
"absl/base/internal/strerror.cc",
@@ -48,14 +38,16 @@ cc_library_host_static {
"absl/base/internal/thread_identity.cc",
"absl/base/internal/throw_delegate.cc",
"absl/base/internal/unscaledcycleclock.cc",
+ "absl/base/log_severity.cc",
],
+ exclude_static_libs: ["libabsl_base"], // don't depend on itself
}
cc_library_host_static {
name: "libabsl_container",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/container/internal/test_instance_tracker.cc",
- "absl/container/internal/hash_generator_testing.cc",
"absl/container/internal/hashtablez_sampler.cc",
"absl/container/internal/hashtablez_sampler_force_weak_definition.cc",
"absl/container/internal/raw_hash_set.cc",
@@ -64,6 +56,7 @@ cc_library_host_static {
cc_library_host_static {
name: "libabsl_debugging",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/debugging/failure_signal_handler.cc",
"absl/debugging/internal/address_is_readable.cc",
@@ -80,39 +73,44 @@ cc_library_host_static {
cc_library_host_static {
name: "libabsl_flags",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/flags/commandlineflag.cc",
- "absl/flags/usage_config.cc",
- "absl/flags/marshalling.cc",
- "absl/flags/usage.cc",
+ "absl/flags/flag_test_defs.cc",
"absl/flags/flag.cc",
- "absl/flags/parse.cc",
"absl/flags/internal/commandlineflag.cc",
"absl/flags/internal/flag.cc",
"absl/flags/internal/private_handle_accessor.cc",
"absl/flags/internal/program_name.cc",
"absl/flags/internal/usage.cc",
- "absl/flags/flag_test_defs.cc",
+ "absl/flags/marshalling.cc",
+ "absl/flags/parse.cc",
"absl/flags/reflection.cc",
+ "absl/flags/usage_config.cc",
+ "absl/flags/usage.cc",
],
}
cc_library_host_static {
name: "libabsl_hash",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/hash/internal/city.cc",
"absl/hash/internal/hash.cc",
"absl/hash/internal/low_level_hash.cc",
+ "absl/hash/internal/print_hash_of.cc",
],
}
cc_library_host_static {
name: "libabsl_numeric",
+ defaults: ["libabsl_library_defaults"],
srcs: ["absl/numeric/int128.cc"],
}
cc_library_host_static {
name: "libabsl_profiling",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/profiling/internal/exponential_biased.cc",
"absl/profiling/internal/periodic_sampler.cc",
@@ -120,7 +118,32 @@ cc_library_host_static {
}
cc_library_host_static {
+ name: "libabsl_random",
+ defaults: ["libabsl_library_defaults"],
+ srcs: [
+ "absl/random/discrete_distribution.cc",
+ "absl/random/gaussian_distribution.cc",
+ "absl/random/internal/chi_square.cc",
+ "absl/random/internal/distribution_test_util.cc",
+ "absl/random/internal/gaussian_distribution_gentables.cc",
+ "absl/random/internal/nanobenchmark.cc",
+ "absl/random/internal/pool_urbg.cc",
+ "absl/random/internal/randen_benchmarks.cc",
+ "absl/random/internal/randen.cc",
+ "absl/random/internal/randen_detect.cc",
+ "absl/random/internal/randen_hwaes.cc",
+ "absl/random/internal/randen_round_keys.cc",
+ "absl/random/internal/randen_slow.cc",
+ "absl/random/internal/seed_material.cc",
+ "absl/random/seed_gen_exception.cc",
+ "absl/random/seed_sequences.cc",
+ ],
+ cflags: ["-Wno-unused-parameter"],
+}
+
+cc_library_host_static {
name: "libabsl_status",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/status/status.cc",
"absl/status/status_payload_printer.cc",
@@ -130,9 +153,12 @@ cc_library_host_static {
cc_library_host_static {
name: "libabsl_strings",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/strings/ascii.cc",
"absl/strings/charconv.cc",
+ "absl/strings/cord_analysis.cc",
+ "absl/strings/cord_buffer.cc",
"absl/strings/cord.cc",
"absl/strings/escaping.cc",
"absl/strings/internal/charconv_bigint.cc",
@@ -142,13 +168,16 @@ cc_library_host_static {
"absl/strings/internal/cord_rep_btree_navigator.cc",
"absl/strings/internal/cord_rep_btree_reader.cc",
"absl/strings/internal/cord_rep_consume.cc",
+ "absl/strings/internal/cord_rep_crc.cc",
"absl/strings/internal/cord_rep_ring.cc",
"absl/strings/internal/cordz_functions.cc",
"absl/strings/internal/cordz_handle.cc",
"absl/strings/internal/cordz_info.cc",
+ "absl/strings/internal/cordz_sample_token.cc",
"absl/strings/internal/escaping.cc",
"absl/strings/internal/memutil.cc",
"absl/strings/internal/ostringstream.cc",
+ "absl/strings/internal/pow10_helper.cc",
"absl/strings/internal/str_format/arg.cc",
"absl/strings/internal/str_format/bind.cc",
"absl/strings/internal/str_format/extension.cc",
@@ -168,6 +197,7 @@ cc_library_host_static {
cc_library_host_static {
name: "libabsl_synchronization",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/synchronization/barrier.cc",
"absl/synchronization/blocking_counter.cc",
@@ -175,13 +205,14 @@ cc_library_host_static {
"absl/synchronization/internal/per_thread_sem.cc",
"absl/synchronization/internal/waiter.cc",
"absl/synchronization/internal/graphcycles.cc",
- "absl/synchronization/notification.cc",
"absl/synchronization/mutex.cc",
+ "absl/synchronization/notification.cc",
],
}
cc_library_host_static {
name: "libabsl_time",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/time/civil_time.cc",
"absl/time/clock.cc",
@@ -203,6 +234,7 @@ cc_library_host_static {
cc_library_host_static {
name: "libabsl_types",
+ defaults: ["libabsl_library_defaults"],
srcs: [
"absl/types/bad_any_cast.cc",
"absl/types/bad_optional_access.cc",
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index fa323ff8..00cddb84 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -26,6 +26,7 @@ set(ABSL_INTERNAL_DLL_FILES
"base/internal/low_level_alloc.h"
"base/internal/low_level_scheduling.h"
"base/internal/per_thread_tls.h"
+ "base/internal/prefetch.h"
"base/internal/pretty_function.h"
"base/internal/raw_logging.cc"
"base/internal/raw_logging.h"
@@ -78,10 +79,9 @@ set(ABSL_INTERNAL_DLL_FILES
"container/internal/hashtablez_sampler.cc"
"container/internal/hashtablez_sampler.h"
"container/internal/hashtablez_sampler_force_weak_definition.cc"
- "container/internal/have_sse.h"
"container/internal/inlined_vector.h"
"container/internal/layout.h"
- "container/internal/node_hash_policy.h"
+ "container/internal/node_slot_policy.h"
"container/internal/raw_hash_map.h"
"container/internal/raw_hash_set.cc"
"container/internal/raw_hash_set.h"
@@ -91,7 +91,6 @@ set(ABSL_INTERNAL_DLL_FILES
"debugging/failure_signal_handler.cc"
"debugging/failure_signal_handler.h"
"debugging/leak_check.h"
- "debugging/leak_check_disable.cc"
"debugging/stacktrace.cc"
"debugging/stacktrace.h"
"debugging/symbolize.cc"
@@ -110,9 +109,11 @@ set(ABSL_INTERNAL_DLL_FILES
"debugging/internal/symbolize.h"
"debugging/internal/vdso_support.cc"
"debugging/internal/vdso_support.h"
+ "functional/any_invocable.h"
"functional/internal/front_binder.h"
"functional/bind_front.h"
"functional/function_ref.h"
+ "functional/internal/any_invocable.h"
"functional/internal/function_ref.h"
"hash/hash.h"
"hash/internal/city.h"
@@ -196,22 +197,29 @@ set(ABSL_INTERNAL_DLL_FILES
"strings/charconv.h"
"strings/cord.cc"
"strings/cord.h"
+ "strings/cord_analysis.cc"
+ "strings/cord_analysis.h"
+ "strings/cord_buffer.cc"
+ "strings/cord_buffer.h"
"strings/escaping.cc"
"strings/escaping.h"
"strings/internal/charconv_bigint.cc"
"strings/internal/charconv_bigint.h"
"strings/internal/charconv_parse.cc"
"strings/internal/charconv_parse.h"
+ "strings/internal/cord_data_edge.h"
"strings/internal/cord_internal.cc"
"strings/internal/cord_internal.h"
- "strings/internal/cord_rep_consume.h"
- "strings/internal/cord_rep_consume.cc"
"strings/internal/cord_rep_btree.cc"
"strings/internal/cord_rep_btree.h"
"strings/internal/cord_rep_btree_navigator.cc"
"strings/internal/cord_rep_btree_navigator.h"
"strings/internal/cord_rep_btree_reader.cc"
"strings/internal/cord_rep_btree_reader.h"
+ "strings/internal/cord_rep_crc.cc"
+ "strings/internal/cord_rep_crc.h"
+ "strings/internal/cord_rep_consume.h"
+ "strings/internal/cord_rep_consume.cc"
"strings/internal/cord_rep_flat.h"
"strings/internal/cord_rep_ring.cc"
"strings/internal/cord_rep_ring.h"
@@ -341,6 +349,7 @@ set(ABSL_INTERNAL_DLL_FILES
"types/internal/span.h"
"types/variant.h"
"utility/utility.h"
+ "debugging/leak_check.cc"
)
set(ABSL_INTERNAL_DLL_TARGETS
@@ -351,7 +360,6 @@ set(ABSL_INTERNAL_DLL_TARGETS
"debugging_internal"
"demangle_internal"
"leak_check"
- "leak_check_disable"
"stack_consumption"
"debugging"
"hash"
@@ -382,6 +390,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"kernel_timeout_internal"
"synchronization"
"thread_pool"
+ "any_invocable"
"bind_front"
"function_ref"
"atomic_hook"
@@ -451,8 +460,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"hashtablez_sampler"
"hashtable_debug"
"hashtable_debug_hooks"
- "have_sse"
- "node_hash_policy"
+ "node_slot_policy"
"raw_hash_map"
"container_common"
"raw_hash_set"
diff --git a/CMake/AbseilHelpers.cmake b/CMake/AbseilHelpers.cmake
index f2ce5675..dbb09fe2 100644
--- a/CMake/AbseilHelpers.cmake
+++ b/CMake/AbseilHelpers.cmake
@@ -40,7 +40,8 @@ endif()
# LINKOPTS: List of link options
# PUBLIC: Add this so that this library will be exported under absl::
# Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal.
-# TESTONLY: When added, this target will only be built if BUILD_TESTING=ON.
+# TESTONLY: When added, this target will only be built if both
+# BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON.
#
# Note:
# By default, absl_cc_library will always create a library named absl_${NAME},
@@ -82,7 +83,8 @@ function(absl_cc_library)
${ARGN}
)
- if(ABSL_CC_LIB_TESTONLY AND NOT BUILD_TESTING)
+ if(NOT ABSL_CC_LIB_PUBLIC AND ABSL_CC_LIB_TESTONLY AND
+ NOT (BUILD_TESTING AND ABSL_BUILD_TESTING))
return()
endif()
@@ -168,6 +170,7 @@ function(absl_cc_library)
set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
endif()
endforeach()
+ string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}")
FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\
prefix=${CMAKE_INSTALL_PREFIX}\n\
exec_prefix=\${prefix}\n\
@@ -179,7 +182,7 @@ Description: Abseil ${_NAME} library\n\
URL: https://abseil.io/\n\
Version: ${PC_VERSION}\n\
Requires:${PC_DEPS}\n\
-Libs: -L\${libdir} $<JOIN:${ABSL_CC_LIB_LINKOPTS}, > $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-labsl_${_NAME}>\n\
+Libs: -L\${libdir} ${PC_LINKOPTS} $<$<NOT:$<BOOL:${ABSL_CC_LIB_IS_INTERFACE}>>:-labsl_${_NAME}>\n\
Cflags: -I\${includedir}${PC_CFLAGS}\n")
INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
@@ -278,7 +281,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
if(ABSL_ENABLE_INSTALL)
set_target_properties(${_NAME} PROPERTIES
OUTPUT_NAME "absl_${_NAME}"
- SOVERSION "2111.0.0"
+ SOVERSION "2206.0.0"
)
endif()
else()
@@ -364,7 +367,7 @@ endfunction()
# GTest::gtest_main
# )
function(absl_cc_test)
- if(NOT BUILD_TESTING)
+ if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING))
return()
endif()
diff --git a/CMake/README.md b/CMake/README.md
index f8b27e63..8134615e 100644
--- a/CMake/README.md
+++ b/CMake/README.md
@@ -20,8 +20,10 @@ googletest framework
### Step-by-Step Instructions
1. If you want to build the Abseil tests, integrate the Abseil dependency
-[Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass
-`-DBUILD_TESTING=OFF` when configuring your project with CMake.
+[Google Test](https://github.com/google/googletest) into your CMake
+project. To disable Abseil tests, you have to pass either
+`-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your
+project with CMake.
2. Download Abseil and copy it into a subdirectory in your CMake project or add
Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your
@@ -91,7 +93,8 @@ setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high.
### Running Abseil Tests with CMake
-Use the `-DBUILD_TESTING=ON` flag to run Abseil tests.
+Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests. Note that
+BUILD_TESTING must also be on (the default).
You will need to provide Abseil with a Googletest dependency. There are two
options for how to do this:
@@ -109,7 +112,7 @@ For example, to run just the Abseil tests, you could use this script:
cd path/to/abseil-cpp
mkdir build
cd build
-cmake -DBUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON ..
+cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON ..
make -j
ctest
```
@@ -175,7 +178,7 @@ cmake --build /temporary/build/abseil-cpp --target install
## Google Test Options
-`-DBUILD_TESTING=ON` must be set to enable testing
+`-DABSL_BUILD_TESTING=ON` must be set to enable testing
- Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default)
- Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON`
diff --git a/CMake/install_test_project/test.sh b/CMake/install_test_project/test.sh
index 5a78c92c..cc028bac 100755
--- a/CMake/install_test_project/test.sh
+++ b/CMake/install_test_project/test.sh
@@ -55,10 +55,10 @@ cmake "${absl_dir}" \
-DABSL_USE_EXTERNAL_GOOGLETEST=ON \
-DABSL_FIND_GOOGLETEST=ON \
-DCMAKE_BUILD_TYPE=Release \
- -DBUILD_TESTING=ON \
+ -DABSL_BUILD_TESTING=ON \
-DBUILD_SHARED_LIBS="${build_shared_libs}"
make -j $(nproc)
-ctest -j $(nproc)
+ctest -j $(nproc) --output-on-failure
make install
ldconfig
popd
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 750a4755..e3a7e9ff 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -46,11 +46,7 @@ if (POLICY CMP0091)
cmake_policy(SET CMP0091 NEW)
endif (POLICY CMP0091)
-# Set BUILD_TESTING to OFF by default.
-# This must come before the project() and include(CTest) lines.
-OPTION(BUILD_TESTING "Build tests" OFF)
-
-project(absl LANGUAGES CXX VERSION 20211102)
+project(absl LANGUAGES CXX VERSION 20220623)
include(CTest)
# Output directory is correct by default for most build setups. However, when
@@ -111,8 +107,11 @@ find_package(Threads REQUIRED)
include(CMakeDependentOption)
+option(ABSL_BUILD_TESTING
+ "If ON, Abseil will build all of Abseil's own tests." OFF)
+
option(ABSL_USE_EXTERNAL_GOOGLETEST
- "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subproject." OFF)
+ "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." OFF)
cmake_dependent_option(ABSL_FIND_GOOGLETEST
"If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project."
@@ -130,13 +129,19 @@ set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
"If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
)
-if(BUILD_TESTING)
+if(BUILD_TESTING AND ABSL_BUILD_TESTING)
## check targets
if (ABSL_USE_EXTERNAL_GOOGLETEST)
if (ABSL_FIND_GOOGLETEST)
find_package(GTest REQUIRED)
- else()
- if (NOT TARGET gtest AND NOT TARGET GTest::gtest)
+ elseif(NOT TARGET GTest::gtest)
+ if(TARGET gtest)
+ # When Google Test is included directly rather than through find_package, the aliases are missing.
+ add_library(GTest::gtest ALIAS gtest)
+ add_library(GTest::gtest_main ALIAS gtest_main)
+ add_library(GTest::gmock ALIAS gmock)
+ add_library(GTest::gmock_main ALIAS gmock_main)
+ else()
message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.")
endif()
endif()
@@ -146,7 +151,7 @@ if(BUILD_TESTING)
message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL")
endif()
if(ABSL_USE_GOOGLETEST_HEAD)
- set(absl_gtest_download_url "https://github.com/google/googletest/archive/master.zip")
+ set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip")
elseif(ABSL_GOOGLETEST_DOWNLOAD_URL)
set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL})
endif()
@@ -158,14 +163,6 @@ if(BUILD_TESTING)
include(CMake/Googletest/DownloadGTest.cmake)
endif()
- if (NOT ABSL_FIND_GOOGLETEST)
- # When Google Test is included directly rather than through find_package, the aliases are missing.
- add_library(GTest::gtest ALIAS gtest)
- add_library(GTest::gtest_main ALIAS gtest_main)
- add_library(GTest::gmock ALIAS gmock)
- add_library(GTest::gmock_main ALIAS gmock_main)
- endif()
-
check_target(GTest::gtest)
check_target(GTest::gtest_main)
check_target(GTest::gmock)
@@ -175,7 +172,7 @@ endif()
add_subdirectory(absl)
if(ABSL_ENABLE_INSTALL)
-
+
# install as a subdirectory only
install(EXPORT ${PROJECT_NAME}Targets
diff --git a/METADATA b/METADATA
index a36d767a..87c5ef38 100644
--- a/METADATA
+++ b/METADATA
@@ -12,7 +12,8 @@ third_party {
type: GIT
value: "https://github.com/abseil/abseil-cpp"
}
- version: "20211102.0"
- last_upgrade_date { year: 2021 month: 12 day: 21 }
+ version: "20220623.1"
+ last_upgrade_date { year: 2022 month: 9 day: 13 }
+ license_type: NOTICE
}
diff --git a/WORKSPACE b/WORKSPACE
index c9aa8caf..c332ba4e 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -20,11 +20,21 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# GoogleTest/GoogleMock framework. Used by most unit-tests.
http_archive(
- name = "com_google_googletest", # 2021-07-09T13:28:13Z
- sha256 = "12ef65654dc01ab40f6f33f9d02c04f2097d2cd9fbe48dc6001b29543583b0ad",
- strip_prefix = "googletest-8d51ffdfab10b3fba636ae69bc03da4b54f8c235",
+ name = "com_google_googletest",
+ sha256 = "ce7366fe57eb49928311189cb0e40e0a8bf3d3682fca89af30d884c25e983786",
+ strip_prefix = "googletest-release-1.12.0",
# Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh.
- urls = ["https://github.com/google/googletest/archive/8d51ffdfab10b3fba636ae69bc03da4b54f8c235.zip"],
+ urls = ["https://github.com/google/googletest/archive/refs/tags/release-1.12.0.zip"],
+)
+
+# RE2 (the regular expression library used by GoogleTest)
+# Note this must use a commit from the `abseil` branch of the RE2 project.
+# https://github.com/google/re2/tree/abseil
+http_archive(
+ name = "com_googlesource_code_re2",
+ sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887",
+ strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12",
+ urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09
)
# Google benchmark.
@@ -35,10 +45,17 @@ http_archive(
urls = ["https://github.com/google/benchmark/archive/0baacde3618ca617da95375e0af13ce1baadea47.zip"],
)
+# Bazel Skylib.
+http_archive(
+ name = "bazel_skylib",
+ urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"],
+ sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728",
+)
+
# Bazel platform rules.
http_archive(
name = "platforms",
- sha256 = "b601beaf841244de5c5a50d2b2eddd34839788000fa1be4260ce6603ca0d8eb7",
- strip_prefix = "platforms-98939346da932eef0b54cf808622f5bb0928f00b",
- urls = ["https://github.com/bazelbuild/platforms/archive/98939346da932eef0b54cf808622f5bb0928f00b.zip"],
+ sha256 = "a879ea428c6d56ab0ec18224f976515948822451473a80d06c2e50af0bbe5121",
+ strip_prefix = "platforms-da5541f26b7de1dc8e04c075c99df5351742a4a2",
+ urls = ["https://github.com/bazelbuild/platforms/archive/da5541f26b7de1dc8e04c075c99df5351742a4a2.zip"], # 2022-05-27
)
diff --git a/absl/BUILD.bazel b/absl/BUILD.bazel
index d799b7fe..7cccbbba 100644
--- a/absl/BUILD.bazel
+++ b/absl/BUILD.bazel
@@ -13,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
@@ -64,7 +66,15 @@ config_setting(
)
config_setting(
- name = "wasm",
+ name = "cpu_wasm",
+ values = {
+ "cpu": "wasm",
+ },
+ visibility = [":__subpackages__"],
+)
+
+config_setting(
+ name = "cpu_wasm32",
values = {
"cpu": "wasm32",
},
@@ -72,6 +82,33 @@ config_setting(
)
config_setting(
+ name = "platforms_wasm32",
+ constraint_values = [
+ "@platforms//cpu:wasm32",
+ ],
+ visibility = [":__subpackages__"],
+)
+
+config_setting(
+ name = "platforms_wasm64",
+ constraint_values = [
+ "@platforms//cpu:wasm64",
+ ],
+ visibility = [":__subpackages__"],
+)
+
+selects.config_setting_group(
+ name = "wasm",
+ match_any = [
+ ":cpu_wasm",
+ ":cpu_wasm32",
+ ":platforms_wasm32",
+ ":platforms_wasm64",
+ ],
+ visibility = [":__subpackages__"],
+)
+
+config_setting(
name = "fuchsia",
values = {
"cpu": "fuchsia",
diff --git a/absl/algorithm/BUILD.bazel b/absl/algorithm/BUILD.bazel
index afc52639..f6d74714 100644
--- a/absl/algorithm/BUILD.bazel
+++ b/absl/algorithm/BUILD.bazel
@@ -43,6 +43,7 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":algorithm",
+ "//absl/base:config",
"@com_google_googletest//:gtest_main",
],
)
diff --git a/absl/algorithm/CMakeLists.txt b/absl/algorithm/CMakeLists.txt
index 609d8589..181b49ca 100644
--- a/absl/algorithm/CMakeLists.txt
+++ b/absl/algorithm/CMakeLists.txt
@@ -35,6 +35,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::algorithm
+ absl::config
GTest::gmock_main
)
diff --git a/absl/algorithm/algorithm_test.cc b/absl/algorithm/algorithm_test.cc
index 81fccb61..d18df024 100644
--- a/absl/algorithm/algorithm_test.cc
+++ b/absl/algorithm/algorithm_test.cc
@@ -20,6 +20,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
namespace {
@@ -50,7 +51,15 @@ TEST(EqualTest, EmptyRange) {
std::vector<int> empty1;
std::vector<int> empty2;
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105705
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wnonnull"
+#endif
EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end()));
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end()));
EXPECT_TRUE(
absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end()));
diff --git a/absl/algorithm/container.h b/absl/algorithm/container.h
index c38a4a63..26b19529 100644
--- a/absl/algorithm/container.h
+++ b/absl/algorithm/container.h
@@ -166,7 +166,7 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
// c_all_of()
//
// Container-based version of the <algorithm> `std::all_of()` function to
-// test a condition on all elements within a container.
+// test if all elements within a container satisfy a condition.
template <typename C, typename Pred>
bool c_all_of(const C& c, Pred&& pred) {
return std::all_of(container_algorithm_internal::c_begin(c),
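
The reworded comment is the whole change to container.h. For reference, a minimal usage sketch of `c_all_of()` (mine, not part of this patch; it assumes only the public absl/algorithm/container.h API):

    #include <vector>

    #include "absl/algorithm/container.h"

    bool AllEven(const std::vector<int>& v) {
      // True iff every element of the container satisfies the predicate;
      // c_all_of() forwards to std::all_of() over the whole range of v.
      return absl::c_all_of(v, [](int x) { return x % 2 == 0; });
    }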
diff --git a/absl/base/BUILD.bazel b/absl/base/BUILD.bazel
index 4769efda..bd023ad8 100644
--- a/absl/base/BUILD.bazel
+++ b/absl/base/BUILD.bazel
@@ -75,6 +75,7 @@ cc_library(
":atomic_hook",
":config",
":core_headers",
+ ":errno_saver",
":log_severity",
],
)
@@ -157,7 +158,9 @@ cc_library(
"internal/direct_mmap.h",
"internal/low_level_alloc.h",
],
- copts = ABSL_DEFAULT_COPTS,
+ copts = ABSL_DEFAULT_COPTS + select({
+ "//conditions:default": [],
+ }),
linkopts = select({
"//absl:msvc_compiler": [],
"//absl:clang-cl_compiler": [],
@@ -432,6 +435,9 @@ cc_test(
srcs = ["spinlock_test_common.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":base",
":base_internal",
@@ -557,6 +563,7 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = [
"no_test_ios_x86_64",
+ "no_test_wasm",
],
deps = [
":malloc_internal",
@@ -570,6 +577,9 @@ cc_test(
srcs = ["internal/thread_identity_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":base",
":core_headers",
@@ -702,6 +712,31 @@ cc_test(
],
)
+cc_library(
+ name = "prefetch",
+ hdrs = ["internal/prefetch.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
+ deps = [
+ ":config",
+ ],
+)
+
+cc_test(
+ name = "prefetch_test",
+ size = "small",
+ srcs = ["internal/prefetch_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":prefetch",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_test(
name = "unique_small_name_test",
size = "small",
diff --git a/absl/base/CMakeLists.txt b/absl/base/CMakeLists.txt
index c7233cb3..ed55093a 100644
--- a/absl/base/CMakeLists.txt
+++ b/absl/base/CMakeLists.txt
@@ -16,6 +16,7 @@
find_library(LIBRT rt)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
atomic_hook
@@ -28,6 +29,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
errno_saver
@@ -52,6 +54,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
raw_logging_internal
@@ -63,11 +66,13 @@ absl_cc_library(
absl::atomic_hook
absl::config
absl::core_headers
+ absl::errno_saver
absl::log_severity
COPTS
${ABSL_DEFAULT_COPTS}
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
spinlock_wait
@@ -131,6 +136,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
malloc_internal
@@ -151,6 +157,7 @@ absl_cc_library(
Threads::Threads
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
base_internal
@@ -207,6 +214,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
throw_delegate
@@ -221,6 +229,7 @@ absl_cc_library(
absl::raw_logging_internal
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
exception_testing
@@ -234,6 +243,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
pretty_function
@@ -243,6 +253,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
exception_safety_testing
@@ -276,6 +287,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
atomic_hook_test_helper
@@ -375,6 +387,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
spinlock_test_common
@@ -409,6 +422,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
endian
@@ -519,6 +533,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
scoped_set_env
@@ -570,6 +585,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
strerror
@@ -601,6 +617,7 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
fast_type_id
@@ -626,6 +643,32 @@ absl_cc_test(
GTest::gtest_main
)
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+ NAME
+ prefetch
+ HDRS
+ "internal/prefetch.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::config
+)
+
+absl_cc_test(
+ NAME
+ prefetch_test
+ SRCS
+ "internal/prefetch_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::prefetch
+ GTest::gtest_main
+)
+
absl_cc_test(
NAME
optimization_test
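
The new `prefetch` targets added above wrap absl/base/internal/prefetch.h. A hedged sketch of the kind of use the header supports (the `PrefetchT0` name is taken from this release's internal header; it is an internal API, so this is illustrative only and not supported for outside use):

    #include <cstddef>

    #include "absl/base/internal/prefetch.h"

    long SumWithPrefetch(const int* data, size_t n) {
      long sum = 0;
      for (size_t i = 0; i < n; ++i) {
        // Hint the cache a few iterations ahead; compiles to a no-op
        // on platforms without prefetch support.
        if (i + 16 < n) absl::base_internal::PrefetchT0(data + i + 16);
        sum += data[i];
      }
      return sum;
    }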
diff --git a/absl/base/attributes.h b/absl/base/attributes.h
index e3907827..e4e7a3d8 100644
--- a/absl/base/attributes.h
+++ b/absl/base/attributes.h
@@ -136,9 +136,10 @@
// for further information.
// The MinGW compiler doesn't complain about the weak attribute until the link
// step, presumably because Windows doesn't use ELF binaries.
-#if (ABSL_HAVE_ATTRIBUTE(weak) || \
- (defined(__GNUC__) && !defined(__clang__))) && \
- (!defined(_WIN32) || __clang_major__ < 9) && !defined(__MINGW32__)
+#if (ABSL_HAVE_ATTRIBUTE(weak) || \
+ (defined(__GNUC__) && !defined(__clang__))) && \
+ (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+ !defined(__MINGW32__)
#undef ABSL_ATTRIBUTE_WEAK
#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
#define ABSL_HAVE_ATTRIBUTE_WEAK 1
@@ -212,6 +213,9 @@
// https://gcc.gnu.org/gcc-4.8/changes.html
#if ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#elif defined(_MSC_VER) && _MSC_VER >= 1928
+// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
#else
#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
#endif
@@ -311,7 +315,6 @@
__attribute__((section(#name))) __attribute__((noinline))
#endif
-
// ABSL_ATTRIBUTE_SECTION_VARIABLE
//
// Tells the compiler/linker to put a given variable into a section and define
@@ -338,8 +341,8 @@
// a no-op on ELF but not on Mach-O.
//
#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
-#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
- extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \
+ extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
#endif
#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
@@ -400,6 +403,9 @@
//
// Tells the compiler to warn about unused results.
//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard `[[nodiscard]]` directly over this macro.
+//
// When annotating a function, it must appear as the first part of the
// declaration or definition. The compiler will warn if the return value from
// such a function is unused:
@@ -426,9 +432,10 @@
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
//
// Note: past advice was to place the macro after the argument list.
-#if ABSL_HAVE_ATTRIBUTE(nodiscard)
-#define ABSL_MUST_USE_RESULT [[nodiscard]]
-#elif defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+//
+// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is
+// compliant with the stricter [[nodiscard]].
+#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define ABSL_MUST_USE_RESULT
@@ -498,7 +505,7 @@
#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
#define ABSL_XRAY_LOG_ARGS(N) \
- [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+ [[clang::xray_always_instrument, clang::xray_log_args(N)]]
#else
#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
#endif
@@ -642,6 +649,9 @@
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
// Examples:
//
// class ABSL_DEPRECATED("Use Bar instead") Foo {...};
@@ -652,14 +662,17 @@
// ABSL_DEPRECATED("Use DoThat() instead")
// void DoThis();
//
+// enum FooEnum {
+// kBar ABSL_DEPRECATED("Use kBaz instead"),
+// };
+//
// Every usage of a deprecated entity will trigger a warning when compiled with
-// clang's `-Wdeprecated-declarations` option. This option is turned off by
-// default, but the warnings will be reported by clang-tidy.
-#if defined(__clang__) && defined(__cplusplus) && __cplusplus >= 201103L
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if ABSL_HAVE_ATTRIBUTE(deprecated)
#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
-#endif
-
-#ifndef ABSL_DEPRECATED
+#else
#define ABSL_DEPRECATED(message)
#endif
@@ -669,9 +682,18 @@
// not compile (on supported platforms) unless the variable has a constant
// initializer. This is useful for variables with static and thread storage
// duration, because it guarantees that they will not suffer from the so-called
-// "static init order fiasco". Prefer to put this attribute on the most visible
-// declaration of the variable, if there's more than one, because code that
-// accesses the variable can then use the attribute for optimization.
+// "static init order fiasco".
+//
+// This attribute must be placed on the initializing declaration of the
+// variable. Some compilers will give a -Wmissing-constinit warning when this
+// attribute is placed on some other declaration but missing from the
+// initializing declaration.
+//
+// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can
+// also be used in a non-initializing declaration to tell the compiler that a
+// variable is already initialized, reducing overhead that would otherwise be
+// incurred by a hidden guard variable. Thus annotating all declarations with
+// this attribute is recommended to potentially enhance optimization.
//
// Example:
//
@@ -680,14 +702,19 @@
// ABSL_CONST_INIT static MyType my_var;
// };
//
-// MyType MyClass::my_var = MakeMyType(...);
+// ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...);
+//
+// For code or headers that are assured to only build with C++20 and up, prefer
+// just using the standard `constinit` keyword directly over this macro.
//
// Note that this attribute is redundant if the variable is declared constexpr.
-#if ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#if defined(__cpp_constinit) && __cpp_constinit >= 201907L
+#define ABSL_CONST_INIT constinit
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
#else
#define ABSL_CONST_INIT
-#endif // ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#endif
// ABSL_ATTRIBUTE_PURE_FUNCTION
//
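
To make the revised `ABSL_CONST_INIT` and `ABSL_DEPRECATED` guidance concrete, a small sketch (mine, not from the patch) that follows the updated comments:

    #include "absl/base/attributes.h"

    // ABSL_CONST_INIT belongs on the initializing declaration; under C++20
    // it now expands to the standard `constinit` keyword.
    ABSL_CONST_INIT static thread_local int tls_counter = 0;

    // The new documentation shows that enumerators can be deprecated too:
    enum FooEnum {
      kBar ABSL_DEPRECATED("Use kBaz instead"),
      kBaz,
    };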
diff --git a/absl/base/casts.h b/absl/base/casts.h
index 83c69126..b99adb06 100644
--- a/absl/base/casts.h
+++ b/absl/base/casts.h
@@ -29,6 +29,10 @@
#include <type_traits>
#include <utility>
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit> // For std::bit_cast.
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
#include "absl/base/internal/identity.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
@@ -36,19 +40,6 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-namespace internal_casts {
-
-template <class Dest, class Source>
-struct is_bitcastable
- : std::integral_constant<
- bool,
- sizeof(Dest) == sizeof(Source) &&
- type_traits_internal::is_trivially_copyable<Source>::value &&
- type_traits_internal::is_trivially_copyable<Dest>::value &&
- std::is_default_constructible<Dest>::value> {};
-
-} // namespace internal_casts
-
// implicit_cast()
//
// Performs an implicit conversion between types following the language
@@ -105,81 +96,83 @@ constexpr To implicit_cast(typename absl::internal::identity_t<To> to) {
// bit_cast()
//
-// Performs a bitwise cast on a type without changing the underlying bit
-// representation of that type's value. The two types must be of the same size
-// and both types must be trivially copyable. As with most casts, use with
-// caution. A `bit_cast()` might be needed when you need to temporarily treat a
-// type as some other type, such as in the following cases:
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
//
-// * Serialization (casting temporarily to `char *` for those purposes is
-// always allowed by the C++ standard)
-// * Managing the individual bits of a type within mathematical operations
-// that are not normally accessible through that type
-// * Casting non-pointer types to pointer types (casting the other way is
-// allowed by `reinterpret_cast()` but round-trips cannot occur the other
-// way).
-//
-// Example:
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
//
// float f = 3.14159265358979;
-// int i = bit_cast<int32_t>(f);
+// int i = bit_cast<int>(f);
// // i = 0x40490fdb
//
-// Casting non-pointer types to pointer types and then dereferencing them
-// traditionally produces undefined behavior.
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
//
// Example:
//
// // WRONG
-// float f = 3.14159265358979; // WRONG
-// int i = * reinterpret_cast<int*>(&f); // WRONG
+// float f = 3.14159265358979;
+// int i = reinterpret_cast<int&>(f); // Wrong
+// int j = *reinterpret_cast<int*>(&f); // Equally wrong
+// int k = *bit_cast<int*>(&f); // Equally wrong
//
-// The address-casting method produces undefined behavior according to the ISO
-// C++ specification section [basic.lval]. Roughly, this section says: if an
-// object in memory has one type, and a program accesses it with a different
-// type, the result is undefined behavior for most values of "different type".
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most values of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
//
// Such casting results in type punning: holding an object in memory of one type
// and reading its bits back using a different type. A `bit_cast()` avoids this
-// issue by implementing its casts using `memcpy()`, which avoids introducing
-// this undefined behavior.
-//
-// NOTE: The requirements here are more strict than the bit_cast of standard
-// proposal p0476 due to the need for workarounds and lack of intrinsics.
-// Specifically, this implementation also requires `Dest` to be
-// default-constructible.
-template <
- typename Dest, typename Source,
- typename std::enable_if<internal_casts::is_bitcastable<Dest, Source>::value,
- int>::type = 0>
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are stricter than those of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <typename Dest, typename Source,
+ typename std::enable_if<
+ sizeof(Dest) == sizeof(Source) &&
+ type_traits_internal::is_trivially_copyable<Source>::value &&
+ type_traits_internal::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ && std::is_default_constructible<Dest>::value
+#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ ,
+ int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+ return __builtin_bit_cast(Dest, source);
+}
+#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
inline Dest bit_cast(const Source& source) {
Dest dest;
memcpy(static_cast<void*>(std::addressof(dest)),
static_cast<const void*>(std::addressof(source)), sizeof(dest));
return dest;
}
+#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
-// NOTE: This overload is only picked if the requirements of bit_cast are
-// not met. It is therefore UB, but is provided temporarily as previous
-// versions of this function template were unchecked. Do not use this in
-// new code.
-template <
- typename Dest, typename Source,
- typename std::enable_if<
- !internal_casts::is_bitcastable<Dest, Source>::value,
- int>::type = 0>
-ABSL_DEPRECATED(
- "absl::bit_cast type requirements were violated. Update the types "
- "being used such that they are the same size and are both "
- "TriviallyCopyable.")
-inline Dest bit_cast(const Source& source) {
- static_assert(sizeof(Dest) == sizeof(Source),
- "Source and destination types should have equal sizes.");
-
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
ABSL_NAMESPACE_END
} // namespace absl
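A short usage sketch of the cast above (the function name is illustrative, not part of the patch): reading the bit pattern of a double into a uint64_t of equal size, with no pointer punning involved.

    #include <cstdint>
    #include "absl/base/casts.h"

    // Returns the IEEE-754 bit pattern of `d` as an integer value.
    uint64_t DoubleBits(double d) {
      return absl::bit_cast<uint64_t>(d);
    }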
diff --git a/absl/base/config.h b/absl/base/config.h
index 585485c3..705ecea0 100644
--- a/absl/base/config.h
+++ b/absl/base/config.h
@@ -56,6 +56,25 @@
#include <cstddef>
#endif // __cplusplus
+// ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
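A sketch of how Abseil itself consumes this macro (mirroring the invoke.h change later in this patch); the guard behaves correctly even under MSVC's non-conforming __cplusplus:

    #if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
        ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
    // C++17-or-later code path.
    #else
    // C++11/14 fallback.
    #endif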
#if defined(__APPLE__)
// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
// __IPHONE_8_0.
@@ -92,8 +111,8 @@
//
// LTS releases can be obtained from
// https://github.com/abseil/abseil-cpp/releases.
-#define ABSL_LTS_RELEASE_VERSION 20211102
-#define ABSL_LTS_RELEASE_PATCH_LEVEL 0
+#define ABSL_LTS_RELEASE_VERSION 20220623
+#define ABSL_LTS_RELEASE_PATCH_LEVEL 1
// Helper macro to convert a CPP variable to a string literal.
#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
@@ -183,12 +202,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_BUILTIN(x) 0
#endif
-#if defined(__is_identifier)
-#define ABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x))
-#else
-#define ABSL_INTERNAL_HAS_KEYWORD(x) 0
-#endif
-
#ifdef __has_feature
#define ABSL_HAVE_FEATURE(f) __has_feature(f)
#else
@@ -212,11 +225,12 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
-// We assume __thread is supported on Linux when compiled with Clang or compiled
-// against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+// We assume __thread is supported on Linux or Asylo when compiled with Clang or
+// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
#ifdef ABSL_HAVE_TLS
#error ABSL_HAVE_TLS cannot be directly set
-#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#elif (defined(__linux__) || defined(__ASYLO__)) && \
+ (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define ABSL_HAVE_TLS 1
#endif
@@ -259,19 +273,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
#endif
-// ABSL_HAVE_SOURCE_LOCATION_CURRENT
-//
-// Indicates whether `absl::SourceLocation::current()` will return useful
-// information in some contexts.
-#ifndef ABSL_HAVE_SOURCE_LOCATION_CURRENT
-#if ABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \
- ABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE)
-#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
-#elif ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0)
-#define ABSL_HAVE_SOURCE_LOCATION_CURRENT 1
-#endif
-#endif
-
// ABSL_HAVE_THREAD_LOCAL
//
// Checks whether C++11's `thread_local` storage duration specifier is
@@ -414,7 +415,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
- defined(__HAIKU__)
+ defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
+ defined(__QNX__)
#define ABSL_HAVE_MMAP 1
#endif
@@ -425,7 +427,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
- defined(_AIX) || defined(__ros__)
+ defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \
+ defined(__NetBSD__)
#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
#endif
@@ -520,22 +523,41 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error "absl endian detection needs to be set up for your compiler"
#endif
-// macOS 10.13 and iOS 10.11 don't let you use <any>, <optional>, or <variant>
-// even though the headers exist and are publicly noted to work. See
-// https://github.com/abseil/abseil-cpp/issues/207 and
+// macOS < 10.13 and iOS < 11 don't let you use <any>, <optional>, or <variant>
+// even though the headers exist and are publicly noted to work, because the
+// libc++ shared library shipped on the system doesn't have the requisite
+// exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and
// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+//
// libc++ spells out the availability requirements in the file
// llvm-project/libcxx/include/__config via the #define
// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS.
-#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
- ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
- (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
- (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
- (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
- __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+//
+// Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14
+// and iOS < 12 in the libc++ headers. This was corrected by
+// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
+// which subsequently made it into the XCode 12.5 release. We need to match the
+// old (incorrect) conditions when built with old XCode, but can use the
+// corrected earlier versions with new XCode.
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \
+ ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \
+ (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \
+ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \
+ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \
+ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))))
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
#else
#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
@@ -705,8 +727,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
#endif
-#undef ABSL_INTERNAL_HAS_KEYWORD
-
// ABSL_DLL
//
// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)`
@@ -732,8 +752,6 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// a compiler instrumentation module and a run-time library.
#ifdef ABSL_HAVE_MEMORY_SANITIZER
#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
-#elif defined(__SANITIZE_MEMORY__)
-#define ABSL_HAVE_MEMORY_SANITIZER 1
#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
#define ABSL_HAVE_MEMORY_SANITIZER 1
#endif
@@ -760,6 +778,45 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_ADDRESS_SANITIZER 1
#endif
+// ABSL_HAVE_HWADDRESS_SANITIZER
+//
+// Hardware-Assisted AddressSanitizer (or HWASAN) is a memory error detector
+// that is even faster than ASan; it uses CPU features such as ARM TBI,
+// Intel LAM, or AMD UAI.
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_HWADDRESS__)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#endif
+
+// ABSL_HAVE_LEAK_SANITIZER
+//
+// LeakSanitizer (or lsan) is a detector of memory leaks.
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time
+// whether the LeakSanitizer is potentially available. However, just because the
+// LeakSanitizer is available does not mean it is active. Use the
+// always-available run-time interface in //absl/debugging/leak_check.h for
+// interacting with LeakSanitizer.
+#ifdef ABSL_HAVE_LEAK_SANITIZER
+#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
+#elif defined(LEAK_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
+// use -DLEAK_SANITIZER.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+// Clang standalone LeakSanitizer (-fsanitize=leak)
+#elif ABSL_HAVE_FEATURE(leak_sanitizer)
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER)
+// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#endif
+
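A minimal sketch pairing the new compile-time macro with the run-time interface mentioned above; absl::LeakCheckDisabler comes from absl/debugging/leak_check.h, and the leak below is deliberate:

    #include "absl/debugging/leak_check.h"

    void AllocateIntentionallyLeakedSentinel() {
      // The run-time interface is always available and is a no-op when no
      // leak checker is active, so no #ifdef on ABSL_HAVE_LEAK_SANITIZER is
      // needed here.
      absl::LeakCheckDisabler disabler;
      static int* sentinel = new int(42);  // never freed, by design
      (void)sentinel;
    }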
// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
//
// Class template argument deduction is a language feature added in C++17.
@@ -769,4 +826,88 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
#endif
+// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+//
+// Prior to C++17, static constexpr variables defined in classes required a
+// separate definition outside of the class body, for example:
+//
+// class Foo {
+// static constexpr int kBar = 0;
+// };
+// constexpr int Foo::kBar;
+//
+// In C++17, these variables defined in classes are considered inline variables,
+// and the extra declaration is redundant. Since some compilers warn on the
+// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used
+// to emit the definitions only where they are still required:
+//
+// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// constexpr int Foo::kBar;
+// #endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+ ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
+#endif
+
+// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
+// RTTI support.
+#ifdef ABSL_INTERNAL_HAS_RTTI
+#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
+#elif !defined(__GNUC__) || defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_RTTI 1
+#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
+
+// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE
+#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
+#elif defined(__SSE__)
+#define ABSL_INTERNAL_HAVE_SSE 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
+// indicates that at least SSE was targeted with the /arch:SSE option.
+// All x86-64 processors support SSE, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
+#elif defined(__SSE2__)
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
+// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
+// All x86-64 processors support SSE2, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. Using SSSE3
+// with MSVC requires either assuming that the code will only ever run on CPUs
+// that support SSSE3, or using __cpuid() to detect support at runtime and
+// falling back to a non-SSSE3 implementation when SSSE3 is unsupported by the
+// CPU (a runtime-detection sketch follows this block).
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
+#elif defined(__SSSE3__)
+#define ABSL_INTERNAL_HAVE_SSSE3 1
+#endif
+
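A hedged sketch of the runtime-detection fallback just described, using MSVC's __cpuid intrinsic (the function name is illustrative); bit 9 of ECX from CPUID leaf 1 reports SSSE3 support:

    #if defined(_MSC_VER)
    #include <intrin.h>

    inline bool CpuHasSsse3() {
      int regs[4];       // EAX, EBX, ECX, EDX
      __cpuid(regs, 1);  // leaf 1: processor info and feature bits
      return (regs[2] & (1 << 9)) != 0;  // ECX bit 9 is SSSE3
    }
    #endif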
+// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM
+// SIMD).
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
+#elif defined(__ARM_NEON)
+#define ABSL_INTERNAL_HAVE_ARM_NEON 1
+#endif
+
#endif // ABSL_BASE_CONFIG_H_
diff --git a/absl/base/exception_safety_testing_test.cc b/absl/base/exception_safety_testing_test.cc
index a59be29e..a87fd6a9 100644
--- a/absl/base/exception_safety_testing_test.cc
+++ b/absl/base/exception_safety_testing_test.cc
@@ -701,7 +701,10 @@ struct BasicGuaranteeWithExtraContracts : public NonNegative {
static constexpr int kExceptionSentinel = 9999;
};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel;
+#endif
TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) {
auto tester_with_val =
diff --git a/absl/base/internal/cycleclock.cc b/absl/base/internal/cycleclock.cc
index 0e65005b..902e3f5e 100644
--- a/absl/base/internal/cycleclock.cc
+++ b/absl/base/internal/cycleclock.cc
@@ -25,6 +25,8 @@
#include <atomic>
#include <chrono> // NOLINT(build/c++11)
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
@@ -33,44 +35,20 @@ namespace base_internal {
#if ABSL_USE_UNSCALED_CYCLECLOCK
-namespace {
-
-#ifdef NDEBUG
-#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
-// Not debug mode and the UnscaledCycleClock frequency is the CPU
-// frequency. Scale the CycleClock to prevent overflow if someone
-// tries to represent the time as cycles since the Unix epoch.
-static constexpr int32_t kShift = 1;
-#else
-// Not debug mode and the UnscaledCycleClock isn't operating at the
-// raw CPU frequency. There is no need to do any scaling, so don't
-// needlessly sacrifice precision.
-static constexpr int32_t kShift = 0;
-#endif
-#else
-// In debug mode use a different shift to discourage depending on a
-// particular shift value.
-static constexpr int32_t kShift = 2;
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
#endif
-static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
-static std::atomic<CycleClockSourceFunc> cycle_clock_source;
+ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+ CycleClock::cycle_clock_source_{nullptr};
-CycleClockSourceFunc LoadCycleClockSource() {
- // Optimize for the common case (no callback) by first doing a relaxed load;
- // this is significantly faster on non-x86 platforms.
- if (cycle_clock_source.load(std::memory_order_relaxed) == nullptr) {
- return nullptr;
- }
- // This corresponds to the store(std::memory_order_release) in
- // CycleClockSource::Register, and makes sure that any updates made prior to
- // registering the callback are visible to this thread before the callback is
- // invoked.
- return cycle_clock_source.load(std::memory_order_acquire);
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+ // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+ CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
}
-} // namespace
-
+#ifdef _WIN32
int64_t CycleClock::Now() {
auto fn = LoadCycleClockSource();
if (fn == nullptr) {
@@ -78,15 +56,7 @@ int64_t CycleClock::Now() {
}
return fn() >> kShift;
}
-
-double CycleClock::Frequency() {
- return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
-}
-
-void CycleClockSource::Register(CycleClockSourceFunc source) {
- // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
- cycle_clock_source.store(source, std::memory_order_release);
-}
+#endif
#else
diff --git a/absl/base/internal/cycleclock.h b/absl/base/internal/cycleclock.h
index a18b5844..9704e388 100644
--- a/absl/base/internal/cycleclock.h
+++ b/absl/base/internal/cycleclock.h
@@ -42,14 +42,19 @@
#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#include <atomic>
#include <cstdint>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
+using CycleClockSourceFunc = int64_t (*)();
+
// -----------------------------------------------------------------------------
// CycleClock
// -----------------------------------------------------------------------------
@@ -68,12 +73,37 @@ class CycleClock {
static double Frequency();
private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+ static CycleClockSourceFunc LoadCycleClockSource();
+
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+ // Not debug mode and the UnscaledCycleClock frequency is the CPU
+ // frequency. Scale the CycleClock to prevent overflow if someone
+ // tries to represent the time as cycles since the Unix epoch.
+ static constexpr int32_t kShift = 1;
+#else
+ // Not debug mode and the UnscaledCycleClock isn't operating at the
+ // raw CPU frequency. There is no need to do any scaling, so don't
+ // needlessly sacrifice precision.
+ static constexpr int32_t kShift = 0;
+#endif
+#else // NDEBUG
+ // In debug mode use a different shift to discourage depending on a
+ // particular shift value.
+ static constexpr int32_t kShift = 2;
+#endif // NDEBUG
+
+ static constexpr double kFrequencyScale = 1.0 / (1 << kShift);
+ ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
CycleClock() = delete; // no instances
CycleClock(const CycleClock&) = delete;
CycleClock& operator=(const CycleClock&) = delete;
-};
-using CycleClockSourceFunc = int64_t (*)();
+ friend class CycleClockSource;
+};
class CycleClockSource {
private:
@@ -87,6 +117,41 @@ class CycleClockSource {
static void Register(CycleClockSourceFunc source);
};
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+#endif // !defined(__x86_64__)
+
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback
+ // is invoked.
+ return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
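A brief usage sketch: because Now() shifts its reading right by kShift and Frequency() is scaled by the matching kFrequencyScale, dividing a Now() delta by Frequency() yields seconds whatever shift was compiled in. DoWork() here is a hypothetical workload.

    #include <cstdint>
    #include "absl/base/internal/cycleclock.h"

    void DoWork();  // hypothetical workload being timed

    double TimeWorkInSeconds() {
      int64_t start = absl::base_internal::CycleClock::Now();
      DoWork();
      int64_t elapsed = absl::base_internal::CycleClock::Now() - start;
      return static_cast<double>(elapsed) /
             absl::base_internal::CycleClock::Frequency();
    }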
diff --git a/absl/base/internal/direct_mmap.h b/absl/base/internal/direct_mmap.h
index 274054cd..e492bb00 100644
--- a/absl/base/internal/direct_mmap.h
+++ b/absl/base/internal/direct_mmap.h
@@ -20,7 +20,7 @@
#include "absl/base/config.h"
-#if ABSL_HAVE_MMAP
+#ifdef ABSL_HAVE_MMAP
#include <sys/mman.h>
@@ -41,13 +41,13 @@
#ifdef __mips__
// Include definitions of the ABI currently in use.
-#ifdef __BIONIC__
+#if defined(__BIONIC__) || !defined(__GLIBC__)
// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
// definitions we need.
#include <asm/sgidefs.h>
#else
#include <sgidefs.h>
-#endif // __BIONIC__
+#endif // __BIONIC__ || !__GLIBC__
#endif // __mips__
// SYS_mmap and SYS_munmap are not defined in Android.
diff --git a/absl/base/internal/endian.h b/absl/base/internal/endian.h
index dad0e9ae..50747d75 100644
--- a/absl/base/internal/endian.h
+++ b/absl/base/internal/endian.h
@@ -16,16 +16,9 @@
#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_
-// The following guarantees declaration of the byte swap functions
-#ifdef _MSC_VER
-#include <stdlib.h> // NOLINT(build/include)
-#elif defined(__FreeBSD__)
-#include <sys/endian.h>
-#elif defined(__GLIBC__)
-#include <byteswap.h> // IWYU pragma: export
-#endif
-
#include <cstdint>
+#include <cstdlib>
+
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
@@ -34,47 +27,11 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
-// Use compiler byte-swapping intrinsics if they are available. 32-bit
-// and 64-bit versions are available in Clang and GCC as of GCC 4.3.0.
-// The 16-bit version is available in Clang and GCC only as of GCC 4.8.0.
-// For simplicity, we enable them all only for GCC 4.8.0 or later.
-#if defined(__clang__) || \
- (defined(__GNUC__) && \
- ((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
inline uint64_t gbswap_64(uint64_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
return __builtin_bswap64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return __builtin_bswap32(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return __builtin_bswap16(host_int);
-}
-
#elif defined(_MSC_VER)
-inline uint64_t gbswap_64(uint64_t host_int) {
return _byteswap_uint64(host_int);
-}
-inline uint32_t gbswap_32(uint32_t host_int) {
- return _byteswap_ulong(host_int);
-}
-inline uint16_t gbswap_16(uint16_t host_int) {
- return _byteswap_ushort(host_int);
-}
-
-#else
-inline uint64_t gbswap_64(uint64_t host_int) {
-#if defined(__GNUC__) && defined(__x86_64__) && !defined(__APPLE__)
- // Adapted from /usr/include/byteswap.h. Not available on Mac.
- if (__builtin_constant_p(host_int)) {
- return __bswap_constant_64(host_int);
- } else {
- uint64_t result;
- __asm__("bswap %0" : "=r"(result) : "0"(host_int));
- return result;
- }
-#elif defined(__GLIBC__)
- return bswap_64(host_int);
#else
return (((host_int & uint64_t{0xFF}) << 56) |
((host_int & uint64_t{0xFF00}) << 40) |
@@ -84,12 +41,14 @@ inline uint64_t gbswap_64(uint64_t host_int) {
((host_int & uint64_t{0xFF0000000000}) >> 24) |
((host_int & uint64_t{0xFF000000000000}) >> 40) |
((host_int & uint64_t{0xFF00000000000000}) >> 56));
-#endif // bswap_64
+#endif
}
inline uint32_t gbswap_32(uint32_t host_int) {
-#if defined(__GLIBC__)
- return bswap_32(host_int);
+#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
+ return __builtin_bswap32(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ulong(host_int);
#else
return (((host_int & uint32_t{0xFF}) << 24) |
((host_int & uint32_t{0xFF00}) << 8) |
@@ -99,33 +58,29 @@ inline uint32_t gbswap_32(uint32_t host_int) {
}
inline uint16_t gbswap_16(uint16_t host_int) {
-#if defined(__GLIBC__)
- return bswap_16(host_int);
+#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
+ return __builtin_bswap16(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ushort(host_int);
#else
return (((host_int & uint16_t{0xFF}) << 8) |
((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
-#endif // intrinsics available
-
#ifdef ABSL_IS_LITTLE_ENDIAN
-// Definitions for ntohl etc. that don't require us to include
-// netinet/in.h. We wrap gbswap_32 and gbswap_16 in functions rather
-// than just #defining them because in debug mode, gcc doesn't
-// correctly handle the (rather involved) definitions of bswap_32.
-// gcc guarantees that inline functions are as fast as macros, so
-// this isn't a performance hit.
+// Portable definitions for htonl (host-to-network) and friends on little-endian
+// architectures.
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
#elif defined ABSL_IS_BIG_ENDIAN
-// These definitions are simpler on big-endian machines
-// These are functions instead of macros to avoid self-assignment warnings
-// on calls such as "i = ghtnol(i);". This also provides type checking.
+// Portable definitions for htonl (host-to-network) etc on big-endian
+// architectures. These definitions are simpler since the host byte order is the
+// same as network byte order.
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }
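A small sketch of the swap helpers above: gbswap_32 reverses byte order, so applying it twice restores the value regardless of which branch (builtin, MSVC intrinsic, or shift fallback) was compiled in.

    #include <cassert>
    #include <cstdint>
    #include "absl/base/internal/endian.h"

    void SwapRoundTrip() {
      uint32_t v = 0x01020304;
      uint32_t swapped = absl::gbswap_32(v);  // 0x04030201
      assert(absl::gbswap_32(swapped) == v);  // double swap is the identity
    }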
diff --git a/absl/base/internal/fast_type_id.h b/absl/base/internal/fast_type_id.h
index 3db59e83..a547b3a8 100644
--- a/absl/base/internal/fast_type_id.h
+++ b/absl/base/internal/fast_type_id.h
@@ -28,8 +28,10 @@ struct FastTypeTag {
constexpr static char dummy_var = 0;
};
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename Type>
constexpr char FastTypeTag<Type>::dummy_var;
+#endif
// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
// passed-in type. These are meant to be good match for keys into maps or
diff --git a/absl/base/internal/invoke.h b/absl/base/internal/invoke.h
index 5c71f328..643c2a42 100644
--- a/absl/base/internal/invoke.h
+++ b/absl/base/internal/invoke.h
@@ -14,6 +14,8 @@
//
// absl::base_internal::invoke(f, args...) is an implementation of
// INVOKE(f, args...) from section [func.require] of the C++ standard.
+// When compiled as C++17 and later versions, it is implemented as an alias of
+// std::invoke.
//
// [func.require]
// Define INVOKE (f, t1, t2, ..., tN) as follows:
@@ -35,6 +37,26 @@
#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
#define ABSL_BASE_INTERNAL_INVOKE_H_
+#include "absl/base/config.h"
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#include <functional>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using std::invoke;
+using std::invoke_result_t;
+using std::is_invocable_r;
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
#include <algorithm>
#include <type_traits>
#include <utility>
@@ -80,8 +102,18 @@ struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
static decltype((std::declval<Obj>().*
std::declval<MemFun>())(std::declval<Args>()...))
Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+// Ignore bogus GCC warnings on this line.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for a similar example.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return (std::forward<Obj>(obj).*
std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic pop
+#endif
}
};
@@ -180,8 +212,30 @@ invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
std::forward<Args>(args)...);
}
+
+template <typename AlwaysVoid, typename, typename, typename...>
+struct IsInvocableRImpl : std::false_type {};
+
+template <typename R, typename F, typename... Args>
+struct IsInvocableRImpl<
+ absl::void_t<absl::base_internal::invoke_result_t<F, Args...> >, R, F,
+ Args...>
+ : std::integral_constant<
+ bool,
+ std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>,
+ R>::value ||
+ std::is_void<R>::value> {};
+
+// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
+// and either the return type is convertible to `R`, or `R` is void.
+// C++11-compatible version of `std::is_invocable_r`.
+template <typename R, typename F, typename... Args>
+using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
+#endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
#endif // ABSL_BASE_INTERNAL_INVOKE_H_
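A compact sketch of the INVOKE semantics this header provides (the struct and function names are illustrative); under C++17 these calls resolve directly to std::invoke and std::is_invocable_r:

    #include "absl/base/internal/invoke.h"

    struct Adder {
      int base;
      int Add(int x) const { return base + x; }
    };

    int Demo() {
      Adder a{10};
      int r1 = absl::base_internal::invoke(&Adder::Add, a, 5);  // 15
      int r2 = absl::base_internal::invoke(&Adder::base, a);    // 10
      static_assert(absl::base_internal::is_invocable_r<
                        long, decltype(&Adder::Add), Adder&, int>::value,
                    "int result is convertible to long");
      return r1 + r2;
    }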
diff --git a/absl/base/internal/low_level_alloc_test.cc b/absl/base/internal/low_level_alloc_test.cc
index 31abb888..8fdec09e 100644
--- a/absl/base/internal/low_level_alloc_test.cc
+++ b/absl/base/internal/low_level_alloc_test.cc
@@ -86,7 +86,7 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
AllocMap::iterator it;
BlockDesc block_desc;
int rnd;
- LowLevelAlloc::Arena *arena = 0;
+ LowLevelAlloc::Arena *arena = nullptr;
if (use_new_arena) {
int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
arena = LowLevelAlloc::NewArena(flags);
@@ -101,11 +101,10 @@ static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
case 0: // coin came up heads: add a block
using_low_level_alloc = true;
block_desc.len = rand() & 0x3fff;
- block_desc.ptr =
- reinterpret_cast<char *>(
- arena == 0
- ? LowLevelAlloc::Alloc(block_desc.len)
- : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+ block_desc.ptr = reinterpret_cast<char *>(
+ arena == nullptr
+ ? LowLevelAlloc::Alloc(block_desc.len)
+ : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
using_low_level_alloc = false;
RandomizeBlockDesc(&block_desc);
rnd = rand();
diff --git a/absl/base/internal/prefetch.h b/absl/base/internal/prefetch.h
new file mode 100644
index 00000000..06419283
--- /dev/null
+++ b/absl/base/internal/prefetch.h
@@ -0,0 +1,138 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_
+#define ABSL_BASE_INTERNAL_PREFETCH_H_
+
+#include "absl/base/config.h"
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE)
+#include <intrin.h>
+#pragma intrinsic(_mm_prefetch)
+#endif
+
+// Compatibility wrappers around __builtin_prefetch, to prefetch data
+// for read if supported by the toolchain.
+
+// Move data into the cache before it is read, or "prefetch" it.
+//
+// The value of `addr` is the address of the memory to prefetch. If
+// the target and compiler support it, data prefetch instructions are
+// generated. If the prefetch is done some time before the memory is
+// read, it may be in the cache by the time the read occurs.
+//
+// The function names specify the temporal locality heuristic applied,
+// using the names of Intel prefetch instructions:
+//
+// T0 - high degree of temporal locality; data should be left in as
+// many levels of the cache as possible
+// T1 - moderate degree of temporal locality
+// T2 - low degree of temporal locality
+// Nta - no temporal locality, data need not be left in the cache
+// after the read
+//
+// Incorrect or gratuitous use of these functions can degrade
+// performance, so use them only when representative benchmarks show
+// an improvement.
+//
+// Example usage:
+//
+// absl::base_internal::PrefetchT0(addr);
+//
+// Currently, the different prefetch calls behave on some Intel
+// architectures as follows:
+//
+// SNB..SKL SKX
+// PrefetchT0() L1/L2/L3 L1/L2
+// PrefetchT1() L2/L3 L2
+// PrefetchT2() L2/L3 L2
+// PrefetchNta() L1/--/L3 L1*
+//
+// * On SKX PrefetchNta() will bring the line into L1 but will evict
+// from L3 cache. This might result in surprising behavior.
+//
+// SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon.
+//
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+void PrefetchT0(const void* addr);
+void PrefetchT1(const void* addr);
+void PrefetchT2(const void* addr);
+void PrefetchNta(const void* addr);
+
+// Implementation details follow.
+
+#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+// See __builtin_prefetch:
+// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html.
+//
+// These functions speculatively load for read only. This is
+// safe for all currently supported platforms. However, prefetch for
+// store may have problems depending on the target platform.
+//
+inline void PrefetchT0(const void* addr) {
+ // Note: this uses prefetcht0 on Intel.
+ __builtin_prefetch(addr, 0, 3);
+}
+inline void PrefetchT1(const void* addr) {
+ // Note: this uses prefetcht1 on Intel.
+ __builtin_prefetch(addr, 0, 2);
+}
+inline void PrefetchT2(const void* addr) {
+ // Note: this uses prefetcht2 on Intel.
+ __builtin_prefetch(addr, 0, 1);
+}
+inline void PrefetchNta(const void* addr) {
+ // Note: this uses prefetchnta on Intel.
+ __builtin_prefetch(addr, 0, 0);
+}
+
+#elif defined(ABSL_INTERNAL_HAVE_SSE)
+
+#define ABSL_INTERNAL_HAVE_PREFETCH 1
+
+inline void PrefetchT0(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T0);
+}
+inline void PrefetchT1(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T1);
+}
+inline void PrefetchT2(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_T2);
+}
+inline void PrefetchNta(const void* addr) {
+ _mm_prefetch(reinterpret_cast<const char*>(addr), _MM_HINT_NTA);
+}
+
+#else
+inline void PrefetchT0(const void*) {}
+inline void PrefetchT1(const void*) {}
+inline void PrefetchT2(const void*) {}
+inline void PrefetchNta(const void*) {}
+#endif
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_PREFETCH_H_
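A minimal sketch of these wrappers in a streaming loop; kLookahead is a hypothetical tuning constant, and per the note above only representative benchmarks can justify keeping the prefetch:

    #include <cstddef>
    #include "absl/base/internal/prefetch.h"

    double Sum(const double* data, size_t n) {
      constexpr size_t kLookahead = 16;
      double sum = 0.0;
      for (size_t i = 0; i < n; ++i) {
        if (i + kLookahead < n) {
          // Streaming read with no reuse, so the Nta hint fits best.
          absl::base_internal::PrefetchNta(data + i + kLookahead);
        }
        sum += data[i];
      }
      return sum;
    }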
diff --git a/absl/base/internal/prefetch_test.cc b/absl/base/internal/prefetch_test.cc
new file mode 100644
index 00000000..7c1dae46
--- /dev/null
+++ b/absl/base/internal/prefetch_test.cc
@@ -0,0 +1,43 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/prefetch.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+int number = 42;
+
+TEST(Prefetch, TemporalLocalityNone) {
+ absl::base_internal::PrefetchNta(&number);
+ EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityLow) {
+ absl::base_internal::PrefetchT2(&number);
+ EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityMedium) {
+ absl::base_internal::PrefetchT1(&number);
+ EXPECT_EQ(number, 42);
+}
+
+TEST(Prefetch, TemporalLocalityHigh) {
+ absl::base_internal::PrefetchT0(&number);
+ EXPECT_EQ(number, 42);
+}
+
+} // namespace
diff --git a/absl/base/internal/raw_logging.cc b/absl/base/internal/raw_logging.cc
index 074e026a..54e71a3f 100644
--- a/absl/base/internal/raw_logging.cc
+++ b/absl/base/internal/raw_logging.cc
@@ -14,15 +14,17 @@
#include "absl/base/internal/raw_logging.h"
-#include <stddef.h>
#include <cstdarg>
+#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>
+#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/errno_saver.h"
#include "absl/base/log_severity.h"
// We know how to perform low-level writes to stderr in POSIX and Windows. For
@@ -36,8 +38,8 @@
// This preprocessor token is also defined in raw_io.cc. If you need to copy
// this, consider moving both to config.h instead.
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
- defined(__Fuchsia__) || defined(__native_client__) || \
- defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+ defined(__Fuchsia__) || defined(__native_client__) || \
+ defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__)
#include <unistd.h>
@@ -50,7 +52,8 @@
// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
// syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
// for low level operations that want to avoid libc.
-#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \
+ !defined(__ANDROID__)
#include <sys/syscall.h>
#define ABSL_HAVE_SYSCALL_WRITE 1
#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
@@ -76,13 +79,6 @@ namespace {
// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
// a selected set of platforms for which we expect not to be able to raw log.
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<LogPrefixHook>
- log_prefix_hook;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
- absl::base_internal::AtomicHook<AbortHook>
- abort_hook;
-
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
constexpr char kTruncated[] = " ... (message truncated)\n";
@@ -130,6 +126,18 @@ bool DoRawLog(char** buf, int* size, const char* format, ...) {
return true;
}
+bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,
+ char** buf, int* buf_size) {
+ DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
+ return true;
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
+ log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<AbortHook> abort_hook;
+
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
void RawLogVA(absl::LogSeverity severity, const char* file, int line,
@@ -150,14 +158,7 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
}
#endif
- auto log_prefix_hook_ptr = log_prefix_hook.Load();
- if (log_prefix_hook_ptr) {
- enabled = log_prefix_hook_ptr(severity, file, line, &buf, &size);
- } else {
- if (enabled) {
- DoRawLog(&buf, &size, "[%s : %d] RAW: ", file, line);
- }
- }
+ enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
const char* const prefix_end = buf;
#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
@@ -168,11 +169,12 @@ void RawLogVA(absl::LogSeverity severity, const char* file, int line,
} else {
DoRawLog(&buf, &size, "%s", kTruncated);
}
- SafeWriteToStderr(buffer, strlen(buffer));
+ AsyncSignalSafeWriteToStderr(buffer, strlen(buffer));
}
#else
static_cast<void>(format);
static_cast<void>(ap);
+ static_cast<void>(enabled);
#endif
// Abort the process after logging a FATAL message, even if the output itself
@@ -195,8 +197,11 @@ void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
} // namespace
-void SafeWriteToStderr(const char *s, size_t len) {
+void AsyncSignalSafeWriteToStderr(const char* s, size_t len) {
+ absl::base_internal::ErrnoSaver errno_saver;
#if defined(ABSL_HAVE_SYSCALL_WRITE)
+ // We prefer calling write via `syscall` to minimize the risk of libc doing
+ // something "helpful".
syscall(SYS_write, STDERR_FILENO, s, len);
#elif defined(ABSL_HAVE_POSIX_WRITE)
write(STDERR_FILENO, s, len);
@@ -229,7 +234,9 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
absl::base_internal::AtomicHook<InternalLogFunction>
internal_log_function(DefaultInternalLog);
-void RegisterLogPrefixHook(LogPrefixHook func) { log_prefix_hook.Store(func); }
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
+ log_filter_and_prefix_hook.Store(func);
+}
void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
diff --git a/absl/base/internal/raw_logging.h b/absl/base/internal/raw_logging.h
index 2bf7aaba..0747c9df 100644
--- a/absl/base/internal/raw_logging.h
+++ b/absl/base/internal/raw_logging.h
@@ -109,12 +109,9 @@ namespace raw_logging_internal {
void RawLog(absl::LogSeverity severity, const char* file, int line,
const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
-// Writes the provided buffer directly to stderr, in a safe, low-level manner.
-//
-// In POSIX this means calling write(), which is async-signal safe and does
-// not malloc. If the platform supports the SYS_write syscall, we invoke that
-// directly to side-step any libc interception.
-void SafeWriteToStderr(const char *s, size_t len);
+// Writes the provided buffer directly to stderr, in a signal-safe, low-level
+// manner.
+void AsyncSignalSafeWriteToStderr(const char* s, size_t len);
// compile-time function to get the "base" filename, that is, the part of
// a filename after the last "/" or "\" path separator. The search starts at
@@ -148,11 +145,12 @@ bool RawLoggingFullySupported();
// 'severity' is the severity level of the message being written.
// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
// was located.
-// 'buffer' and 'buf_size' are pointers to the buffer and buffer size. If the
-// hook writes a prefix, it must increment *buffer and decrement *buf_size
+// 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the
+// hook writes a prefix, it must increment *buf and decrement *buf_size
// accordingly.
-using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
- int line, char** buffer, int* buf_size);
+using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity,
+ const char* file, int line, char** buf,
+ int* buf_size);
// Function type for a raw_logging customization hook called to abort a process
// when a FATAL message is logged. If the provided AbortHook() returns, the
@@ -162,7 +160,10 @@ using LogPrefixHook = bool (*)(absl::LogSeverity severity, const char* file,
// was located.
// The NUL-terminated logged message lives in the buffer between 'buf_start'
// and 'buf_end'. 'prefix_end' points to the first non-prefix character of the
-// buffer (as written by the LogPrefixHook.)
+// buffer (as written by the LogFilterAndPrefixHook.)
+//
+// The lifetime of the filename and message buffers will not end while the
+// process remains alive.
using AbortHook = void (*)(const char* file, int line, const char* buf_start,
const char* prefix_end, const char* buf_end);
@@ -184,7 +185,7 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
//
// These functions are safe to call at any point during initialization; they do
// not block or malloc, and are async-signal safe.
-void RegisterLogPrefixHook(LogPrefixHook func);
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
void RegisterAbortHook(AbortHook func);
void RegisterInternalLogFunction(InternalLogFunction func);
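A hypothetical hook sketch honoring the renamed contract: return false to drop the message entirely, or write a prefix, advance *buf, shrink *buf_size, and return true to let the message through.

    #include <cstdio>

    bool WarningsOnlyFilterAndPrefix(absl::LogSeverity severity,
                                     const char* file, int line, char** buf,
                                     int* buf_size) {
      if (severity < absl::LogSeverity::kWarning) return false;  // filtered out
      int n = snprintf(*buf, static_cast<size_t>(*buf_size), "[%s:%d] ", file,
                       line);
      if (n > 0 && n < *buf_size) {
        *buf += n;
        *buf_size -= n;
      }
      return true;
    }

    // Installed once during startup:
    // absl::raw_logging_internal::RegisterLogFilterAndPrefixHook(
    //     &WarningsOnlyFilterAndPrefix);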
diff --git a/absl/base/internal/spinlock.cc b/absl/base/internal/spinlock.cc
index 35c0696a..9b5ed6e4 100644
--- a/absl/base/internal/spinlock.cc
+++ b/absl/base/internal/spinlock.cc
@@ -19,6 +19,7 @@
#include <limits>
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/atomic_hook.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/spinlock_wait.h"
@@ -66,12 +67,14 @@ void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
submit_profile_data.Store(fn);
}
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
// Static member variable definitions.
constexpr uint32_t SpinLock::kSpinLockHeld;
constexpr uint32_t SpinLock::kSpinLockCooperative;
constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
constexpr uint32_t SpinLock::kSpinLockSleeper;
constexpr uint32_t SpinLock::kWaitTimeMask;
+#endif
// Uncommon constructors.
SpinLock::SpinLock(base_internal::SchedulingMode mode)
diff --git a/absl/base/internal/spinlock.h b/absl/base/internal/spinlock.h
index ac40daff..6d8d8ddd 100644
--- a/absl/base/internal/spinlock.h
+++ b/absl/base/internal/spinlock.h
@@ -120,6 +120,14 @@ class ABSL_LOCKABLE SpinLock {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
+ // Return immediately if this thread holds the SpinLock exclusively.
+ // Otherwise, report an error by crashing with a diagnostic.
+ inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+ if (!IsHeld()) {
+ ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+ }
+ }
+
protected:
// These should not be exported except for testing.
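A short sketch of the new assertion in use (the helper below is illustrative); because of the ABSL_ASSERT_EXCLUSIVE_LOCK annotation, a call that returns also satisfies thread-safety analysis:

    void IncrementLocked(absl::base_internal::SpinLock& lock, int& counter) {
      lock.AssertHeld();  // crashes with a FATAL diagnostic if not held
      ++counter;
    }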
diff --git a/absl/base/internal/spinlock_linux.inc b/absl/base/internal/spinlock_linux.inc
index 202f7cdf..fe8ba674 100644
--- a/absl/base/internal/spinlock_linux.inc
+++ b/absl/base/internal/spinlock_linux.inc
@@ -57,13 +57,10 @@ static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
- std::atomic<uint32_t> *w, uint32_t value, int loop,
+ std::atomic<uint32_t> *w, uint32_t value, int,
absl::base_internal::SchedulingMode) {
absl::base_internal::ErrnoSaver errno_saver;
- struct timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
- syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, &tm);
+ syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
diff --git a/absl/base/internal/sysinfo.cc b/absl/base/internal/sysinfo.cc
index a7cfb461..c8366df1 100644
--- a/absl/base/internal/sysinfo.cc
+++ b/absl/base/internal/sysinfo.cc
@@ -124,7 +124,6 @@ int Win32NumCPUs() {
} // namespace
-
static int GetNumCPUs() {
#if defined(__myriad2__)
return 1;
diff --git a/absl/base/internal/sysinfo_test.cc b/absl/base/internal/sysinfo_test.cc
index 5f9e45f6..f305b6c5 100644
--- a/absl/base/internal/sysinfo_test.cc
+++ b/absl/base/internal/sysinfo_test.cc
@@ -37,29 +37,6 @@ TEST(SysinfoTest, NumCPUs) {
<< "NumCPUs() should not have the default value of 0";
}
-// Ensure that NominalCPUFrequency returns a reasonable value, or 1.00 on
-// platforms where the CPU frequency is not available through sysfs.
-//
-// POWER is particularly problematic here; some Linux kernels expose the CPU
-// frequency, while others do not. Since we can't predict a priori what a given
-// machine is going to do, just disable this test on POWER on Linux.
-#if !(defined(__linux) && (defined(__ppc64__) || defined(__PPC64__)))
-TEST(SysinfoTest, NominalCPUFrequency) {
- // Linux only exposes the CPU frequency on certain architectures, and
- // Emscripten doesn't expose it at all.
-#if defined(__linux__) && \
- (defined(__aarch64__) || defined(__hppa__) || defined(__mips__) || \
- defined(__riscv) || defined(__s390x__)) || \
- defined(__EMSCRIPTEN__)
- EXPECT_EQ(NominalCPUFrequency(), 1.0)
- << "CPU frequency detection was fixed! Please update unittest.";
-#else
- EXPECT_GE(NominalCPUFrequency(), 1000.0)
- << "NominalCPUFrequency() did not return a reasonable value";
-#endif
-}
-#endif
-
TEST(SysinfoTest, GetTID) {
EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test.
#ifdef __native_client__
diff --git a/absl/base/internal/thread_identity.cc b/absl/base/internal/thread_identity.cc
index 9950e63a..79853f09 100644
--- a/absl/base/internal/thread_identity.cc
+++ b/absl/base/internal/thread_identity.cc
@@ -14,7 +14,7 @@
#include "absl/base/internal/thread_identity.h"
-#ifndef _WIN32
+#if !defined(_WIN32) || defined(__MINGW32__)
#include <pthread.h>
#include <signal.h>
#endif
@@ -56,6 +56,7 @@ void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
// *different* instances of this ptr.
// Apple platforms have the visibility attribute, but issue a compile warning
// that protected visibility is unsupported.
+ABSL_CONST_INIT // Must come before __attribute__((visibility("protected")))
#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
__attribute__((visibility("protected")))
#endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
diff --git a/absl/base/internal/unscaledcycleclock.cc b/absl/base/internal/unscaledcycleclock.cc
index 4d352bd1..b1c396c6 100644
--- a/absl/base/internal/unscaledcycleclock.cc
+++ b/absl/base/internal/unscaledcycleclock.cc
@@ -24,8 +24,13 @@
#ifdef __GLIBC__
#include <sys/platform/ppc.h>
#elif defined(__FreeBSD__)
-#include <sys/sysctl.h>
+// clang-format off
+// This order does actually matter =(.
#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+
+#include "absl/base/call_once.h"
#endif
#endif
@@ -49,12 +54,6 @@ double UnscaledCycleClock::Frequency() {
#elif defined(__x86_64__)
-int64_t UnscaledCycleClock::Now() {
- uint64_t low, high;
- __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
- return (high << 32) | low;
-}
-
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
diff --git a/absl/base/internal/unscaledcycleclock.h b/absl/base/internal/unscaledcycleclock.h
index 681ff8f9..2cbeae31 100644
--- a/absl/base/internal/unscaledcycleclock.h
+++ b/absl/base/internal/unscaledcycleclock.h
@@ -47,7 +47,7 @@
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
- defined(_M_IX86) || defined(_M_X64)
+ defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
@@ -59,8 +59,7 @@
// CycleClock that runs at at least 1 MHz. We've found some Android
// ARM64 devices where this is not the case, so we disable it by
// default on Android ARM64.
-#if defined(__native_client__) || \
- (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) || \
+#if defined(__native_client__) || (defined(__APPLE__)) || \
(defined(__ANDROID__) && defined(__aarch64__))
#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
#else
@@ -115,6 +114,16 @@ class UnscaledCycleClock {
friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
};
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+ uint64_t low, high;
+ __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+ return (high << 32) | low;
+}
+
+#endif
+
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/base/invoke_test.cc b/absl/base/invoke_test.cc
index bcdef36c..7be26f64 100644
--- a/absl/base/invoke_test.cc
+++ b/absl/base/invoke_test.cc
@@ -31,6 +31,14 @@ namespace {
int Function(int a, int b) { return a - b; }
+void VoidFunction(int& a, int& b) {
+ a += b;
+ b = a - b;
+ a -= b;
+}
+
+int ZeroArgFunction() { return -1937; }
+
int Sink(std::unique_ptr<int> p) {
return *p;
}
@@ -223,6 +231,100 @@ TEST(InvokeTest, SfinaeFriendly) {
EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42));
}
+TEST(IsInvocableRTest, CallableExactMatch) {
+ static_assert(
+ base_internal::is_invocable_r<int, decltype(Function), int, int>::value,
+ "Should be true for exact match of types on a free function");
+}
+
+TEST(IsInvocableRTest, CallableArgumentConversionMatch) {
+ static_assert(
+ base_internal::is_invocable_r<int, decltype(Function), char, int>::value,
+ "Should be true for convertible argument type");
+}
+
+TEST(IsInvocableRTest, CallableReturnConversionMatch) {
+ static_assert(base_internal::is_invocable_r<double, decltype(Function), int,
+ int>::value,
+ "Should be true for convertible return type");
+}
+
+TEST(IsInvocableRTest, CallableReturnVoid) {
+ static_assert(base_internal::is_invocable_r<void, decltype(VoidFunction),
+ int&, int&>::value,
+ "Should be true for void expected and actual return types");
+ static_assert(
+ base_internal::is_invocable_r<void, decltype(Function), int, int>::value,
+ "Should be true for void expected and non-void actual return types");
+}
+
+TEST(IsInvocableRTest, CallableRefQualifierMismatch) {
+ static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+ int&, const int&>::value,
+ "Should be false for reference constness mismatch");
+ static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+ int&&, int&>::value,
+ "Should be false for reference value category mismatch");
+}
+
+TEST(IsInvocableRTest, CallableArgumentTypeMismatch) {
+ static_assert(!base_internal::is_invocable_r<int, decltype(Function),
+ std::string, int>::value,
+ "Should be false for argument type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableReturnTypeMismatch) {
+ static_assert(!base_internal::is_invocable_r<std::string, decltype(Function),
+ int, int>::value,
+ "Should be false for return type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableTooFewArgs) {
+ static_assert(
+ !base_internal::is_invocable_r<int, decltype(Function), int>::value,
+ "Should be false for too few arguments");
+}
+
+TEST(IsInvocableRTest, CallableTooManyArgs) {
+ static_assert(!base_internal::is_invocable_r<int, decltype(Function), int,
+ int, int>::value,
+ "Should be false for too many arguments");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndReference) {
+ static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+ Class&, int, int>::value,
+ "Should be true for exact match of types on a member function "
+ "and class reference");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndPointer) {
+ static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+ Class*, int, int>::value,
+ "Should be true for exact match of types on a member function "
+ "and class pointer");
+}
+
+TEST(IsInvocableRTest, DataMemberAndReference) {
+ static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+ Class&>::value,
+ "Should be true for exact match of types on a data member and "
+ "class reference");
+}
+
+TEST(IsInvocableRTest, DataMemberAndPointer) {
+ static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+ Class*>::value,
+ "Should be true for exact match of types on a data member and "
+ "class pointer");
+}
+
+TEST(IsInvocableRTest, CallableZeroArgs) {
+ static_assert(
+ base_internal::is_invocable_r<int, decltype(ZeroArgFunction)>::value,
+ "Should be true for exact match for a zero-arg free function");
+}
+
} // namespace
} // namespace base_internal
ABSL_NAMESPACE_END
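
These assertions exercise `base_internal::is_invocable_r`, Abseil's C++11 backport of C++17's `std::is_invocable_r`. For comparison, a sketch of the same expectations written against the standard trait (C++17 or later; `Function` declared as in the test above):

    #include <string>
    #include <type_traits>

    int Function(int a, int b);  // mirrors the test's free function

    static_assert(std::is_invocable_r<int, decltype(Function), int, int>::value,
                  "exact match");
    static_assert(
        std::is_invocable_r<double, decltype(Function), int, int>::value,
        "convertible return type");
    static_assert(!std::is_invocable_r<std::string, decltype(Function), int,
                                       int>::value,
                  "return type mismatch");
    static_assert(!std::is_invocable_r<int, decltype(Function), int>::value,
                  "too few arguments");
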
diff --git a/absl/base/log_severity.cc b/absl/base/log_severity.cc
index 72312afd..60a8fc1f 100644
--- a/absl/base/log_severity.cc
+++ b/absl/base/log_severity.cc
@@ -16,6 +16,8 @@
#include <ostream>
+#include "absl/base/attributes.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -23,5 +25,31 @@ std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) {
if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s);
return os << "absl::LogSeverity(" << static_cast<int>(s) << ")";
}
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) {
+ switch (s) {
+ case absl::LogSeverityAtLeast::kInfo:
+ case absl::LogSeverityAtLeast::kWarning:
+ case absl::LogSeverityAtLeast::kError:
+ case absl::LogSeverityAtLeast::kFatal:
+ return os << ">=" << static_cast<absl::LogSeverity>(s);
+ case absl::LogSeverityAtLeast::kInfinity:
+ return os << "INFINITY";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) {
+ switch (s) {
+ case absl::LogSeverityAtMost::kInfo:
+ case absl::LogSeverityAtMost::kWarning:
+ case absl::LogSeverityAtMost::kError:
+ case absl::LogSeverityAtMost::kFatal:
+ return os << "<=" << static_cast<absl::LogSeverity>(s);
+ case absl::LogSeverityAtMost::kNegativeInfinity:
+ return os << "NEGATIVE_INFINITY";
+ }
+ return os;
+}
ABSL_NAMESPACE_END
} // namespace absl
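
The new inserters render bounded thresholds as `>=LEVEL` / `<=LEVEL` and the two sentinels as `INFINITY` / `NEGATIVE_INFINITY`, matching the log_severity_test expectations below. A small usage sketch, assuming the patched header:

    #include <iostream>
    #include "absl/base/log_severity.h"

    int main() {
      std::cout << absl::LogSeverityAtLeast::kWarning << "\n";   // >=WARNING
      std::cout << absl::LogSeverityAtMost::kError << "\n";      // <=ERROR
      std::cout << absl::LogSeverityAtLeast::kInfinity << "\n";  // INFINITY
    }
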
diff --git a/absl/base/log_severity.h b/absl/base/log_severity.h
index 22364224..8bdca38b 100644
--- a/absl/base/log_severity.h
+++ b/absl/base/log_severity.h
@@ -115,6 +115,57 @@ constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
// unspecified; do not rely on it.
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
+// An enum representing a lower bound for LogSeverity. APIs that only operate
+// on messages of at least a certain level (for example, `SetMinLogLevel()`)
+// use this type to specify that level. `absl::LogSeverityAtLeast::kInfinity`
+// is a level above all threshold levels, so no log message will ever meet
+// this threshold.
+enum class LogSeverityAtLeast : int {
+ kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+ kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+ kError = static_cast<int>(absl::LogSeverity::kError),
+ kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+ kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);
+
+// An enum representing an upper bound for LogSeverity. APIs that only operate
+// on messages of at most a certain level (for example, buffer all messages at
+// or below a certain level) use this type to specify that level.
+// `absl::LogSeverityAtMost::kNegativeInfinity` is a level below all threshold
+// levels, so it excludes all log messages.
+enum class LogSeverityAtMost : int {
+ kNegativeInfinity = -1000,
+ kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+ kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+ kError = static_cast<int>(absl::LogSeverity::kError),
+ kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T) \
+ constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
+ return static_cast<absl::LogSeverity>(lhs) op1 rhs; \
+ } \
+ constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
+ return lhs op2 static_cast<absl::LogSeverity>(rhs); \
+ }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+// LogSeverity >= LogSeverityAtLeast
+// LogSeverity < LogSeverityAtLeast
+// LogSeverity <= LogSeverityAtMost
+// LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
ABSL_NAMESPACE_END
} // namespace absl
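
The COMPOP expansions permit exactly the four documented mixed comparisons. A hypothetical filter showing the intended use (the helper name is illustrative):

    #include "absl/base/log_severity.h"

    // True if a message at severity `s` passes a SetMinLogLevel()-style
    // threshold; LogSeverity >= LogSeverityAtLeast is one of the valid checks.
    bool ShouldLog(absl::LogSeverity s, absl::LogSeverityAtLeast min_level) {
      return s >= min_level;
    }
    // ShouldLog(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity)
    // is false: kInfinity is above every severity.
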
diff --git a/absl/base/log_severity_test.cc b/absl/base/log_severity_test.cc
index 55b26d17..16091a5b 100644
--- a/absl/base/log_severity_test.cc
+++ b/absl/base/log_severity_test.cc
@@ -35,7 +35,8 @@ using ::testing::IsTrue;
using ::testing::TestWithParam;
using ::testing::Values;
-std::string StreamHelper(absl::LogSeverity value) {
+template <typename T>
+std::string StreamHelper(T value) {
std::ostringstream stream;
stream << value;
return stream.str();
@@ -201,4 +202,44 @@ TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) {
IsTrue());
EXPECT_THAT(reparsed_value, Eq(to_unparse));
}
+
+TEST(LogThresholdTest, LogSeverityAtLeastTest) {
+ EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal);
+ EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo);
+
+ EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError);
+ EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo);
+}
+
+TEST(LogThresholdTest, LogSeverityAtMostTest) {
+ EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning);
+ EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal);
+
+ EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError);
+ EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError);
+}
+
+TEST(LogThresholdTest, Extremes) {
+ EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity);
+ EXPECT_GT(absl::LogSeverity::kInfo,
+ absl::LogSeverityAtMost::kNegativeInfinity);
+}
+
+TEST(LogThresholdTest, Output) {
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning),
+ Eq(">=WARNING"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity),
+ Eq("INFINITY"));
+
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL"));
+ EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity),
+ Eq("NEGATIVE_INFINITY"));
+}
+
} // namespace
diff --git a/absl/base/optimization.h b/absl/base/optimization.h
index d090be12..db5cc097 100644
--- a/absl/base/optimization.h
+++ b/absl/base/optimization.h
@@ -181,35 +181,43 @@
#define ABSL_PREDICT_TRUE(x) (x)
#endif
-// ABSL_INTERNAL_ASSUME(cond)
+// ABSL_ASSUME(cond)
+//
// Informs the compiler that a condition is always true and that it can assume
-// it to be true for optimization purposes. The call has undefined behavior if
-// the condition is false.
+// it to be true for optimization purposes.
+//
+// WARNING: If the condition is false, the program can produce undefined and
+// potentially dangerous behavior.
+//
// In !NDEBUG mode, the condition is checked with an assert().
-// NOTE: The expression must not have side effects, as it will only be evaluated
-// in some compilation modes and not others.
+//
+// NOTE: The expression must not have side effects, as it may only be evaluated
+// in some compilation modes and not others. Some compilers may issue a warning
+// if the compiler cannot prove the expression has no side effects. For example,
+// the expression should not use a function call since the compiler cannot prove
+// that a function call does not have side effects.
//
// Example:
//
// int x = ...;
-// ABSL_INTERNAL_ASSUME(x >= 0);
+// ABSL_ASSUME(x >= 0);
// // The compiler can optimize the division to a simple right shift using the
// // assumption specified above.
// int y = x / 16;
//
#if !defined(NDEBUG)
-#define ABSL_INTERNAL_ASSUME(cond) assert(cond)
+#define ABSL_ASSUME(cond) assert(cond)
#elif ABSL_HAVE_BUILTIN(__builtin_assume)
-#define ABSL_INTERNAL_ASSUME(cond) __builtin_assume(cond)
+#define ABSL_ASSUME(cond) __builtin_assume(cond)
#elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable)
-#define ABSL_INTERNAL_ASSUME(cond) \
+#define ABSL_ASSUME(cond) \
do { \
if (!(cond)) __builtin_unreachable(); \
} while (0)
#elif defined(_MSC_VER)
-#define ABSL_INTERNAL_ASSUME(cond) __assume(cond)
+#define ABSL_ASSUME(cond) __assume(cond)
#else
-#define ABSL_INTERNAL_ASSUME(cond) \
+#define ABSL_ASSUME(cond) \
do { \
static_cast<void>(false && (cond)); \
} while (0)
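
Following the renamed macro's own example, a short sketch of the optimization it unlocks (assumes the patched header):

    #include "absl/base/optimization.h"

    int DivideBySixteen(int x) {
      ABSL_ASSUME(x >= 0);  // asserts in debug builds; UB here if x < 0
      return x / 16;        // signed division can now become a right shift
    }
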
diff --git a/absl/base/options.h b/absl/base/options.h
index 56b4e36e..85d235d8 100644
--- a/absl/base/options.h
+++ b/absl/base/options.h
@@ -100,7 +100,7 @@
// User code should not inspect this macro. To check in the preprocessor if
// absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY.
-#define ABSL_OPTION_USE_STD_ANY 2
+#define ABSL_OPTION_USE_STD_ANY 1
// ABSL_OPTION_USE_STD_OPTIONAL
@@ -127,7 +127,7 @@
// absl::optional is a typedef of std::optional, use the feature macro
// ABSL_USES_STD_OPTIONAL.
-#define ABSL_OPTION_USE_STD_OPTIONAL 2
+#define ABSL_OPTION_USE_STD_OPTIONAL 1
// ABSL_OPTION_USE_STD_STRING_VIEW
@@ -154,7 +154,7 @@
// absl::string_view is a typedef of std::string_view, use the feature macro
// ABSL_USES_STD_STRING_VIEW.
-#define ABSL_OPTION_USE_STD_STRING_VIEW 2
+#define ABSL_OPTION_USE_STD_STRING_VIEW 1
// ABSL_OPTION_USE_STD_VARIANT
//
@@ -180,7 +180,7 @@
// absl::variant is a typedef of std::variant, use the feature macro
// ABSL_USES_STD_VARIANT.
-#define ABSL_OPTION_USE_STD_VARIANT 2
+#define ABSL_OPTION_USE_STD_VARIANT 1
// ABSL_OPTION_USE_INLINE_NAMESPACE
@@ -206,7 +206,7 @@
// allowed.
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1
-#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20211102
+#define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20220623
// ABSL_OPTION_HARDENED
//
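
For these option macros, 0 forces the Abseil implementation, 1 forces an alias of the std type, and 2 lets the library detect availability; pinning them to 1 here means, e.g., absl::optional is std::optional in this snapshot. A sketch checking the resulting feature macro:

    #include <optional>
    #include <type_traits>
    #include "absl/types/optional.h"

    #ifdef ABSL_USES_STD_OPTIONAL
    static_assert(
        std::is_same<absl::optional<int>, std::optional<int>>::value,
        "option value 1 makes absl::optional an alias of std::optional");
    #endif
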
diff --git a/absl/base/thread_annotations.h b/absl/base/thread_annotations.h
index 9695f6de..bc8a6203 100644
--- a/absl/base/thread_annotations.h
+++ b/absl/base/thread_annotations.h
@@ -154,8 +154,8 @@
// ABSL_LOCKS_EXCLUDED()
//
-// Documents the locks acquired in the body of the function. These locks
-// cannot be held when calling this function (as Abseil's `Mutex` locks are
+// Documents the locks that cannot be held by callers of this function, as they
+// might be acquired by this function (Abseil's `Mutex` locks are
// non-reentrant).
#if ABSL_HAVE_ATTRIBUTE(locks_excluded)
#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
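
The reworded comment matches the annotation's actual contract: the listed locks may be acquired inside the function, so callers must not already hold them. A hypothetical sketch:

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      // May acquire mu_; callers must not hold it (absl::Mutex is
      // non-reentrant), which ABSL_LOCKS_EXCLUDED lets the analysis enforce.
      void Increment() ABSL_LOCKS_EXCLUDED(mu_) {
        absl::MutexLock lock(&mu_);
        ++value_;
      }

     private:
      absl::Mutex mu_;
      int value_ ABSL_GUARDED_BY(mu_) = 0;
    };
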
diff --git a/absl/cleanup/CMakeLists.txt b/absl/cleanup/CMakeLists.txt
index 26a6d0dc..f5af40b4 100644
--- a/absl/cleanup/CMakeLists.txt
+++ b/absl/cleanup/CMakeLists.txt
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cleanup_internal
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index ffaee19c..d01d78e5 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -217,11 +217,6 @@ cc_test(
],
)
-NOTEST_TAGS_NONMOBILE = [
- "no_test_darwin_x86_64",
- "no_test_loonix",
-]
-
NOTEST_TAGS_MOBILE = [
"no_test_android_arm",
"no_test_android_arm64",
@@ -229,8 +224,6 @@ NOTEST_TAGS_MOBILE = [
"no_test_ios_x86_64",
]
-NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
-
cc_library(
name = "flat_hash_map",
hdrs = ["flat_hash_map.h"],
@@ -241,6 +234,7 @@ cc_library(
":hash_function_defaults",
":raw_hash_map",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -250,7 +244,7 @@ cc_test(
srcs = ["flat_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":flat_hash_map",
":hash_generator_testing",
@@ -284,7 +278,7 @@ cc_test(
srcs = ["flat_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":flat_hash_set",
":hash_generator_testing",
@@ -307,9 +301,10 @@ cc_library(
deps = [
":container_memory",
":hash_function_defaults",
- ":node_hash_policy",
+ ":node_slot_policy",
":raw_hash_map",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -319,7 +314,7 @@ cc_test(
srcs = ["node_hash_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":hash_generator_testing",
":node_hash_map",
@@ -339,9 +334,10 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_function_defaults",
- ":node_hash_policy",
+ ":node_slot_policy",
":raw_hash_set",
"//absl/algorithm:container",
+ "//absl/base:core_headers",
"//absl/memory",
],
)
@@ -351,7 +347,7 @@ cc_test(
srcs = ["node_hash_set_test.cc"],
copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":node_hash_set",
":unordered_set_constructor_test",
@@ -380,7 +376,7 @@ cc_test(
srcs = ["internal/container_memory_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":container_memory",
":test_instance_tracker",
@@ -407,7 +403,7 @@ cc_test(
srcs = ["internal/hash_function_defaults_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
deps = [
":hash_function_defaults",
"//absl/hash",
@@ -506,8 +502,8 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
- ":have_sse",
"//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/debugging:stacktrace",
"//absl/memory",
@@ -522,9 +518,12 @@ cc_test(
name = "hashtablez_sampler_test",
srcs = ["internal/hashtablez_sampler_test.cc"],
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":hashtablez_sampler",
- ":have_sse",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/profiling:sample_recorder",
"//absl/synchronization",
@@ -535,21 +534,21 @@ cc_test(
)
cc_library(
- name = "node_hash_policy",
- hdrs = ["internal/node_hash_policy.h"],
+ name = "node_slot_policy",
+ hdrs = ["internal/node_slot_policy.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"],
)
cc_test(
- name = "node_hash_policy_test",
- srcs = ["internal/node_hash_policy_test.cc"],
+ name = "node_slot_policy_test",
+ srcs = ["internal/node_slot_policy_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
- ":node_hash_policy",
+ ":node_slot_policy",
"@com_google_googletest//:gtest_main",
],
)
@@ -567,14 +566,6 @@ cc_library(
)
cc_library(
- name = "have_sse",
- hdrs = ["internal/have_sse.h"],
- copts = ABSL_DEFAULT_COPTS,
- linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
-)
-
-cc_library(
name = "common",
hdrs = ["internal/common.h"],
copts = ABSL_DEFAULT_COPTS,
@@ -598,10 +589,10 @@ cc_library(
":hash_policy_traits",
":hashtable_debug_hooks",
":hashtablez_sampler",
- ":have_sse",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
+ "//absl/base:prefetch",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/numeric:bits",
@@ -614,7 +605,7 @@ cc_test(
srcs = ["internal/raw_hash_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkstatic = 1,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
deps = [
":container_memory",
":hash_function_defaults",
@@ -624,6 +615,7 @@ cc_test(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:prefetch",
"//absl/base:raw_logging_internal",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -704,7 +696,7 @@ cc_test(
srcs = ["internal/layout_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS,
+ tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"],
visibility = ["//visibility:private"],
deps = [
":layout",
@@ -851,7 +843,7 @@ cc_test(
srcs = ["internal/unordered_set_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":unordered_set_constructor_test",
":unordered_set_lookup_test",
@@ -866,7 +858,7 @@ cc_test(
srcs = ["internal/unordered_map_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
deps = [
":unordered_map_constructor_test",
":unordered_map_lookup_test",
@@ -881,7 +873,7 @@ cc_test(
srcs = ["sample_element_size_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = NOTEST_TAGS_NONMOBILE,
+ tags = ["no_test_loonix"],
visibility = ["//visibility:private"],
deps = [
":flat_hash_map",
@@ -911,6 +903,7 @@ cc_library(
":container_memory",
":layout",
"//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
"//absl/base:throw_delegate",
"//absl/memory",
"//absl/meta:type_traits",
@@ -946,6 +939,10 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
shard_count = 10,
+ tags = [
+ "no_test_ios",
+ "no_test_wasm",
+ ],
visibility = ["//visibility:private"],
deps = [
":btree",
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 78584d2c..9b5c59a4 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -35,12 +35,14 @@ absl_cc_library(
absl::core_headers
absl::layout
absl::memory
+ absl::raw_logging_internal
absl::strings
absl::throw_delegate
absl::type_traits
absl::utility
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
btree_test_common
@@ -83,6 +85,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
compressed_tuple
@@ -161,6 +164,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
inlined_vector_internal
@@ -193,6 +197,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
counting_allocator
@@ -239,6 +244,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
test_instance_tracker
@@ -274,6 +280,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
+ absl::core_headers
absl::hash_function_defaults
absl::raw_hash_map
absl::algorithm_container
@@ -347,8 +354,9 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
+ absl::core_headers
absl::hash_function_defaults
- absl::node_hash_policy
+ absl::node_slot_policy
absl::raw_hash_map
absl::algorithm_container
absl::memory
@@ -381,8 +389,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::core_headers
absl::hash_function_defaults
- absl::node_hash_policy
+ absl::node_slot_policy
absl::raw_hash_set
absl::algorithm_container
absl::memory
@@ -407,6 +416,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
container_memory
@@ -436,6 +446,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_function_defaults
@@ -468,6 +479,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_generator_testing
@@ -485,6 +497,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_policy_testing
@@ -510,6 +523,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hash_policy_traits
@@ -534,6 +548,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtablez_sampler
@@ -546,8 +561,8 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
+ absl::config
absl::exponential_biased
- absl::have_sse
absl::sample_recorder
absl::synchronization
)
@@ -560,11 +575,12 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::config
absl::hashtablez_sampler
- absl::have_sse
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtable_debug
@@ -576,6 +592,7 @@ absl_cc_library(
absl::hashtable_debug_hooks
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
hashtable_debug_hooks
@@ -588,20 +605,12 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
- have_sse
- HDRS
- "internal/have_sse.h"
- COPTS
- ${ABSL_DEFAULT_COPTS}
-)
-
-absl_cc_library(
- NAME
- node_hash_policy
+ node_slot_policy
HDRS
- "internal/node_hash_policy.h"
+ "internal/node_slot_policy.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
@@ -611,17 +620,18 @@ absl_cc_library(
absl_cc_test(
NAME
- node_hash_policy_test
+ node_slot_policy_test
SRCS
- "internal/node_hash_policy_test.cc"
+ "internal/node_slot_policy_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
- absl::node_hash_policy
+ absl::node_slot_policy
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
raw_hash_map
@@ -636,6 +646,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
container_common
@@ -647,6 +658,7 @@ absl_cc_library(
absl::type_traits
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
raw_hash_set
@@ -666,10 +678,10 @@ absl_cc_library(
absl::endian
absl::hash_policy_traits
absl::hashtable_debug_hooks
- absl::have_sse
absl::memory
absl::meta
absl::optional
+ absl::prefetch
absl::utility
absl::hashtablez_sampler
PUBLIC
@@ -691,6 +703,7 @@ absl_cc_test(
absl::base
absl::config
absl::core_headers
+ absl::prefetch
absl::raw_logging_internal
absl::strings
GTest::gmock_main
@@ -710,6 +723,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
layout
@@ -743,6 +757,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
tracked
@@ -755,6 +770,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_constructor_test
@@ -769,6 +785,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_lookup_test
@@ -783,6 +800,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_members_test
@@ -796,6 +814,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_map_modifiers_test
@@ -810,6 +829,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_constructor_test
@@ -824,6 +844,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_lookup_test
@@ -838,6 +859,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_members_test
@@ -851,6 +873,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
unordered_set_modifiers_test
diff --git a/absl/container/btree_benchmark.cc b/absl/container/btree_benchmark.cc
index 65b6790b..0ca497c8 100644
--- a/absl/container/btree_benchmark.cc
+++ b/absl/container/btree_benchmark.cc
@@ -153,9 +153,9 @@ void BM_FullLookup(benchmark::State& state) {
BM_LookupImpl<T>(state, true);
}
-// Benchmark deletion of values from a container.
+// Benchmark erasing values from a container.
template <typename T>
-void BM_Delete(benchmark::State& state) {
+void BM_Erase(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -180,9 +180,9 @@ void BM_Delete(benchmark::State& state) {
}
}
-// Benchmark deletion of multiple values from a container.
+// Benchmark erasing multiple values from a container.
template <typename T>
-void BM_DeleteRange(benchmark::State& state) {
+void BM_EraseRange(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
@@ -222,6 +222,40 @@ void BM_DeleteRange(benchmark::State& state) {
}
}
+// Predicate that erases every other element. We can't use a lambda because
+// C++11 doesn't support generic lambdas.
+// TODO(b/207389011): consider adding benchmarks that remove different fractions
+// of keys (e.g. 10%, 90%).
+struct EraseIfPred {
+ uint64_t i = 0;
+ template <typename T>
+ bool operator()(const T&) {
+ return ++i % 2;
+ }
+};
+
+// Benchmark erasing multiple values from a container with a predicate.
+template <typename T>
+void BM_EraseIf(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+
+ // Removes half of the keys per batch.
+ const int batch_size = (kBenchmarkValues + 1) / 2;
+ EraseIfPred pred;
+ while (state.KeepRunningBatch(batch_size)) {
+ state.PauseTiming();
+ {
+ T container(values.begin(), values.end());
+ state.ResumeTiming();
+ erase_if(container, pred);
+ benchmark::DoNotOptimize(container);
+ state.PauseTiming();
+ }
+ state.ResumeTiming();
+ }
+}
+
// Benchmark steady-state insert (into first half of range) and remove (from
// second half of range), treating the container approximately like a queue with
// log-time access for all elements. This benchmark does not test the case where
@@ -477,14 +511,14 @@ BTREE_TYPES(Time);
void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
BENCHMARK(BM_##type##_##func)
-#define MY_BENCHMARK3(type) \
+#define MY_BENCHMARK3_STL(type) \
MY_BENCHMARK4(type, Insert); \
MY_BENCHMARK4(type, InsertSorted); \
MY_BENCHMARK4(type, InsertSmall); \
MY_BENCHMARK4(type, Lookup); \
MY_BENCHMARK4(type, FullLookup); \
- MY_BENCHMARK4(type, Delete); \
- MY_BENCHMARK4(type, DeleteRange); \
+ MY_BENCHMARK4(type, Erase); \
+ MY_BENCHMARK4(type, EraseRange); \
MY_BENCHMARK4(type, QueueAddRem); \
MY_BENCHMARK4(type, MixedAddRem); \
MY_BENCHMARK4(type, Fifo); \
@@ -492,9 +526,13 @@ BTREE_TYPES(Time);
MY_BENCHMARK4(type, InsertRangeRandom); \
MY_BENCHMARK4(type, InsertRangeSorted)
+#define MY_BENCHMARK3(type) \
+ MY_BENCHMARK4(type, EraseIf); \
+ MY_BENCHMARK3_STL(type)
+
#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
- MY_BENCHMARK3(stl_##type); \
- MY_BENCHMARK3(stl_unordered_##type); \
+ MY_BENCHMARK3_STL(stl_##type); \
+ MY_BENCHMARK3_STL(stl_unordered_##type); \
MY_BENCHMARK3(btree_256_##type)
#define MY_BENCHMARK2(type) \
@@ -684,12 +722,12 @@ double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
btree_set<BigTypePtr<SIZE>>; \
using btree_256_map_size##SIZE##copies##SIZE##ptr = \
btree_map<int, BigTypePtr<SIZE>>; \
- MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
- MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
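
The new BM_EraseIf relies on EraseIfPred's per-call counter: `++i % 2` alternates truth values, so half of the visited elements are erased per pass (a C++11 constraint rules out a generic lambda). A standalone illustration of the same predicate shape against absl::erase_if:

    #include <cstdint>
    #include "absl/container/btree_set.h"

    struct EveryOther {
      uint64_t i = 0;
      template <typename T>
      bool operator()(const T&) { return ++i % 2; }  // true, false, true, ...
    };

    // absl::btree_set<int> s = {10, 20, 30, 40};
    // absl::erase_if(s, EveryOther{});  // erases 10 and 30, returns 2
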
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index f0a8d4a6..286817f1 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -35,14 +35,17 @@
//
// However, these types should not be considered drop-in replacements for
// `std::map` and `std::multimap` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
//
// Importantly, insertions and deletions may invalidate outstanding iterators,
// pointers, and references to elements. Such invalidations are typically only
// an issue if insertion and deletion operations are interleaved with the use of
// more than one iterator, pointer, or reference simultaneously. For this
// reason, `insert()` and `erase()` return a valid iterator at the current
-// position.
+// position. Another important difference is that key-types must be
+// copy-constructible.
#ifndef ABSL_CONTAINER_BTREE_MAP_H_
#define ABSL_CONTAINER_BTREE_MAP_H_
@@ -53,6 +56,14 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool IsMulti>
+struct map_params;
+
+} // namespace container_internal
+
// absl::btree_map<>
//
// An `absl::btree_map<K, V>` is an ordered associative container of
@@ -74,7 +85,7 @@ class btree_map
: public container_internal::btree_map_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/false>>> {
+ /*IsMulti=*/false>>> {
using Base = typename btree_map::btree_map_container;
public:
@@ -467,15 +478,11 @@ void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
// absl::erase_if(absl::btree_map<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_map<K, V, C, A> &map, Pred pred) {
- for (auto it = map.begin(); it != map.end();) {
- if (pred(*it)) {
- it = map.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_map<K, V, C, A>::size_type erase_if(
+ btree_map<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
}
// absl::btree_multimap
@@ -500,7 +507,7 @@ class btree_multimap
: public container_internal::btree_multimap_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/true>>> {
+ /*IsMulti=*/true>>> {
using Base = typename btree_multimap::btree_multimap_container;
public:
@@ -798,17 +805,46 @@ void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
// absl::erase_if(absl::btree_multimap<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename V, typename C, typename A, typename Pred>
-void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
- for (auto it = map.begin(); it != map.end();) {
- if (pred(*it)) {
- it = map.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_multimap<K, V, C, A>::size_type erase_if(
+ btree_multimap<K, V, C, A> &map, Pred pred) {
+ return container_internal::btree_access::erase_if(map, std::move(pred));
}
+namespace container_internal {
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool IsMulti>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/true, map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
+ static mapped_type &value(value_type *value) { return value->second; }
+};
+
+} // namespace container_internal
+
ABSL_NAMESPACE_END
} // namespace absl
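
With this change absl::erase_if delegates to the shared btree_access::erase_if helper and reports the number of elements removed, matching C++20's std::erase_if convention. Usage sketch:

    #include <iostream>
    #include "absl/container/btree_map.h"

    int main() {
      absl::btree_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}, {4, 4}};
      auto erased = absl::erase_if(m, [](const std::pair<const int, int>& kv) {
        return kv.first % 2 == 0;
      });
      std::cout << erased << "\n";  // 2: keys 2 and 4 were erased
    }
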
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index 89739006..695b09f5 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -35,7 +35,9 @@
//
// However, these types should not be considered drop-in replacements for
// `std::set` and `std::multiset` as there are some API differences, which are
-// noted in this header file.
+// noted in this header file. The most consequential differences with respect to
+// migrating to b-tree from the STL types are listed in the next paragraph.
+// Other API differences are minor.
//
// Importantly, insertions and deletions may invalidate outstanding iterators,
// pointers, and references to elements. Such invalidations are typically only
@@ -53,6 +55,17 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename Key>
+struct set_slot_policy;
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params;
+
+} // namespace container_internal
+
// absl::btree_set<>
//
// An `absl::btree_set<K>` is an ordered associative container of unique key
@@ -74,7 +87,7 @@ class btree_set
: public container_internal::btree_set_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/false>>> {
+ /*IsMulti=*/false>>> {
using Base = typename btree_set::btree_set_container;
public:
@@ -385,15 +398,11 @@ void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
// absl::erase_if(absl::btree_set<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_set<K, C, A> &set, Pred pred) {
- for (auto it = set.begin(); it != set.end();) {
- if (pred(*it)) {
- it = set.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
+ Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
}
// absl::btree_multiset<>
@@ -418,7 +427,7 @@ class btree_multiset
: public container_internal::btree_multiset_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,
- /*Multi=*/true>>> {
+ /*IsMulti=*/true>>> {
using Base = typename btree_multiset::btree_multiset_container;
public:
@@ -711,17 +720,73 @@ void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
// absl::erase_if(absl::btree_multiset<>, Pred)
//
// Erases all elements that satisfy the predicate pred from the container.
+// Returns the number of erased elements.
template <typename K, typename C, typename A, typename Pred>
-void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
- for (auto it = set.begin(); it != set.end();) {
- if (pred(*it)) {
- it = set.erase(it);
- } else {
- ++it;
- }
- }
+typename btree_multiset<K, C, A>::size_type erase_if(
+ btree_multiset<K, C, A> &set, Pred pred) {
+ return container_internal::btree_access::erase_if(set, std::move(pred));
}
+namespace container_internal {
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface for btree_(multi)set.
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&...args) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, *other);
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+
+ template <typename Alloc>
+ static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+ construct(alloc, new_slot, old_slot);
+ destroy(alloc, old_slot);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool IsMulti>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
+ /*IsMap=*/false, set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
+
+ template <typename V>
+ static const V &key(const V &value) {
+ return value;
+ }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
+};
+
+} // namespace container_internal
+
ABSL_NAMESPACE_END
} // namespace absl
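
Because set_slot_policy stores the key directly in the slot, `transfer` is simply construct-into-the-new-slot-then-destroy-the-old. Spelled out standalone with allocator_traits (illustrative only; the real policy is the struct above):

    #include <memory>
    #include <utility>

    template <typename Alloc, typename Key>
    void TransferSlot(Alloc* alloc, Key* new_slot, Key* old_slot) {
      // Equivalent to set_slot_policy<Key>::transfer().
      std::allocator_traits<Alloc>::construct(*alloc, new_slot,
                                              std::move(*old_slot));
      std::allocator_traits<Alloc>::destroy(*alloc, old_slot);
    }
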
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index d27cf271..f20f3430 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -14,10 +14,14 @@
#include "absl/container/btree_test.h"
+#include <algorithm>
+#include <array>
#include <cstdint>
+#include <functional>
#include <limits>
#include <map>
#include <memory>
+#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
@@ -1212,6 +1216,11 @@ class BtreeNodePeer {
constexpr static bool UsesLinearNodeSearch() {
return btree_node<typename Btree::params_type>::use_linear_search::value;
}
+
+ template <typename Btree>
+ constexpr static bool UsesGenerations() {
+ return Btree::params_type::kEnableGenerations;
+ }
};
namespace {
@@ -1285,7 +1294,7 @@ TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
std::unique_ptr<std::string> &v = m["A"];
EXPECT_TRUE(v == nullptr);
- v.reset(new std::string("X"));
+ v = absl::make_unique<std::string>("X");
auto iter = m.find("A");
EXPECT_EQ("X", *iter->second);
@@ -1344,38 +1353,34 @@ TEST(Btree, InitializerListInsert) {
EXPECT_EQ(++it, range.second);
}
-template <typename Compare, typename K>
-void AssertKeyCompareToAdapted() {
- using Adapted = typename key_compare_to_adapter<Compare>::type;
- static_assert(!std::is_same<Adapted, Compare>::value,
- "key_compare_to_adapter should have adapted this comparator.");
+template <typename Compare, typename Key>
+void AssertKeyCompareStringAdapted() {
+ using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
- std::is_same<absl::weak_ordering,
- absl::result_of_t<Adapted(const K &, const K &)>>::value,
- "Adapted comparator should be a key-compare-to comparator.");
+ std::is_same<Adapted, StringBtreeDefaultLess>::value ||
+ std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+ "key_compare_adapter should have string-adapted this comparator.");
}
-template <typename Compare, typename K>
-void AssertKeyCompareToNotAdapted() {
- using Unadapted = typename key_compare_to_adapter<Compare>::type;
+template <typename Compare, typename Key>
+void AssertKeyCompareNotStringAdapted() {
+ using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
- std::is_same<Unadapted, Compare>::value,
- "key_compare_to_adapter shouldn't have adapted this comparator.");
- static_assert(
- std::is_same<bool,
- absl::result_of_t<Unadapted(const K &, const K &)>>::value,
- "Un-adapted comparator should return bool.");
+ !std::is_same<Adapted, StringBtreeDefaultLess>::value &&
+ !std::is_same<Adapted, StringBtreeDefaultGreater>::value,
+ "key_compare_adapter shouldn't have string-adapted this comparator.");
}
-TEST(Btree, KeyCompareToAdapter) {
- AssertKeyCompareToAdapted<std::less<std::string>, std::string>();
- AssertKeyCompareToAdapted<std::greater<std::string>, std::string>();
- AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
- AssertKeyCompareToAdapted<std::greater<absl::string_view>,
- absl::string_view>();
- AssertKeyCompareToAdapted<std::less<absl::Cord>, absl::Cord>();
- AssertKeyCompareToAdapted<std::greater<absl::Cord>, absl::Cord>();
- AssertKeyCompareToNotAdapted<std::less<int>, int>();
- AssertKeyCompareToNotAdapted<std::greater<int>, int>();
+TEST(Btree, KeyCompareAdapter) {
+ AssertKeyCompareStringAdapted<std::less<std::string>, std::string>();
+ AssertKeyCompareStringAdapted<std::greater<std::string>, std::string>();
+ AssertKeyCompareStringAdapted<std::less<absl::string_view>,
+ absl::string_view>();
+ AssertKeyCompareStringAdapted<std::greater<absl::string_view>,
+ absl::string_view>();
+ AssertKeyCompareStringAdapted<std::less<absl::Cord>, absl::Cord>();
+ AssertKeyCompareStringAdapted<std::greater<absl::Cord>, absl::Cord>();
+ AssertKeyCompareNotStringAdapted<std::less<int>, int>();
+ AssertKeyCompareNotStringAdapted<std::greater<int>, int>();
}
TEST(Btree, RValueInsert) {
@@ -1425,11 +1430,19 @@ TEST(Btree, RValueInsert) {
EXPECT_EQ(tracker.swaps(), 0);
}
-// A btree set with a specific number of values per node.
+template <typename Cmp>
+struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase {
+ using Cmp::Cmp;
+ CheckedCompareOptedOutCmp() {}
+ CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT
+};
+
+// A btree set with a specific number of values per node. Opt out of
+// checked_compare so that we can expect exact numbers of comparisons.
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet
: public btree_set_container<btree<
- set_params<Key, Cmp, std::allocator<Key>,
+ set_params<Key, CheckedCompareOptedOutCmp<Cmp>, std::allocator<Key>,
BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
/*Multi=*/false>>> {
using Base = typename SizedBtreeSet::btree_set_container;
@@ -1473,8 +1486,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
+ EXPECT_EQ(
+ BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1528,8 +1543,10 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
+ EXPECT_EQ(
+ BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ // When we have generations, there is one fewer slot.
+ BtreeNodePeer::UsesGenerations<absl::btree_set<int32_t>>() ? 60 : 61);
}
// Test key insertion/deletion in random order.
@@ -1748,6 +1765,22 @@ TEST(Btree, ValueComp) {
EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)));
}
+// Test that we have the protected members from the std::map::value_compare API.
+// See https://en.cppreference.com/w/cpp/container/map/value_compare.
+TEST(Btree, MapValueCompProtected) {
+ struct key_compare {
+ bool operator()(int l, int r) const { return l < r; }
+ int id;
+ };
+ using value_compare = absl::btree_map<int, int, key_compare>::value_compare;
+ struct value_comp_child : public value_compare {
+ explicit value_comp_child(key_compare kc) : value_compare(kc) {}
+ int GetId() const { return comp.id; }
+ };
+ value_comp_child c(key_compare{10});
+ EXPECT_EQ(c.GetId(), 10);
+}
+
TEST(Btree, DefaultConstruction) {
absl::btree_set<int> s;
absl::btree_map<int, int> m;
@@ -2297,7 +2330,9 @@ TEST(Btree, TryEmplaceWithHintWorks) {
};
using Cmp = decltype(cmp);
- absl::btree_map<int, int, Cmp> m(cmp);
+ // Use a map that is opted out of key_compare being adapted so we can expect
+ // strict comparison call limits.
+ absl::btree_map<int, int, CheckedCompareOptedOutCmp<Cmp>> m(cmp);
for (int i = 0; i < 128; ++i) {
m.emplace(i, i);
}
@@ -2452,23 +2487,28 @@ TEST(Btree, EraseIf) {
// Test that erase_if works with all the container types and supports lambdas.
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
- erase_if(s, [](int k) { return k > 3; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3);
EXPECT_THAT(s, ElementsAre(1, 3));
}
{
absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
- erase_if(s, [](int k) { return k <= 3; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3);
EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
}
{
absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
- erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+ EXPECT_EQ(
+ erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; }),
+ 2);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
}
{
absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
{6, 6}, {6, 7}, {100, 6}};
- erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+ EXPECT_EQ(
+ erase_if(m,
+ [](std::pair<const int, int> kv) { return kv.second == 6; }),
+ 3);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
}
// Test that erasing all elements from a large set works and test support for
@@ -2476,15 +2516,29 @@ TEST(Btree, EraseIf) {
{
absl::btree_set<int> s;
for (int i = 0; i < 1000; ++i) s.insert(2 * i);
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 1000);
EXPECT_THAT(s, IsEmpty());
}
// Test that erase_if supports other format of function pointers.
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, ElementsAre(1, 3, 5));
}
+ // Test that erase_if invokes the predicate once per element.
+ {
+ absl::btree_set<int> s;
+ for (int i = 0; i < 1000; ++i) s.insert(i);
+ int pred_calls = 0;
+ EXPECT_EQ(erase_if(s,
+ [&pred_calls](int k) {
+ ++pred_calls;
+ return k % 2;
+ }),
+ 500);
+ EXPECT_THAT(s, SizeIs(500));
+ EXPECT_EQ(pred_calls, 1000);
+ }
}
TEST(Btree, InsertOrAssign) {
@@ -2948,6 +3002,252 @@ TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
}
+#ifndef NDEBUG
+TEST(Btree, InvalidComparatorsCaught) {
+ {
+ struct ZeroAlwaysLessCmp {
+ bool operator()(int lhs, int rhs) const {
+ if (lhs == 0) return true;
+ return lhs < rhs;
+ }
+ };
+ absl::btree_set<int, ZeroAlwaysLessCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+ }
+ {
+ struct ThreeWayAlwaysLessCmp {
+ absl::weak_ordering operator()(int, int) const {
+ return absl::weak_ordering::less;
+ }
+ };
+ absl::btree_set<int, ThreeWayAlwaysLessCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
+ }
+ {
+ struct SumGreaterZeroCmp {
+ bool operator()(int lhs, int rhs) const {
+ // First, do equivalence correctly - so we can test later condition.
+ if (lhs == rhs) return false;
+ return lhs + rhs > 0;
+ }
+ };
+ absl::btree_set<int, SumGreaterZeroCmp> set;
+ // Note: '!' only needs to be escaped when it's the first character.
+ EXPECT_DEATH(set.insert({0, 1, 2}),
+ R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex");
+ }
+ {
+ struct ThreeWaySumGreaterZeroCmp {
+ absl::weak_ordering operator()(int lhs, int rhs) const {
+ // First, do equivalence correctly - so we can test later condition.
+ if (lhs == rhs) return absl::weak_ordering::equivalent;
+
+ if (lhs + rhs > 0) return absl::weak_ordering::less;
+ if (lhs + rhs == 0) return absl::weak_ordering::equivalent;
+ return absl::weak_ordering::greater;
+ }
+ };
+ absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
+ EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+ }
+}
+#endif
+
+#ifndef _MSC_VER
+// This test crashes on MSVC.
+TEST(Btree, InvalidIteratorUse) {
+ if (!BtreeNodePeer::UsesGenerations<absl::btree_set<int>>())
+ GTEST_SKIP() << "Generation validation for iterators is disabled.";
+
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10; ++i) set.insert(i);
+ auto it = set.begin();
+ set.erase(it++);
+ EXPECT_DEATH(set.erase(it++), "invalidated iterator");
+ }
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10; ++i) set.insert(i);
+ auto it = set.insert(20).first;
+ set.insert(30);
+ EXPECT_DEATH(*it, "invalidated iterator");
+ }
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10000; ++i) set.insert(i);
+ auto it = set.find(5000);
+ ASSERT_NE(it, set.end());
+ set.erase(1);
+ EXPECT_DEATH(*it, "invalidated iterator");
+ }
+}
+#endif
+
+class OnlyConstructibleByAllocator {
+ explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
+
+ public:
+ OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
+ : i_(other.i_) {}
+ OnlyConstructibleByAllocator &operator=(
+ const OnlyConstructibleByAllocator &other) {
+ i_ = other.i_;
+ return *this;
+ }
+ int Get() const { return i_; }
+ bool operator==(int i) const { return i_ == i; }
+
+ private:
+ template <typename T>
+ friend class OnlyConstructibleAllocator;
+
+ int i_;
+};
+
+template <typename T = OnlyConstructibleByAllocator>
+class OnlyConstructibleAllocator : public std::allocator<T> {
+ public:
+ OnlyConstructibleAllocator() = default;
+ template <class U>
+ explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
+
+ void construct(OnlyConstructibleByAllocator *p, int i) {
+ new (p) OnlyConstructibleByAllocator(i);
+ }
+ template <typename Pair>
+ void construct(Pair *p, const int i) {
+ OnlyConstructibleByAllocator only(i);
+ new (p) Pair(std::move(only), i);
+ }
+
+ template <class U>
+ struct rebind {
+ using other = OnlyConstructibleAllocator<U>;
+ };
+};
+
+struct OnlyConstructibleByAllocatorComp {
+ using is_transparent = void;
+ bool operator()(OnlyConstructibleByAllocator a,
+ OnlyConstructibleByAllocator b) const {
+ return a.Get() < b.Get();
+ }
+ bool operator()(int a, OnlyConstructibleByAllocator b) const {
+ return a < b.Get();
+ }
+ bool operator()(OnlyConstructibleByAllocator a, int b) const {
+ return a.Get() < b;
+ }
+};
+
+TEST(Btree, OnlyConstructibleByAllocatorType) {
+ const std::array<int, 2> arr = {3, 4};
+ {
+ absl::btree_set<OnlyConstructibleByAllocator,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(arr.begin(), arr.end());
+ EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+ }
+ {
+ absl::btree_multiset<OnlyConstructibleByAllocator,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ // TODO(ezb): fix insert_multi to allow this to compile.
+ // set.insert(arr.begin(), arr.end());
+ EXPECT_THAT(set, ElementsAre(1, 2));
+ }
+ {
+ absl::btree_map<OnlyConstructibleByAllocator, int,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ map;
+ map.emplace(1);
+ map.emplace_hint(map.end(), 2);
+ map.insert(arr.begin(), arr.end());
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ }
+ {
+ absl::btree_multimap<OnlyConstructibleByAllocator, int,
+ OnlyConstructibleByAllocatorComp,
+ OnlyConstructibleAllocator<>>
+ map;
+ map.emplace(1);
+ map.emplace_hint(map.end(), 2);
+ // TODO(ezb): fix insert_multi to allow this to compile.
+ // map.insert(arr.begin(), arr.end());
+ EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2)));
+ }
+}
+
+class NotAssignable {
+ public:
+ explicit NotAssignable(int i) : i_(i) {}
+ NotAssignable(const NotAssignable &other) : i_(other.i_) {}
+ NotAssignable &operator=(NotAssignable &&other) = delete;
+ int Get() const { return i_; }
+ bool operator==(int i) const { return i_ == i; }
+ friend bool operator<(NotAssignable a, NotAssignable b) {
+ return a.i_ < b.i_;
+ }
+
+ private:
+ int i_;
+};
+
+TEST(Btree, NotAssignableType) {
+ {
+ absl::btree_set<NotAssignable> set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(NotAssignable(3));
+ set.insert(set.end(), NotAssignable(4));
+ EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
+ set.erase(set.begin());
+ EXPECT_THAT(set, ElementsAre(2, 3, 4));
+ }
+ {
+ absl::btree_multiset<NotAssignable> set;
+ set.emplace(1);
+ set.emplace_hint(set.end(), 2);
+ set.insert(NotAssignable(2));
+ set.insert(set.end(), NotAssignable(3));
+ EXPECT_THAT(set, ElementsAre(1, 2, 2, 3));
+ set.erase(set.begin());
+ EXPECT_THAT(set, ElementsAre(2, 2, 3));
+ }
+ {
+ absl::btree_map<NotAssignable, int> map;
+ map.emplace(NotAssignable(1), 1);
+ map.emplace_hint(map.end(), NotAssignable(2), 2);
+ map.insert({NotAssignable(3), 3});
+ map.insert(map.end(), {NotAssignable(4), 4});
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ map.erase(map.begin());
+ EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4)));
+ }
+ {
+ absl::btree_multimap<NotAssignable, int> map;
+ map.emplace(NotAssignable(1), 1);
+ map.emplace_hint(map.end(), NotAssignable(2), 2);
+ map.insert({NotAssignable(2), 3});
+ map.insert(map.end(), {NotAssignable(3), 3});
+ EXPECT_THAT(map,
+ ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+ map.erase(map.begin());
+ EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3)));
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
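
The InvalidIteratorUse death tests depend on btree's debug-mode generation tracking: mutations bump a generation counter, and each iterator remembers the generation at which it was obtained, so stale dereferences can be caught. A simplified model of the idea (not the actual implementation):

    #include <cassert>
    #include <cstdint>

    struct Node {
      uint32_t generation = 0;  // incremented on every mutation
    };

    struct CheckedIterator {
      const Node* node;
      uint32_t seen_generation;  // generation when the iterator was created
      void AssertValid() const {
        // The real container reports "invalidated iterator" in debug builds.
        assert(node->generation == seen_generation && "invalidated iterator");
      }
    };
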
diff --git a/absl/container/fixed_array.h b/absl/container/fixed_array.h
index 839ba0bc..2aefae3b 100644
--- a/absl/container/fixed_array.h
+++ b/absl/container/fixed_array.h
@@ -489,12 +489,14 @@ class FixedArray {
Storage storage_;
};
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T, size_t N, typename A>
constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
template <typename T, size_t N, typename A>
constexpr typename FixedArray<T, N, A>::size_type
FixedArray<T, N, A>::inline_elements;
+#endif
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
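
Background for the new guard: before C++17, an odr-used `static constexpr` data member still needed an out-of-line definition, while C++17 makes such members implicitly inline and some compilers warn that the definition is redundant. Minimal illustration of the rule (hypothetical type):

    struct Widget {
      static constexpr int kSize = 8;  // also a definition since C++17
    };

    #if __cplusplus < 201703L
    // Required pre-C++17 whenever kSize is odr-used (e.g. bound to const int&).
    constexpr int Widget::kSize;
    #endif
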
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 74def0df..e6bdbd9e 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -36,6 +36,7 @@
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
@@ -75,6 +76,10 @@ struct FlatHashMapPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::flat_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// NOTE: A `flat_hash_map` stores its value types directly inside its
// implementation array to avoid memory indirection. Because a `flat_hash_map`
// is designed to move data when rehashed, map values will not retain pointer
@@ -356,8 +361,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// `flat_hash_map`.
//
// iterator try_emplace(const_iterator hint,
- // const init_type& k, Args&&... args):
- // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `flat_hash_map` using the position of `hint` as a non-binding suggestion
@@ -541,10 +546,12 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// erase_if(flat_hash_map<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
-void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
+ flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
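
A minimal usage sketch of the new return value (the map contents and the predicate here are illustrative):

#include <cstdio>
#include <utility>
#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
  // erase_if now reports how many elements the predicate removed.
  auto erased = absl::erase_if(
      m, [](const std::pair<const int, int> &kv) { return kv.first % 2 == 1; });
  std::printf("erased %zu elements, %zu remain\n", erased, m.size());
}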
namespace container_internal {
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 8dda1d35..263951f1 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -236,33 +236,36 @@ TEST(FlatHashMap, EraseIf) {
// Erase all elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
Pair(4, 4), Pair(5, 5)));
}
// Erase specific elements.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s,
- [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_EQ(erase_if(s,
+ [](std::pair<const int, int> kvp) {
+ return kvp.first % 2 == 1;
+ }),
+ 3);
EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
}
// Predicate is function reference.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, FirstIsEven);
+ EXPECT_EQ(erase_if(s, FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
// Predicate is function pointer.
{
flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, &FirstIsEven);
+ EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
}
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index 6b89da65..4938c703 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -67,11 +67,15 @@ struct FlatHashSetPolicy;
//
// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
// fundamental and Abseil types that support the `absl::Hash` framework have a
-// compatible equality operator for comparing insertions into `flat_hash_map`.
+// compatible equality operator for comparing insertions into `flat_hash_set`.
// If your type is not yet supported by the `absl::Hash` framework, see
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::flat_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
// array to avoid memory indirection. Because a `flat_hash_set` is designed to
// move data when rehashed, set keys will not retain pointer stability. If you
@@ -106,7 +110,7 @@ class flat_hash_set
public:
// Constructors and Assignment Operators
//
- // A flat_hash_set supports the same overload set as `std::unordered_map`
+ // A flat_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
@@ -173,7 +177,7 @@ class flat_hash_set
// available within the `flat_hash_set`.
//
// NOTE: this member function is particular to `absl::flat_hash_set` and is
- // not provided in the `std::unordered_map` API.
+ // not provided in the `std::unordered_set` API.
using Base::capacity;
// flat_hash_set::empty()
@@ -332,7 +336,7 @@ class flat_hash_set
// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
- // flat hash map, avoiding invocation of any move, copy, or swap operations on
+ // flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
@@ -340,7 +344,7 @@ class flat_hash_set
//
// `swap()` requires that the flat hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
- // non-member `swap()`. If the map's allocator has
+ // non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -395,14 +399,14 @@ class flat_hash_set
// flat_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `flat_hash_set`. Note that
- // because a flat hash map contains all elements within its internal storage,
+ // because a flat hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `flat_hash_set`.
using Base::bucket_count;
// flat_hash_set::load_factor()
//
// Returns the current load factor of the `flat_hash_set` (the average number
- // of slots occupied with a value within the hash map).
+ // of slots occupied with a value within the hash set).
using Base::load_factor;
// flat_hash_set::max_load_factor()
@@ -443,9 +447,11 @@ class flat_hash_set
// erase_if(flat_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename flat_hash_set<T, H, E, A>::size_type erase_if(
+ flat_hash_set<T, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
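
The set overload gains the same return value; a short sketch with an illustrative function-pointer predicate:

#include <cassert>
#include "absl/container/flat_hash_set.h"

bool IsNegative(int v) { return v < 0; }

int main() {
  absl::flat_hash_set<int> s = {-2, -1, 0, 1, 2};
  // The predicate may be a function pointer as well as a lambda.
  assert(absl::erase_if(s, &IsNegative) == 2);
  assert(s.size() == 3);
}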
namespace container_internal {
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 8f6f9944..b6a72a20 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -143,31 +143,31 @@ TEST(FlatHashSet, EraseIf) {
// Erase all elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return true; });
+ EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return false; });
+ EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
}
// Erase specific elements.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
EXPECT_THAT(s, UnorderedElementsAre(2, 4));
}
// Predicate is function reference.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
// Predicate is function pointer.
{
flat_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
}
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index df9e0991..bc1c4a77 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -36,7 +36,6 @@
#define ABSL_CONTAINER_INLINED_VECTOR_H_
#include <algorithm>
-#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>
@@ -152,7 +151,7 @@ class InlinedVector {
const allocator_type& allocator = allocator_type())
: storage_(allocator) {
storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
- std::distance(first, last));
+ static_cast<size_t>(std::distance(first, last)));
}
// Creates an inlined vector with elements constructed from the provided input
@@ -233,8 +232,8 @@ class InlinedVector {
// specified allocator is also `noexcept`.
InlinedVector(
InlinedVector&& other,
- const allocator_type& allocator)
- noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+ const allocator_type&
+ allocator) noexcept(absl::allocator_is_nothrow<allocator_type>::value)
: storage_(allocator) {
if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
@@ -486,8 +485,8 @@ class InlinedVector {
InlinedVector& operator=(InlinedVector&& other) {
if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
if (IsMemcpyOk<A>::value || other.storage_.GetIsAllocated()) {
- inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(),
- data(), size());
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.MemcpyFrom(other.storage_);
@@ -523,7 +522,7 @@ class InlinedVector {
EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
void assign(ForwardIterator first, ForwardIterator last) {
storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
- std::distance(first, last));
+ static_cast<size_t>(std::distance(first, last)));
}
// Overload of `InlinedVector::assign(...)` to replace the contents of the
@@ -586,8 +585,20 @@ class InlinedVector {
if (ABSL_PREDICT_TRUE(n != 0)) {
value_type dealias = v;
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+ // It appears that GCC thinks that since `pos` is a const pointer and may
+ // point to uninitialized memory at this point, a warning should be
+ // issued. But `pos` is actually only used to compute an array index to
+ // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
n);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
} else {
return const_cast<iterator>(pos);
}
@@ -721,8 +732,8 @@ class InlinedVector {
// Destroys all elements in the inlined vector, setting the size to `0` and
// deallocating any held memory.
void clear() noexcept {
- inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(), data(),
- size());
+ inlined_vector_internal::DestroyAdapter<A>::DestroyElements(
+ storage_.GetAllocator(), data(), size());
storage_.DeallocateIfAllocated();
storage_.SetInlinedSize(0);
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 98aff334..4c1ba04a 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -1545,17 +1545,18 @@ TYPED_TEST_P(InstanceTest, InitializerListAssign) {
}
}
-REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
- CountConstructorsDestructorsOnCopyConstruction,
- CountConstructorsDestructorsOnMoveConstruction,
- CountConstructorsDestructorsOnAssignment,
- CountConstructorsDestructorsOnMoveAssignment,
- CountElemAssignInlineBacking, RangedConstructor,
- RangedAssign, InitializerListAssign);
+REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors,
+ CountConstructorsDestructorsOnCopyConstruction,
+ CountConstructorsDestructorsOnMoveConstruction,
+ CountConstructorsDestructorsOnAssignment,
+ CountConstructorsDestructorsOnMoveAssignment,
+ CountElemAssignInlineBacking, RangedConstructor,
+ RangedAssign, InitializerListAssign);
using InstanceTypes =
::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
-INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest,
+ InstanceTypes);
TEST(DynamicVec, DynamicVecCompiles) {
DynamicVec v;
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index f636c5fc..01f4e749 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -58,6 +58,7 @@
#include <type_traits>
#include <utility>
+#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -74,12 +75,24 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_MEMORY_SANITIZER)
+// When compiled in sanitizer mode, we add generation integers to the nodes and
+// iterators. When iterators are used, we validate that the container has not
+// been mutated since the iterator was constructed.
+#define ABSL_BTREE_ENABLE_GENERATIONS
+#endif
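
A sketch of the kind of bug these generation checks are meant to catch; the container and values are illustrative, and the check only fires in sanitizer builds where ABSL_BTREE_ENABLE_GENERATIONS is defined:

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> set = {1, 2, 3};
  auto it = set.begin();
  set.insert(4);  // Mutation may rebalance nodes, invalidating `it`.
  // Dereferencing `it` here is expected to trip the generation check in a
  // sanitizer build instead of silently reading from a stale node:
  // int v = *it;
}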
+
+template <typename Compare, typename T, typename U>
+using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
+
// A helper class that indicates if the Compare parameter is a key-compare-to
// comparator.
template <typename Compare, typename T>
using btree_is_key_compare_to =
- std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
- absl::weak_ordering>;
+ std::is_convertible<compare_result_t<Compare, T, T>, absl::weak_ordering>;
struct StringBtreeDefaultLess {
using is_transparent = void;
@@ -146,49 +159,140 @@ struct StringBtreeDefaultGreater {
}
};
-// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns an `absl::weak_ordering`. This helper
-// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, greater<string_view>, less<absl::Cord>, and
-// greater<absl::Cord>.
-//
-// key_compare_to_adapter is provided so that btree users
-// automatically get the more efficient compare-to code when using common
-// Abseil string types with common comparison functors.
-// These string-like specializations also turn on heterogeneous lookup by
-// default.
+// See the comments on checked_compare below.
+template <typename Compare, bool is_class = std::is_class<Compare>::value>
+struct checked_compare_base : Compare {
+ using Compare::Compare;
+ explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
+ const Compare &comp() const { return *this; }
+};
template <typename Compare>
-struct key_compare_to_adapter {
- using type = Compare;
+struct checked_compare_base<Compare, false> {
+ explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
+ const Compare &comp() const { return compare; }
+ Compare compare;
+};
+
+// A mechanism for opting out of checked_compare for use only in btree_test.cc.
+struct BtreeTestOnlyCheckedCompareOptOutBase {};
+
+// A helper class to adapt the specified comparator for two use cases:
+// (1) When using common Abseil string types with common comparison functors,
+// convert a boolean comparison into a three-way comparison that returns an
+// `absl::weak_ordering`. This helper class is specialized for
+// less<std::string>, greater<std::string>, less<string_view>,
+// greater<string_view>, less<absl::Cord>, and greater<absl::Cord>.
+// (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see
+// https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever
+// a comparison is made, we will make assertions to verify that the comparator
+// is valid.
+template <typename Compare, typename Key>
+struct key_compare_adapter {
+ // Inherit from checked_compare_base to support function pointers and also
+ // keep empty-base-optimization (EBO) support for classes.
+ // Note: we can't use CompressedTuple here because that would interfere
+ // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a
+ // CompressedTuple and nested `CompressedTuple`s don't support EBO.
+ // TODO(b/214288561): use CompressedTuple instead once it supports EBO for
+ // nested `CompressedTuple`s.
+ struct checked_compare : checked_compare_base<Compare> {
+ private:
+ using Base = typename checked_compare::checked_compare_base;
+ using Base::comp;
+
+    // If possible, returns whether `k` is equivalent to itself. We can only do
+ // this for `Key`s because we can't be sure that it's safe to call
+ // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a
+ // compilation failure inside the implementation of the comparison operator.
+ bool is_self_equivalent(const Key &k) const {
+ // Note: this works for both boolean and three-way comparators.
+ return comp()(k, k) == 0;
+ }
+ // If we can't compare `t` with itself, returns true unconditionally.
+ template <typename T>
+ bool is_self_equivalent(const T &) const {
+ return true;
+ }
+
+ public:
+ using Base::Base;
+ checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT
+
+ // Allow converting to Compare for use in key_comp()/value_comp().
+ explicit operator Compare() const { return comp(); }
+
+ template <typename T, typename U,
+ absl::enable_if_t<
+ std::is_same<bool, compare_result_t<Compare, T, U>>::value,
+ int> = 0>
+ bool operator()(const T &lhs, const U &rhs) const {
+ // NOTE: if any of these assertions fail, then the comparator does not
+ // establish a strict-weak-ordering (see
+ // https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(is_self_equivalent(lhs));
+ assert(is_self_equivalent(rhs));
+ const bool lhs_comp_rhs = comp()(lhs, rhs);
+ assert(!lhs_comp_rhs || !comp()(rhs, lhs));
+ return lhs_comp_rhs;
+ }
+
+ template <
+ typename T, typename U,
+ absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
+ absl::weak_ordering>::value,
+ int> = 0>
+ absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
+ // NOTE: if any of these assertions fail, then the comparator does not
+ // establish a strict-weak-ordering (see
+ // https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(is_self_equivalent(lhs));
+ assert(is_self_equivalent(rhs));
+ const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
+#ifndef NDEBUG
+ const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
+ if (lhs_comp_rhs > 0) {
+ assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
+ } else if (lhs_comp_rhs == 0) {
+ assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
+ } else {
+ assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
+ }
+#endif
+ return lhs_comp_rhs;
+ }
+ };
+ using type = absl::conditional_t<
+ std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
+ Compare, checked_compare>;
};
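
A sketch of an invalid comparator that the checked_compare assertions are designed to flag; the type is illustrative, and the failure mode assumes a debug (assertions-enabled) build:

#include "absl/container/btree_set.h"

// Not a strict weak ordering: operator()(x, x) returns true, so the
// self-equivalence assertion above is expected to fire.
struct BadLessEqual {
  bool operator()(int a, int b) const { return a <= b; }
};

int main() {
  absl::btree_set<int, BadLessEqual> s;
  s.insert(1);
  s.insert(2);  // Forces a comparison, which should assert in debug mode.
}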
template <>
-struct key_compare_to_adapter<std::less<std::string>> {
+struct key_compare_adapter<std::less<std::string>, std::string> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<std::string>> {
+struct key_compare_adapter<std::greater<std::string>, std::string> {
using type = StringBtreeDefaultGreater;
};
template <>
-struct key_compare_to_adapter<std::less<absl::string_view>> {
+struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<absl::string_view>> {
+struct key_compare_adapter<std::greater<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultGreater;
};
template <>
-struct key_compare_to_adapter<std::less<absl::Cord>> {
+struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultLess;
};
template <>
-struct key_compare_to_adapter<std::greater<absl::Cord>> {
+struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultGreater;
};
@@ -224,21 +328,70 @@ struct prefers_linear_node_search<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: T::absl_btree_prefer_linear_node_search {};
+template <typename Compare, typename Key>
+constexpr bool compare_has_valid_result_type() {
+ using compare_result_type = compare_result_t<Compare, Key, Key>;
+ return std::is_same<compare_result_type, bool>::value ||
+ std::is_convertible<compare_result_type, absl::weak_ordering>::value;
+}
+
+template <typename original_key_compare, typename value_type>
+class map_value_compare {
+ template <typename Params>
+ friend class btree;
+
+ // Note: this `protected` is part of the API of std::map::value_compare. See
+ // https://en.cppreference.com/w/cpp/container/map/value_compare.
+ protected:
+ explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
+
+ original_key_compare comp; // NOLINT
+
+ public:
+ auto operator()(const value_type &lhs, const value_type &rhs) const
+ -> decltype(comp(lhs.first, rhs.first)) {
+ return comp(lhs.first, rhs.first);
+ }
+};
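
For reference, a small sketch of the std::map-style contract this preserves: value_comp() orders whole pairs by their keys only (the contents are illustrative):

#include <iterator>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, const char *> m = {{1, "a"}, {2, "b"}};
  auto vcomp = m.value_comp();
  // Compares the value_type pairs {1, "a"} and {2, "b"} by key: 1 < 2.
  bool lt = vcomp(*m.begin(), *std::next(m.begin()));
  (void)lt;  // true
}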
+
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi, typename SlotPolicy>
+ bool IsMulti, bool IsMap, typename SlotPolicy>
struct common_params {
using original_key_compare = Compare;
// If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
- using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // We also adapt the comparator to diagnose invalid comparators in debug mode.
+ // We disable this when `Compare` is invalid in a way that will cause
+ // adaptation to fail (having invalid return type) so that we can give a
+ // better compilation failure in static_assert_validation. If we don't do
+ // this, then there will be cascading compilation failures that are confusing
+ // for users.
+ using key_compare =
+ absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
+ Compare,
+ typename key_compare_adapter<Compare, Key>::type>;
+
+ static constexpr bool kIsKeyCompareStringAdapted =
+ std::is_same<key_compare, StringBtreeDefaultLess>::value ||
+ std::is_same<key_compare, StringBtreeDefaultGreater>::value;
+ static constexpr bool kIsKeyCompareTransparent =
+ IsTransparent<original_key_compare>::value ||
+ kIsKeyCompareStringAdapted;
+ static constexpr bool kEnableGenerations =
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ true;
+#else
+ false;
+#endif
+
// A type which indicates if we have a key-compare-to functor or a plain old
// key-compare functor.
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
using allocator_type = Alloc;
using key_type = Key;
- using size_type = std::make_signed<size_t>::type;
+ using size_type = size_t;
using difference_type = ptrdiff_t;
using slot_policy = SlotPolicy;
@@ -250,6 +403,12 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
+ using value_compare =
+ absl::conditional_t<IsMap,
+ map_value_compare<original_key_compare, value_type>,
+ original_key_compare>;
+ using is_map_container = std::integral_constant<bool, IsMap>;
+
// For the given lookup key type, returns whether we can have multiple
// equivalent keys in the btree. If this is a multi-container, then we can.
// Otherwise, we can have multiple equivalent keys only if all of the
@@ -260,27 +419,25 @@ struct common_params {
// that we know has the same equivalence classes for all lookup types.
template <typename LookupKey>
constexpr static bool can_have_multiple_equivalent_keys() {
- return Multi ||
- (IsTransparent<key_compare>::value &&
- !std::is_same<LookupKey, Key>::value &&
- !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
- !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ return IsMulti || (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !kIsKeyCompareStringAdapted);
}
enum {
kTargetNodeSize = TargetNodeSize,
- // Upper bound for the available space for values. This is largest for leaf
+ // Upper bound for the available space for slots. This is largest for leaf
// nodes, which have overhead of at least a pointer + 4 bytes (for storing
// 3 field_types and an enum).
- kNodeValueSpace =
+ kNodeSlotSpace =
TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
};
- // This is an integral type large enough to hold as many
- // ValueSize-values as will fit a node of TargetNodeSize bytes.
+  // This is an integral type large enough to hold as many slots as will fit
+  // in a node of TargetNodeSize bytes.
using node_count_type =
- absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+ absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
(std::numeric_limits<uint8_t>::max)()),
uint16_t, uint8_t>; // NOLINT
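
A worked example of this selection under assumed sizes (64-bit pointers, 8-byte slots, a 256-byte target node):

constexpr int kTarget = 256;                   // assumed TargetNodeSize
constexpr int kSlotSpace = kTarget - (8 + 4);  // 244 bytes left for slots
constexpr int kSlots = kSlotSpace / 8;         // 30 eight-byte slots
static_assert(kSlots <= 255,
              "30 fits in uint8_t, so node_count_type is uint8_t here");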
@@ -303,122 +460,10 @@ struct common_params {
slot_policy::destroy(alloc, slot);
}
static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
- construct(alloc, new_slot, old_slot);
- destroy(alloc, old_slot);
- }
- static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
- slot_policy::swap(alloc, a, b);
- }
- static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
- slot_policy::move(alloc, src, dest);
+ slot_policy::transfer(alloc, new_slot, old_slot);
}
};
-// A parameters structure for holding the type parameters for a btree_map.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Data, typename Compare, typename Alloc,
- int TargetNodeSize, bool Multi>
-struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- map_slot_policy<Key, Data>> {
- using super_type = typename map_params::common_params;
- using mapped_type = Data;
- // This type allows us to move keys when it is safe to do so. It is safe
- // for maps in which value_type and mutable_value_type are layout compatible.
- using slot_policy = typename super_type::slot_policy;
- using slot_type = typename super_type::slot_type;
- using value_type = typename super_type::value_type;
- using init_type = typename super_type::init_type;
-
- using original_key_compare = typename super_type::original_key_compare;
- // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
- class value_compare {
- template <typename Params>
- friend class btree;
-
- protected:
- explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
-
- original_key_compare comp; // NOLINT
-
- public:
- auto operator()(const value_type &lhs, const value_type &rhs) const
- -> decltype(comp(lhs.first, rhs.first)) {
- return comp(lhs.first, rhs.first);
- }
- };
- using is_map_container = std::true_type;
-
- template <typename V>
- static auto key(const V &value) -> decltype(value.first) {
- return value.first;
- }
- static const Key &key(const slot_type *s) { return slot_policy::key(s); }
- static const Key &key(slot_type *s) { return slot_policy::key(s); }
- // For use in node handle.
- static auto mutable_key(slot_type *s)
- -> decltype(slot_policy::mutable_key(s)) {
- return slot_policy::mutable_key(s);
- }
- static mapped_type &value(value_type *value) { return value->second; }
-};
-
-// This type implements the necessary functions from the
-// absl::container_internal::slot_type interface.
-template <typename Key>
-struct set_slot_policy {
- using slot_type = Key;
- using value_type = Key;
- using mutable_value_type = Key;
-
- static value_type &element(slot_type *slot) { return *slot; }
- static const value_type &element(const slot_type *slot) { return *slot; }
-
- template <typename Alloc, class... Args>
- static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot,
- std::forward<Args>(args)...);
- }
-
- template <typename Alloc>
- static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
- absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
- }
-
- template <typename Alloc>
- static void destroy(Alloc *alloc, slot_type *slot) {
- absl::allocator_traits<Alloc>::destroy(*alloc, slot);
- }
-
- template <typename Alloc>
- static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
- using std::swap;
- swap(*a, *b);
- }
-
- template <typename Alloc>
- static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
- *dest = std::move(*src);
- }
-};
-
-// A parameters structure for holding the type parameters for a btree_set.
-// Compare and Alloc should be nothrow copy-constructible.
-template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
- bool Multi>
-struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
- set_slot_policy<Key>> {
- using value_type = Key;
- using slot_type = typename set_params::common_params::slot_type;
- using value_compare =
- typename set_params::common_params::original_key_compare;
- using is_map_container = std::false_type;
-
- template <typename V>
- static const V &key(const V &value) { return value; }
- static const Key &key(const slot_type *slot) { return *slot; }
- static const Key &key(slot_type *slot) { return *slot; }
-};
-
// An adapter class that converts a lower-bound compare into an upper-bound
// compare. Note: there is no need to make a version of this adapter specialized
// for key-compare-to functors because the upper-bound (the first value greater
@@ -453,8 +498,8 @@ struct SearchResult {
template <typename V>
struct SearchResult<V, false> {
SearchResult() {}
- explicit SearchResult(V value) : value(value) {}
- SearchResult(V value, MatchKind /*match*/) : value(value) {}
+ explicit SearchResult(V v) : value(v) {}
+ SearchResult(V v, MatchKind /*match*/) : value(v) {}
V value;
@@ -471,6 +516,7 @@ class btree_node {
using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type;
+ using original_key_compare = typename Params::original_key_compare;
public:
using params_type = Params;
@@ -492,21 +538,28 @@ class btree_node {
// - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant<
- bool,
- has_linear_node_search_preference<key_compare>::value
- ? prefers_linear_node_search<key_compare>::value
- : has_linear_node_search_preference<key_type>::value
+ bool, has_linear_node_search_preference<original_key_compare>::value
+ ? prefers_linear_node_search<original_key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
? prefers_linear_node_search<key_type>::value
: std::is_arithmetic<key_type>::value &&
- (std::is_same<std::less<key_type>, key_compare>::value ||
+ (std::is_same<std::less<key_type>,
+ original_key_compare>::value ||
std::is_same<std::greater<key_type>,
- key_compare>::value)>;
+ original_key_compare>::value)>;
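
A sketch of the opt-in tag this trait machinery reads; SequentialId is an illustrative key type, not part of Abseil:

#include <type_traits>

// A key type (or a comparator) can force linear node search by exposing this
// member type; otherwise arithmetic keys compared with std::less/std::greater
// get linear search and everything else falls back to binary search.
struct SequentialId {
  using absl_btree_prefer_linear_node_search = std::true_type;
  int value;
  bool operator<(const SequentialId &other) const {
    return value < other.value;
  }
};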
// This class is organized by absl::container_internal::Layout as if it had
// the following structure:
// // A pointer to the node's parent.
// btree_node *parent;
//
+ // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a
+ // // generation integer in order to check that when iterators are
+ // // used, they haven't been invalidated already. Only the generation on
+ // // the root is used, but we have one on each node because whether a node
+ // // is root or not can change.
+ // uint32_t generation;
+ //
// // The position of the node in the node's parent.
// field_type position;
// // The index of the first populated value in `values`.
@@ -553,23 +606,27 @@ class btree_node {
btree_node() = default;
private:
- using layout_type = absl::container_internal::Layout<btree_node *, field_type,
- slot_type, btree_node *>;
+ using layout_type =
+ absl::container_internal::Layout<btree_node *, uint32_t, field_type,
+ slot_type, btree_node *>;
constexpr static size_type SizeWithNSlots(size_type n) {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ n,
- /*children*/ 0)
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ n,
+ /*children*/ 0)
.AllocSize();
}
- // A lower bound for the overhead of fields other than values in a leaf node.
+ // A lower bound for the overhead of fields other than slots in a leaf node.
constexpr static size_type MinimumOverhead() {
- return SizeWithNSlots(1) - sizeof(value_type);
+ return SizeWithNSlots(1) - sizeof(slot_type);
}
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetSlots(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const size_type begin,
+ const size_type end) {
return begin == end ? begin
: SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
@@ -598,16 +655,20 @@ class btree_node {
// Leaves can have less than kNodeSlots values.
constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ slot_count,
- /*children*/ 0);
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ slot_count,
+ /*children*/ 0);
}
constexpr static layout_type InternalLayout() {
- return layout_type(/*parent*/ 1,
- /*position, start, finish, max_count*/ 4,
- /*slots*/ kNodeSlots,
- /*children*/ kNodeSlots + 1);
+ return layout_type(
+ /*parent*/ 1,
+ /*generation*/ params_type::kEnableGenerations ? 1 : 0,
+ /*position, start, finish, max_count*/ 4,
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
}
constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
@@ -621,44 +682,47 @@ class btree_node {
template <size_type N>
inline typename layout_type::template ElementType<N> *GetField() {
// We assert that we don't read from values that aren't there.
- assert(N < 3 || !leaf());
+ assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
}
template <size_type N>
inline const typename layout_type::template ElementType<N> *GetField() const {
- assert(N < 3 || !leaf());
+ assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(
reinterpret_cast<const char *>(this));
}
void set_parent(btree_node *p) { *GetField<0>() = p; }
- field_type &mutable_finish() { return GetField<1>()[2]; }
- slot_type *slot(int i) { return &GetField<2>()[i]; }
+ field_type &mutable_finish() { return GetField<2>()[2]; }
+ slot_type *slot(int i) { return &GetField<3>()[i]; }
slot_type *start_slot() { return slot(start()); }
slot_type *finish_slot() { return slot(finish()); }
- const slot_type *slot(int i) const { return &GetField<2>()[i]; }
- void set_position(field_type v) { GetField<1>()[0] = v; }
- void set_start(field_type v) { GetField<1>()[1] = v; }
- void set_finish(field_type v) { GetField<1>()[2] = v; }
+ const slot_type *slot(int i) const { return &GetField<3>()[i]; }
+ void set_position(field_type v) { GetField<2>()[0] = v; }
+ void set_start(field_type v) { GetField<2>()[1] = v; }
+ void set_finish(field_type v) { GetField<2>()[2] = v; }
// This method is only called by the node init methods.
- void set_max_count(field_type v) { GetField<1>()[3] = v; }
+ void set_max_count(field_type v) { GetField<2>()[3] = v; }
public:
// Whether this is a leaf node or not. This value doesn't change after the
// node is created.
- bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+ bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
+ // Whether this is an internal node or not. This value doesn't change after
+ // the node is created.
+ bool is_internal() const { return !is_leaf(); }
// Getter for the position of this node in its parent.
- field_type position() const { return GetField<1>()[0]; }
+ field_type position() const { return GetField<2>()[0]; }
// Getter for the offset of the first value in the `values` array.
field_type start() const {
- // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
- assert(GetField<1>()[1] == 0);
+ // TODO(ezb): when floating storage is implemented, return GetField<2>()[1];
+ assert(GetField<2>()[1] == 0);
return 0;
}
// Getter for the offset after the last value in the `values` array.
- field_type finish() const { return GetField<1>()[2]; }
+ field_type finish() const { return GetField<2>()[2]; }
// Getters for the number of values stored in this node.
field_type count() const {
@@ -668,7 +732,7 @@ class btree_node {
field_type max_count() const {
// Internal nodes have max_count==kInternalNodeMaxCount.
// Leaf nodes have max_count in [1, kNodeSlots].
- const field_type max_count = GetField<1>()[3];
+ const field_type max_count = GetField<2>()[3];
return max_count == field_type{kInternalNodeMaxCount}
? field_type{kNodeSlots}
: max_count;
@@ -679,21 +743,44 @@ class btree_node {
// Getter for whether the node is the root of the tree. The parent of the
// root of the tree is the leftmost node in the tree which is guaranteed to
// be a leaf.
- bool is_root() const { return parent()->leaf(); }
+ bool is_root() const { return parent()->is_leaf(); }
void make_root() {
assert(parent()->is_root());
+ set_generation(parent()->generation());
set_parent(parent()->parent());
}
+ // Gets the root node's generation integer, which is the one used by the tree.
+ uint32_t *get_root_generation() const {
+ assert(params_type::kEnableGenerations);
+ const btree_node *curr = this;
+ for (; !curr->is_root(); curr = curr->parent()) continue;
+ return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
+ }
+
+ // Returns the generation for iterator validation.
+ uint32_t generation() const {
+ return params_type::kEnableGenerations ? *get_root_generation() : 0;
+ }
+ // Updates generation. Should only be called on a root node or during node
+ // initialization.
+ void set_generation(uint32_t generation) {
+ if (params_type::kEnableGenerations) GetField<1>()[0] = generation;
+ }
+ // Updates the generation. We do this whenever the node is mutated.
+ void next_generation() {
+ if (params_type::kEnableGenerations) ++*get_root_generation();
+ }
+
// Getters for the key/value at position i in the node.
const key_type &key(int i) const { return params_type::key(slot(i)); }
reference value(int i) { return params_type::element(slot(i)); }
const_reference value(int i) const { return params_type::element(slot(i)); }
// Getters/setter for the child at position i in the node.
- btree_node *child(int i) const { return GetField<3>()[i]; }
+ btree_node *child(int i) const { return GetField<4>()[i]; }
btree_node *start_child() const { return child(start()); }
- btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+ btree_node *&mutable_child(int i) { return GetField<4>()[i]; }
void clear_child(int i) {
absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
}
@@ -850,7 +937,8 @@ class btree_node {
void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- void init_leaf(btree_node *parent, int max_count) {
+ void init_leaf(int max_count, btree_node *parent) {
+ set_generation(0);
set_parent(parent);
set_position(0);
set_start(0);
@@ -860,7 +948,7 @@ class btree_node {
start_slot(), max_count * sizeof(slot_type));
}
void init_internal(btree_node *parent) {
- init_leaf(parent, kNodeSlots);
+ init_leaf(kNodeSlots, parent);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
set_max_count(kInternalNodeMaxCount);
@@ -879,15 +967,18 @@ class btree_node {
private:
template <typename... Args>
void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
+ next_generation();
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
void value_destroy(const field_type i, allocator_type *alloc) {
+ next_generation();
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
void value_destroy_n(const field_type i, const field_type n,
allocator_type *alloc) {
+ next_generation();
for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
params_type::destroy(alloc, s);
absl::container_internal::SanitizerPoisonObject(s);
@@ -903,6 +994,7 @@ class btree_node {
// Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
void transfer(const size_type dest_i, const size_type src_i,
btree_node *src_node, allocator_type *alloc) {
+ next_generation();
transfer(slot(dest_i), src_node->slot(src_i), alloc);
}
@@ -911,6 +1003,7 @@ class btree_node {
void transfer_n(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
+ next_generation();
for (slot_type *src = src_node->slot(src_i), *end = src + n,
*dest = slot(dest_i);
src != end; ++src, ++dest) {
@@ -923,6 +1016,7 @@ class btree_node {
void transfer_n_backward(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
+ next_generation();
for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
*dest = slot(dest_i + n - 1);
src != end; --src, --dest) {
@@ -933,13 +1027,13 @@ class btree_node {
template <typename P>
friend class btree;
template <typename N, typename R, typename P>
- friend struct btree_iterator;
+ friend class btree_iterator;
friend class BtreeNodePeer;
+ friend struct btree_access;
};
template <typename Node, typename Reference, typename Pointer>
-struct btree_iterator {
- private:
+class btree_iterator {
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
@@ -967,9 +1061,15 @@ struct btree_iterator {
using reference = Reference;
using iterator_category = std::bidirectional_iterator_tag;
- btree_iterator() : node(nullptr), position(-1) {}
- explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
- btree_iterator(Node *n, int p) : node(n), position(p) {}
+ btree_iterator() : btree_iterator(nullptr, -1) {}
+ explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
+ btree_iterator(Node *n, int p) : node_(n), position_(p) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // Use `~uint32_t{}` as a sentinel value for iterator generations so it
+ // doesn't match the initial value for the actual generation.
+ generation_ = n != nullptr ? n->generation() : ~uint32_t{};
+#endif
+ }
// NOTE: this SFINAE allows for implicit conversions from iterator to
// const_iterator, but it specifically avoids hiding the copy constructor so
@@ -980,58 +1080,32 @@ struct btree_iterator {
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
- : node(other.node), position(other.position) {}
-
- private:
- // This SFINAE allows explicit conversions from const_iterator to
- // iterator, but also avoids hiding the copy constructor.
- // NOTE: the const_cast is safe because this constructor is only called by
- // non-const methods and the container owns the nodes.
- template <typename N, typename R, typename P,
- absl::enable_if_t<
- std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
- std::is_same<btree_iterator, iterator>::value,
- int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> other)
- : node(const_cast<node_type *>(other.node)), position(other.position) {}
-
- // Increment/decrement the iterator.
- void increment() {
- if (node->leaf() && ++position < node->finish()) {
- return;
- }
- increment_slow();
- }
- void increment_slow();
-
- void decrement() {
- if (node->leaf() && --position >= node->start()) {
- return;
- }
- decrement_slow();
+ : node_(other.node_), position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ generation_ = other.generation_;
+#endif
}
- void decrement_slow();
- public:
bool operator==(const iterator &other) const {
- return node == other.node && position == other.position;
+ return node_ == other.node_ && position_ == other.position_;
}
bool operator==(const const_iterator &other) const {
- return node == other.node && position == other.position;
+ return node_ == other.node_ && position_ == other.position_;
}
bool operator!=(const iterator &other) const {
- return node != other.node || position != other.position;
+ return node_ != other.node_ || position_ != other.position_;
}
bool operator!=(const const_iterator &other) const {
- return node != other.node || position != other.position;
+ return node_ != other.node_ || position_ != other.position_;
}
// Accessors for the key/value the iterator is pointing at.
reference operator*() const {
- ABSL_HARDENING_ASSERT(node != nullptr);
- ABSL_HARDENING_ASSERT(node->start() <= position);
- ABSL_HARDENING_ASSERT(node->finish() > position);
- return node->value(position);
+ ABSL_HARDENING_ASSERT(node_ != nullptr);
+ ABSL_HARDENING_ASSERT(node_->start() <= position_);
+ ABSL_HARDENING_ASSERT(node_->finish() > position_);
+ assert_valid_generation();
+ return node_->value(position_);
}
pointer operator->() const { return &operator*(); }
@@ -1069,23 +1143,84 @@ struct btree_iterator {
friend class btree_multiset_container;
template <typename TreeType, typename CheckerType>
friend class base_checker;
+ friend struct btree_access;
+
+ // This SFINAE allows explicit conversions from const_iterator to
+ // iterator, but also avoids hiding the copy constructor.
+ // NOTE: the const_cast is safe because this constructor is only called by
+ // non-const methods and the container owns the nodes.
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+ std::is_same<btree_iterator, iterator>::value,
+ int> = 0>
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
+ : node_(const_cast<node_type *>(other.node_)),
+ position_(other.position_) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ generation_ = other.generation_;
+#endif
+ }
- const key_type &key() const { return node->key(position); }
- slot_type *slot() { return node->slot(position); }
+ // Increment/decrement the iterator.
+ void increment() {
+ assert_valid_generation();
+ if (node_->is_leaf() && ++position_ < node_->finish()) {
+ return;
+ }
+ increment_slow();
+ }
+ void increment_slow();
+
+ void decrement() {
+ assert_valid_generation();
+ if (node_->is_leaf() && --position_ >= node_->start()) {
+ return;
+ }
+ decrement_slow();
+ }
+ void decrement_slow();
+
+ // Updates the generation. For use internally right before we return an
+ // iterator to the user.
+ void update_generation() {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (node_ != nullptr) generation_ = node_->generation();
+#endif
+ }
+
+ const key_type &key() const { return node_->key(position_); }
+ decltype(std::declval<Node *>()->slot(0)) slot() {
+ return node_->slot(position_);
+ }
+
+ void assert_valid_generation() const {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (node_ != nullptr && node_->generation() != generation_) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ "Attempting to use an invalidated iterator. The corresponding b-tree "
+ "container has been mutated since this iterator was constructed.");
+ }
+#endif
+ }
// The node in the tree the iterator is pointing at.
- Node *node;
+ Node *node_;
// The position within the node of the tree the iterator is pointing at.
// NOTE: this is an int rather than a field_type because iterators can point
// to invalid positions (such as -1) in certain circumstances.
- int position;
+ int position_;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // Used to check that the iterator hasn't been invalidated.
+ uint32_t generation_;
+#endif
};
template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
- using init_type = typename Params::init_type;
using field_type = typename node_type::field_type;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
@@ -1093,6 +1228,9 @@ class btree {
struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
using field_type = typename node_type::field_type;
node_type *parent;
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ uint32_t generation = 0;
+#endif
field_type position = 0;
field_type start = 0;
field_type finish = 0;
@@ -1166,14 +1304,6 @@ class btree {
using slot_type = typename Params::slot_type;
private:
- // For use in copy_or_move_values_in_order.
- const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
- value_type &&maybe_move_from_iterator(iterator it) {
- // This is a destructive operation on the other container so it's safe for
- // us to const_cast and move from the keys here even if it's a set.
- return std::move(const_cast<value_type &>(*it));
- }
-
// Copies or moves (depending on the template parameter) the values in
// other into this btree in their order in other. This btree must be empty
// before this method is called. This method is used in copy construction,
@@ -1186,7 +1316,7 @@ class btree {
public:
btree(const key_compare &comp, const allocator_type &alloc)
- : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+ : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
btree(const btree &other) : btree(other, other.allocator()) {}
btree(const btree &other, const allocator_type &alloc)
@@ -1194,10 +1324,10 @@ class btree {
copy_or_move_values_in_order(other);
}
btree(btree &&other) noexcept
- : root_(std::move(other.root_)),
- rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+ : root_(absl::exchange(other.root_, EmptyNode())),
+ rightmost_(std::move(other.rightmost_)),
size_(absl::exchange(other.size_, 0)) {
- other.mutable_root() = EmptyNode();
+ other.mutable_rightmost() = EmptyNode();
}
btree(btree &&other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
@@ -1222,9 +1352,9 @@ class btree {
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
- iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+ iterator end() { return iterator(rightmost(), rightmost()->finish()); }
const_iterator end() const {
- return const_iterator(rightmost_, rightmost_->finish());
+ return const_iterator(rightmost(), rightmost()->finish());
}
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
@@ -1350,7 +1480,7 @@ class btree {
void swap(btree &other);
const key_compare &key_comp() const noexcept {
- return root_.template get<0>();
+ return rightmost_.template get<0>();
}
template <typename K1, typename K2>
bool compare_keys(const K1 &a, const K2 &b) const {
@@ -1397,6 +1527,7 @@ class btree {
}
// The total number of bytes used by the btree.
+ // TODO(b/169338300): update to support node_btree_*.
size_type bytes_used() const {
node_stats stats = internal_stats(root());
if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
@@ -1440,11 +1571,20 @@ class btree {
allocator_type get_allocator() const { return allocator(); }
private:
+ friend struct btree_access;
+
// Internal accessor routines.
- node_type *root() { return root_.template get<2>(); }
- const node_type *root() const { return root_.template get<2>(); }
- node_type *&mutable_root() noexcept { return root_.template get<2>(); }
- key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+ node_type *root() { return root_; }
+ const node_type *root() const { return root_; }
+ node_type *&mutable_root() noexcept { return root_; }
+ node_type *rightmost() { return rightmost_.template get<2>(); }
+ const node_type *rightmost() const { return rightmost_.template get<2>(); }
+ node_type *&mutable_rightmost() noexcept {
+ return rightmost_.template get<2>();
+ }
+ key_compare *mutable_key_comp() noexcept {
+ return &rightmost_.template get<0>();
+ }
// The leftmost node is stored as the parent of the root node.
node_type *leftmost() { return root()->parent(); }
@@ -1452,10 +1592,10 @@ class btree {
// Allocator routines.
allocator_type *mutable_allocator() noexcept {
- return &root_.template get<1>();
+ return &rightmost_.template get<1>();
}
const allocator_type &allocator() const noexcept {
- return root_.template get<1>();
+ return rightmost_.template get<1>();
}
// Allocates a correctly aligned node of at least size bytes using the
@@ -1474,12 +1614,12 @@ class btree {
}
node_type *new_leaf_node(node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
- n->init_leaf(parent, kNodeSlots);
+ n->init_leaf(kNodeSlots, parent);
return n;
}
node_type *new_leaf_root_node(const int max_count) {
node_type *n = allocate(node_type::LeafSize(max_count));
- n->init_leaf(/*parent=*/n, max_count);
+ n->init_leaf(max_count, /*parent=*/n);
return n;
}
@@ -1503,10 +1643,10 @@ class btree {
void try_shrink();
iterator internal_end(iterator iter) {
- return iter.node != nullptr ? iter : end();
+ return iter.node_ != nullptr ? iter : end();
}
const_iterator internal_end(const_iterator iter) const {
- return iter.node != nullptr ? iter : end();
+ return iter.node_ != nullptr ? iter : end();
}
// Emplaces a value into the btree immediately before iter. Requires that
@@ -1516,9 +1656,8 @@ class btree {
// Returns an iterator pointing to the first value >= the value "iter" is
// pointing at. Note that "iter" might be pointing to an invalid location such
- // as iter.position == iter.node->finish(). This routine simply moves iter up
- // in the tree to a valid location.
- // Requires: iter.node is non-null.
+ // as iter.position_ == iter.node_->finish(). This routine simply moves iter
+ // up in the tree to a valid location. Requires: iter.node_ is non-null.
template <typename IterType>
static IterType internal_last(IterType iter);
@@ -1554,7 +1693,7 @@ class btree {
if (node == nullptr || (node == root() && empty())) {
return node_stats(0, 0);
}
- if (node->leaf()) {
+ if (node->is_leaf()) {
return node_stats(1, 0);
}
node_stats res(0, 1);
@@ -1564,15 +1703,14 @@ class btree {
return res;
}
- // We use compressed tuple in order to save space because key_compare and
- // allocator_type are usually empty.
- absl::container_internal::CompressedTuple<key_compare, allocator_type,
- node_type *>
- root_;
+ node_type *root_;
// A pointer to the rightmost node. Note that the leftmost node is stored as
- // the root's parent.
- node_type *rightmost_;
+ // the root's parent. We use compressed tuple in order to save space because
+ // key_compare and allocator_type are usually empty.
+ absl::container_internal::CompressedTuple<key_compare, allocator_type,
+ node_type *>
+ rightmost_;
// Number of values.
size_type size_;
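
A sketch of the space saving the comment above refers to, using the same internal CompressedTuple (the concrete types are illustrative): both the comparator and the allocator are empty classes, so empty-base optimization leaves only the pointer:

#include <functional>
#include <memory>
#include "absl/container/internal/compressed_tuple.h"

using Packed = absl::container_internal::CompressedTuple<
    std::less<int>, std::allocator<int>, void *>;
// Both std::less<int> and std::allocator<int> are empty and non-final, so the
// compressed tuple should collapse to the size of the stored pointer.
static_assert(sizeof(Packed) == sizeof(void *), "EBO keeps only the pointer");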
@@ -1596,8 +1734,8 @@ inline void btree_node<P>::emplace_value(const size_type i,
value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
- if (!leaf() && finish() > i + 1) {
- for (int j = finish(); j > i + 1; --j) {
+ if (is_internal() && finish() > i + 1) {
+ for (field_type j = finish(); j > i + 1; --j) {
set_child(j, child(j - 1));
}
clear_child(i + 1);
@@ -1614,7 +1752,7 @@ inline void btree_node<P>::remove_values(const field_type i,
const field_type src_i = i + to_erase;
transfer_n(orig_finish - src_i, i, src_i, this, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Delete all children between begin and end.
for (int j = 0; j < to_erase; ++j) {
clear_and_delete(child(i + j + 1), alloc);
@@ -1651,7 +1789,7 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
right->transfer_n(right->count() - to_move, right->start(),
right->start() + to_move, right, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the right to the left node.
for (int i = 0; i < to_move; ++i) {
init_child(finish() + i + 1, right->child(i));
@@ -1698,7 +1836,7 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
// 4) Move the new delimiting value to the parent from the left node.
parent()->transfer(position(), finish() - to_move, this, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the left to the right node.
for (int i = right->finish(); i >= right->start(); --i) {
right->init_child(i + to_move, right->child(i));
@@ -1744,7 +1882,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
value_destroy(finish(), alloc);
parent()->init_child(position() + 1, dest);
- if (!leaf()) {
+ if (is_internal()) {
for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
++i, ++j) {
assert(child(j) != nullptr);
@@ -1765,7 +1903,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
// Move the values from the right to the left node.
transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
- if (!leaf()) {
+ if (is_internal()) {
// Move the child pointers from the right to the left node.
for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
init_child(j, src->child(i));
@@ -1783,7 +1921,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
template <typename P>
void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
- if (node->leaf()) {
+ if (node->is_leaf()) {
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(LeafSize(node->max_count()), node, alloc);
return;
@@ -1797,7 +1935,15 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
btree_node *delete_root_parent = node->parent();
// Navigate to the leftmost leaf under node, and then delete upwards.
- while (!node->leaf()) node = node->start_child();
+ while (node->is_internal()) node = node->start_child();
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ // When generations are enabled, we delete the leftmost leaf last in case it's
+ // the parent of the root and we need to check whether it's a leaf before we
+ // can update the root's generation.
+ // TODO(ezb): if we change btree_node::is_root to check a bool inside the node
+ // instead of checking whether the parent is a leaf, we can remove this logic.
+ btree_node *leftmost_leaf = node;
+#endif
// Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
// isn't guaranteed to be a valid `field_type`.
int pos = node->position();
@@ -1807,14 +1953,17 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
assert(pos <= parent->finish());
do {
node = parent->child(pos);
- if (!node->leaf()) {
+ if (node->is_internal()) {
// Navigate to the leftmost leaf under node.
- while (!node->leaf()) node = node->start_child();
+ while (node->is_internal()) node = node->start_child();
pos = node->position();
parent = node->parent();
}
node->value_destroy_n(node->start(), node->count(), alloc);
- deallocate(LeafSize(node->max_count()), node, alloc);
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ if (leftmost_leaf != node)
+#endif
+ deallocate(LeafSize(node->max_count()), node, alloc);
++pos;
} while (pos <= parent->finish());
@@ -1826,7 +1975,12 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
parent = node->parent();
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(InternalSize(), node, alloc);
- if (parent == delete_root_parent) return;
+ if (parent == delete_root_parent) {
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+ deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc);
+#endif
+ return;
+ }
++pos;
} while (pos > parent->finish());
}
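The generation calls in these hunks (set_generation, update_generation) exist only when ABSL_BTREE_ENABLE_GENERATIONS is defined. Judging from the calls alone, each node carries a counter that structural changes bump and that live iterators re-snapshot, so a stale iterator can be caught by an assert. A simplified, hypothetical illustration of that check (the real node and iterator types live in btree.h):

#include <cassert>
#include <cstdint>

struct Node {
  std::uint32_t generation = 0;
  void OnStructuralChange() { ++generation; }  // e.g. split, merge, erase
};

struct Iter {
  Node* node;
  std::uint32_t generation;  // snapshot taken when the iterator was produced
  void update_generation() { generation = node->generation; }
  void AssertStillValid() const { assert(generation == node->generation); }
};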
@@ -1836,49 +1990,49 @@ void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
// btree_iterator methods
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::increment_slow() {
- if (node->leaf()) {
- assert(position >= node->finish());
+ if (node_->is_leaf()) {
+ assert(position_ >= node_->finish());
btree_iterator save(*this);
- while (position == node->finish() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position();
- node = node->parent();
+ while (position_ == node_->finish() && !node_->is_root()) {
+ assert(node_->parent()->child(node_->position()) == node_);
+ position_ = node_->position();
+ node_ = node_->parent();
}
// TODO(ezb): assert we aren't incrementing end() instead of handling.
- if (position == node->finish()) {
+ if (position_ == node_->finish()) {
*this = save;
}
} else {
- assert(position < node->finish());
- node = node->child(position + 1);
- while (!node->leaf()) {
- node = node->start_child();
+ assert(position_ < node_->finish());
+ node_ = node_->child(position_ + 1);
+ while (node_->is_internal()) {
+ node_ = node_->start_child();
}
- position = node->start();
+ position_ = node_->start();
}
}
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::decrement_slow() {
- if (node->leaf()) {
- assert(position <= -1);
+ if (node_->is_leaf()) {
+ assert(position_ <= -1);
btree_iterator save(*this);
- while (position < node->start() && !node->is_root()) {
- assert(node->parent()->child(node->position()) == node);
- position = node->position() - 1;
- node = node->parent();
+ while (position_ < node_->start() && !node_->is_root()) {
+ assert(node_->parent()->child(node_->position()) == node_);
+ position_ = node_->position() - 1;
+ node_ = node_->parent();
}
// TODO(ezb): assert we aren't decrementing begin() instead of handling.
- if (position < node->start()) {
+ if (position_ < node_->start()) {
*this = save;
}
} else {
- assert(position >= node->start());
- node = node->child(position);
- while (!node->leaf()) {
- node = node->child(node->finish());
+ assert(position_ >= node_->start());
+ node_ = node_->child(position_);
+ while (node_->is_internal()) {
+ node_ = node_->child(node_->finish());
}
- position = node->finish() - 1;
+ position_ = node_->finish() - 1;
}
}
@@ -1896,12 +2050,12 @@ void btree<P>::copy_or_move_values_in_order(Btree &other) {
// values is the same order we'll store them in.
auto iter = other.begin();
if (iter == other.end()) return;
- insert_multi(maybe_move_from_iterator(iter));
+ insert_multi(iter.slot());
++iter;
for (; iter != other.end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
- internal_emplace(end(), maybe_move_from_iterator(iter));
+ internal_emplace(end(), iter.slot());
}
}
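Because this routine receives values already in tree order, everything after the first value is appended at end() rather than located by a top-down search. A usage-level sketch (copy construction is one caller that funnels through here):

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> src = {1, 2, 3, 4, 5};
  // The copy walks `src` in order; after the first value, each element is
  // appended via internal_emplace(end(), ...), so no per-element search.
  absl::btree_set<int> dst = src;
  return dst.size() == src.size() ? 0 : 1;
}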
@@ -1921,15 +2075,12 @@ constexpr bool btree<P>::static_assert_validation() {
"target node size too large");
// Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
- using compare_result_type =
- absl::result_of_t<key_compare(key_type, key_type)>;
static_assert(
- std::is_same<compare_result_type, bool>::value ||
- std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+ compare_has_valid_result_type<key_compare, key_type>(),
"key comparison function must return absl::{weak,strong}_ordering or "
"bool.");
- // Test the assumption made in setting kNodeValueSpace.
+ // Test the assumption made in setting kNodeSlotSpace.
static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
"node space assumption incorrect");
@@ -1983,7 +2134,7 @@ template <typename K, typename... Args>
auto btree<P>::insert_unique(const K &key, Args &&... args)
-> std::pair<iterator, bool> {
if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
+ mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
@@ -1996,7 +2147,7 @@ auto btree<P>::insert_unique(const K &key, Args &&... args)
}
} else {
iterator last = internal_last(iter);
- if (last.node && !compare_keys(key, last.key())) {
+ if (last.node_ && !compare_keys(key, last.key())) {
// The key already exists in the tree, do nothing.
return {last, false};
}
@@ -2041,8 +2192,11 @@ template <typename P>
template <typename InputIterator>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
for (; b != e; ++b) {
- init_type value(*b);
- insert_hint_unique(end(), params_type::key(value), std::move(value));
+ // Use a node handle to manage a temp slot.
+ auto node_handle =
+ CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
+ slot_type *slot = CommonAccess::GetSlot(node_handle);
+ insert_hint_unique(end(), params_type::key(slot), slot);
}
}
@@ -2050,11 +2204,11 @@ template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
- mutable_root() = rightmost_ = new_leaf_root_node(1);
+ mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
iterator iter = internal_upper_bound(key);
- if (iter.node == nullptr) {
+ if (iter.node_ == nullptr) {
iter = end();
}
return internal_emplace(iter, std::forward<ValueType>(v));
@@ -2114,15 +2268,15 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
- // Note: `root_` also contains the allocator and the key comparator.
swap(root_, other.root_);
+ // Note: `rightmost_` also contains the allocator and the key comparator.
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
} else {
if (allocator() == other.allocator()) {
swap(mutable_root(), other.mutable_root());
swap(*mutable_key_comp(), *other.mutable_key_comp());
- swap(rightmost_, other.rightmost_);
+ swap(mutable_rightmost(), other.mutable_rightmost());
swap(size_, other.size_);
} else {
// We aren't allowed to propagate the allocator and the allocator is
@@ -2140,22 +2294,29 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
template <typename P>
auto btree<P>::erase(iterator iter) -> iterator {
- bool internal_delete = false;
- if (!iter.node->leaf()) {
- // Deletion of a value on an internal node. First, move the largest value
- // from our left child here, then delete that position (in remove_values()
- // below). We can get to the largest value from our left child by
- // decrementing iter.
+ iter.node_->value_destroy(iter.position_, mutable_allocator());
+ iter.update_generation();
+
+ const bool internal_delete = iter.node_->is_internal();
+ if (internal_delete) {
+ // Deletion of a value on an internal node. First, transfer the largest
+ // value from our left child here, then erase/rebalance from that position.
+ // We can get to the largest value from our left child by decrementing iter.
iterator internal_iter(iter);
--iter;
- assert(iter.node->leaf());
- params_type::move(mutable_allocator(), iter.node->slot(iter.position),
- internal_iter.node->slot(internal_iter.position));
- internal_delete = true;
- }
-
- // Delete the key from the leaf.
- iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
+ assert(iter.node_->is_leaf());
+ internal_iter.node_->transfer(internal_iter.position_, iter.position_,
+ iter.node_, mutable_allocator());
+ } else {
+ // Shift values after erased position in leaf. In the internal case, we
+ // don't need to do this because the leaf position is the end of the node.
+ const field_type transfer_from = iter.position_ + 1;
+ const field_type num_to_transfer = iter.node_->finish() - transfer_from;
+ iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from,
+ iter.node_, mutable_allocator());
+ }
+ // Update node finish and container size.
+ iter.node_->set_finish(iter.node_->finish() - 1);
--size_;
// We want to return the next value after the one we just erased. If we
@@ -2163,7 +2324,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
// value is ++(++iter). If we erased from a leaf node (internal_delete ==
// false) then the next value is ++iter. Note that ++iter may point to an
// internal node and the value in the internal node may move to a leaf node
- // (iter.node) when rebalancing is performed at the leaf level.
+ // (iter.node_) when rebalancing is performed at the leaf level.
iterator res = rebalance_after_delete(iter);
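A worked trace of the leaf branch above, with illustrative slot contents:

// Leaf slots: [a b c d], finish() == 4; erase `b` at position_ == 1.
//   value_destroy(1)                       -> [a _ c d]
//   transfer_from   = 1 + 1 = 2
//   num_to_transfer = 4 - 2 = 2            (move {c, d} left by one slot)
//   transfer_n(2, /*dest=*/1, /*src=*/2)   -> [a c d _]
//   set_finish(3); --size_;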
@@ -2180,14 +2341,14 @@ auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
iterator res(iter);
bool first_iteration = true;
for (;;) {
- if (iter.node == root()) {
+ if (iter.node_ == root()) {
try_shrink();
if (empty()) {
return end();
}
break;
}
- if (iter.node->count() >= kMinNodeValues) {
+ if (iter.node_->count() >= kMinNodeValues) {
break;
}
bool merged = try_merge_or_rebalance(&iter);
@@ -2200,14 +2361,15 @@ auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
if (!merged) {
break;
}
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
+ iter.position_ = iter.node_->position();
+ iter.node_ = iter.node_->parent();
}
+ res.update_generation();
// Adjust our return value. If we're pointing at the end of a node, advance
// the iterator.
- if (res.position == res.node->finish()) {
- res.position = res.node->finish() - 1;
+ if (res.position_ == res.node_->finish()) {
+ res.position_ = res.node_->finish() - 1;
++res;
}
@@ -2224,33 +2386,36 @@ auto btree<P>::erase_range(iterator begin, iterator end)
return {0, begin};
}
- if (count == size_) {
+ if (static_cast<size_type>(count) == size_) {
clear();
return {count, this->end()};
}
- if (begin.node == end.node) {
- assert(end.position > begin.position);
- begin.node->remove_values(begin.position, end.position - begin.position,
- mutable_allocator());
+ if (begin.node_ == end.node_) {
+ assert(end.position_ > begin.position_);
+ begin.node_->remove_values(begin.position_, end.position_ - begin.position_,
+ mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
const size_type target_size = size_ - count;
while (size_ > target_size) {
- if (begin.node->leaf()) {
+ if (begin.node_->is_leaf()) {
const size_type remaining_to_erase = size_ - target_size;
- const size_type remaining_in_node = begin.node->finish() - begin.position;
+ const size_type remaining_in_node =
+ begin.node_->finish() - begin.position_;
const size_type to_erase =
(std::min)(remaining_to_erase, remaining_in_node);
- begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ begin.node_->remove_values(begin.position_, to_erase,
+ mutable_allocator());
size_ -= to_erase;
begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
}
+ begin.update_generation();
return {count, begin};
}
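At the public API level this routine is reached through the two-iterator erase overload; a sketch, assuming only the documented btree_map interface:

#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m;
  for (int i = 0; i < 100; ++i) m[i] = "v";
  // erase(first, last) forwards to btree::erase_range: a single bulk
  // remove_values() when the range stays within one leaf, otherwise
  // leaf-sized chunks with rebalancing in between.
  m.erase(m.lower_bound(10), m.lower_bound(20));
  return m.size() == 90 ? 0 : 1;
}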
@@ -2259,8 +2424,7 @@ void btree<P>::clear() {
if (!empty()) {
node_type::clear_and_delete(root(), mutable_allocator());
}
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
+ mutable_root() = mutable_rightmost() = EmptyNode();
size_ = 0;
}
@@ -2269,15 +2433,15 @@ void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
- // Note: `root_` also contains the allocator and the key comparator.
- swap(root_, other.root_);
+ // Note: `rightmost_` also contains the allocator and the key comparator.
+ swap(rightmost_, other.rightmost_);
} else {
// It's undefined behavior if the allocators are unequal here.
assert(allocator() == other.allocator());
- swap(mutable_root(), other.mutable_root());
+ swap(mutable_rightmost(), other.mutable_rightmost());
swap(*mutable_key_comp(), *other.mutable_key_comp());
}
- swap(rightmost_, other.rightmost_);
+ swap(mutable_root(), other.mutable_root());
swap(size_, other.size_);
}
@@ -2285,18 +2449,18 @@ template <typename P>
void btree<P>::verify() const {
assert(root() != nullptr);
assert(leftmost() != nullptr);
- assert(rightmost_ != nullptr);
+ assert(rightmost() != nullptr);
assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
- assert(leftmost() == (++const_iterator(root(), -1)).node);
- assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
- assert(leftmost()->leaf());
- assert(rightmost_->leaf());
+ assert(leftmost() == (++const_iterator(root(), -1)).node_);
+ assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
+ assert(leftmost()->is_leaf());
+ assert(rightmost()->is_leaf());
}
template <typename P>
void btree<P>::rebalance_or_split(iterator *iter) {
- node_type *&node = iter->node;
- int &insert_position = iter->position;
+ node_type *&node = iter->node_;
+ int &insert_position = iter->position_;
assert(node->count() == node->max_count());
assert(kNodeSlots == node->max_count());
@@ -2371,19 +2535,20 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Create a new root node and set the current root node as the child of the
// new root.
parent = new_internal_node(parent);
+ parent->set_generation(root()->generation());
parent->init_child(parent->start(), root());
mutable_root() = parent;
// If the former root was a leaf node, then it's now the rightmost node.
- assert(!parent->start_child()->leaf() ||
- parent->start_child() == rightmost_);
+ assert(parent->start_child()->is_internal() ||
+ parent->start_child() == rightmost());
}
// Split the node.
node_type *split_node;
- if (node->leaf()) {
+ if (node->is_leaf()) {
split_node = new_leaf_node(parent);
node->split(insert_position, split_node, mutable_allocator());
- if (rightmost_ == node) rightmost_ = split_node;
+ if (rightmost() == node) mutable_rightmost() = split_node;
} else {
split_node = new_internal_node(parent);
node->split(insert_position, split_node, mutable_allocator());
@@ -2398,55 +2563,56 @@ void btree<P>::rebalance_or_split(iterator *iter) {
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
- if (rightmost_ == right) rightmost_ = left;
+ if (rightmost() == right) mutable_rightmost() = left;
}
template <typename P>
bool btree<P>::try_merge_or_rebalance(iterator *iter) {
- node_type *parent = iter->node->parent();
- if (iter->node->position() > parent->start()) {
+ node_type *parent = iter->node_->parent();
+ if (iter->node_->position() > parent->start()) {
// Try merging with our left sibling.
- node_type *left = parent->child(iter->node->position() - 1);
+ node_type *left = parent->child(iter->node_->position() - 1);
assert(left->max_count() == kNodeSlots);
- if (1U + left->count() + iter->node->count() <= kNodeSlots) {
- iter->position += 1 + left->count();
- merge_nodes(left, iter->node);
- iter->node = left;
+ if (1U + left->count() + iter->node_->count() <= kNodeSlots) {
+ iter->position_ += 1 + left->count();
+ merge_nodes(left, iter->node_);
+ iter->node_ = left;
return true;
}
}
- if (iter->node->position() < parent->finish()) {
+ if (iter->node_->position() < parent->finish()) {
// Try merging with our right sibling.
- node_type *right = parent->child(iter->node->position() + 1);
+ node_type *right = parent->child(iter->node_->position() + 1);
assert(right->max_count() == kNodeSlots);
- if (1U + iter->node->count() + right->count() <= kNodeSlots) {
- merge_nodes(iter->node, right);
+ if (1U + iter->node_->count() + right->count() <= kNodeSlots) {
+ merge_nodes(iter->node_, right);
return true;
}
// Try rebalancing with our right sibling. We don't perform rebalancing if
- // we deleted the first element from iter->node and the node is not
+ // we deleted the first element from iter->node_ and the node is not
// empty. This is a small optimization for the common pattern of deleting
// from the front of the tree.
if (right->count() > kMinNodeValues &&
- (iter->node->count() == 0 || iter->position > iter->node->start())) {
- int to_move = (right->count() - iter->node->count()) / 2;
+ (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
+ int to_move = (right->count() - iter->node_->count()) / 2;
to_move = (std::min)(to_move, right->count() - 1);
- iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+ iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
return false;
}
}
- if (iter->node->position() > parent->start()) {
+ if (iter->node_->position() > parent->start()) {
// Try rebalancing with our left sibling. We don't perform rebalancing if
- // we deleted the last element from iter->node and the node is not
+ // we deleted the last element from iter->node_ and the node is not
// empty. This is a small optimization for the common pattern of deleting
// from the back of the tree.
- node_type *left = parent->child(iter->node->position() - 1);
+ node_type *left = parent->child(iter->node_->position() - 1);
if (left->count() > kMinNodeValues &&
- (iter->node->count() == 0 || iter->position < iter->node->finish())) {
- int to_move = (left->count() - iter->node->count()) / 2;
+ (iter->node_->count() == 0 ||
+ iter->position_ < iter->node_->finish())) {
+ int to_move = (left->count() - iter->node_->count()) / 2;
to_move = (std::min)(to_move, left->count() - 1);
- left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
- iter->position += to_move;
+ left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
+ iter->position_ += to_move;
return false;
}
}
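The to_move arithmetic splits the surplus between siblings; with illustrative counts:

// Assume the left sibling has 9 values and iter's node has 1:
//   to_move = (9 - 1) / 2 = 4, capped at left->count() - 1 == 8  -> 4
// rebalance_left_to_right rotates 4 values through the parent delimiter,
// leaving 5 values in each node, and iter->position_ advances by 4.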
@@ -2460,9 +2626,9 @@ void btree<P>::try_shrink() {
return;
}
// Deleted the last item on the root node, shrink the height of the tree.
- if (orig_root->leaf()) {
+ if (orig_root->is_leaf()) {
assert(size() == 0);
- mutable_root() = rightmost_ = EmptyNode();
+ mutable_root() = mutable_rightmost() = EmptyNode();
} else {
node_type *child = orig_root->start_child();
child->make_root();
@@ -2474,15 +2640,16 @@ void btree<P>::try_shrink() {
template <typename P>
template <typename IterType>
inline IterType btree<P>::internal_last(IterType iter) {
- assert(iter.node != nullptr);
- while (iter.position == iter.node->finish()) {
- iter.position = iter.node->position();
- iter.node = iter.node->parent();
- if (iter.node->leaf()) {
- iter.node = nullptr;
+ assert(iter.node_ != nullptr);
+ while (iter.position_ == iter.node_->finish()) {
+ iter.position_ = iter.node_->position();
+ iter.node_ = iter.node_->parent();
+ if (iter.node_->is_leaf()) {
+ iter.node_ = nullptr;
break;
}
}
+ iter.update_generation();
return iter;
}
@@ -2490,37 +2657,39 @@ template <typename P>
template <typename... Args>
inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
-> iterator {
- if (!iter.node->leaf()) {
+ if (iter.node_->is_internal()) {
// We can't insert on an internal node. Instead, we'll insert after the
// previous value which is guaranteed to be on a leaf node.
--iter;
- ++iter.position;
+ ++iter.position_;
}
- const field_type max_count = iter.node->max_count();
+ const field_type max_count = iter.node_->max_count();
allocator_type *alloc = mutable_allocator();
- if (iter.node->count() == max_count) {
+ if (iter.node_->count() == max_count) {
// Make room in the leaf for the new item.
if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
- assert(iter.node == root());
- iter.node =
+ assert(iter.node_ == root());
+ iter.node_ =
new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
// Transfer the values from the old root to the new root.
node_type *old_root = root();
- node_type *new_root = iter.node;
+ node_type *new_root = iter.node_;
new_root->transfer_n(old_root->count(), new_root->start(),
old_root->start(), old_root, alloc);
new_root->set_finish(old_root->finish());
old_root->set_finish(old_root->start());
+ new_root->set_generation(old_root->generation());
node_type::clear_and_delete(old_root, alloc);
- mutable_root() = rightmost_ = new_root;
+ mutable_root() = mutable_rightmost() = new_root;
} else {
rebalance_or_split(&iter);
}
}
- iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
+ iter.node_->emplace_value(iter.position_, alloc, std::forward<Args>(args)...);
++size_;
+ iter.update_generation();
return iter;
}
@@ -2531,8 +2700,8 @@ inline auto btree<P>::internal_locate(const K &key) const
iterator iter(const_cast<node_type *>(root()));
for (;;) {
SearchResult<int, is_key_compare_to::value> res =
- iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
+ iter.node_->lower_bound(key, key_comp());
+ iter.position_ = res.value;
if (res.IsEq()) {
return {iter, MatchKind::kEq};
}
@@ -2540,10 +2709,10 @@ inline auto btree<P>::internal_locate(const K &key) const
// down the tree if the keys are equal, but determining equality would
// require doing an extra comparison on each node on the way down, and we
// will need to go all the way to the leaf node in the expected case.
- if (iter.node->leaf()) {
+ if (iter.node_->is_leaf()) {
break;
}
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
// Note: in the non-key-compare-to case, the key may actually be equivalent
// here (and the MatchKind::kNe is ignored).
@@ -2563,13 +2732,13 @@ auto btree<P>::internal_lower_bound(const K &key) const
SearchResult<int, is_key_compare_to::value> res;
bool seen_eq = false;
for (;;) {
- res = iter.node->lower_bound(key, key_comp());
- iter.position = res.value;
- if (iter.node->leaf()) {
+ res = iter.node_->lower_bound(key, key_comp());
+ iter.position_ = res.value;
+ if (iter.node_->is_leaf()) {
break;
}
seen_eq = seen_eq || res.IsEq();
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
if (res.IsEq()) return {iter, MatchKind::kEq};
return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
@@ -2580,11 +2749,11 @@ template <typename K>
auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
iterator iter(const_cast<node_type *>(root()));
for (;;) {
- iter.position = iter.node->upper_bound(key, key_comp());
- if (iter.node->leaf()) {
+ iter.position_ = iter.node_->upper_bound(key, key_comp());
+ if (iter.node_->is_leaf()) {
break;
}
- iter.node = iter.node->child(iter.position);
+ iter.node_ = iter.node_->child(iter.position_);
}
return internal_last(iter);
}
@@ -2599,7 +2768,7 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
}
} else {
const iterator iter = internal_last(res.value);
- if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+ if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
return iter;
}
}
@@ -2621,7 +2790,7 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
assert(!compare_keys(node->key(i), node->key(i - 1)));
}
int count = node->count();
- if (!node->leaf()) {
+ if (node->is_internal()) {
for (int i = node->start(); i <= node->finish(); ++i) {
assert(node->child(i) != nullptr);
assert(node->child(i)->parent() == node);
@@ -2634,6 +2803,50 @@ int btree<P>::internal_verify(const node_type *node, const key_type *lo,
return count;
}
+struct btree_access {
+ template <typename BtreeContainer, typename Pred>
+ static auto erase_if(BtreeContainer &container, Pred pred)
+ -> typename BtreeContainer::size_type {
+ const auto initial_size = container.size();
+ auto &tree = container.tree_;
+ auto *alloc = tree.mutable_allocator();
+ for (auto it = container.begin(); it != container.end();) {
+ if (!pred(*it)) {
+ ++it;
+ continue;
+ }
+ auto *node = it.node_;
+ if (node->is_internal()) {
+ // Handle internal nodes normally.
+ it = container.erase(it);
+ continue;
+ }
+ // If this is a leaf node, then we do all the erases from this node
+ // at once before doing rebalancing.
+
+ // The current position to transfer slots to.
+ int to_pos = it.position_;
+ node->value_destroy(it.position_, alloc);
+ while (++it.position_ < node->finish()) {
+ it.update_generation();
+ if (pred(*it)) {
+ node->value_destroy(it.position_, alloc);
+ } else {
+ node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc);
+ }
+ }
+ const int num_deleted = node->finish() - to_pos;
+ tree.size_ -= num_deleted;
+ node->set_finish(to_pos);
+ it.position_ = to_pos;
+ it = tree.rebalance_after_delete(it);
+ }
+ return initial_size - container.size();
+ }
+};
+
+#undef ABSL_BTREE_ENABLE_GENERATIONS
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
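btree_access::erase_if batches leaf-local erases before each rebalance, as the loop above shows. A usage sketch, assuming the public absl::erase_if overloads for btree containers that this internal hook backs:

#include <string>
#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {{1, "a"}, {2, "b"}, {3, "c"}};
  // One pass over the tree; erases within a single leaf are coalesced
  // before rebalancing.
  absl::erase_if(m, [](const std::pair<const int, std::string>& kv) {
    return kv.first % 2 != 0;
  });
  return m.size() == 1 ? 0 : 1;  // only {2, "b"} survives
}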
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index a99668c7..fc2f740a 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -44,8 +44,8 @@ class btree_container {
// transparent case.
template <class K>
using key_arg =
- typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
- template type<K, typename Tree::key_type>;
+ typename KeyArg<params_type::kIsKeyCompareTransparent>::template type<
+ K, typename Tree::key_type>;
public:
using key_type = typename Tree::key_type;
@@ -166,9 +166,10 @@ class btree_container {
// Extract routines.
node_type extract(iterator position) {
- // Use Move instead of Transfer, because the rebalancing code expects to
- // have a valid object to scribble metadata bits on top of.
- auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+ // Use Construct instead of Transfer because the rebalancing code will
+ // destroy the slot later.
+ auto node =
+ CommonAccess::Construct<node_type>(get_allocator(), position.slot());
erase(position);
return node;
}
@@ -228,6 +229,7 @@ class btree_container {
}
protected:
+ friend struct btree_access;
Tree tree_;
};
@@ -290,8 +292,11 @@ class btree_set_container : public btree_container<Tree> {
}
template <typename... Args>
std::pair<iterator, bool> emplace(Args &&... args) {
- init_type v(std::forward<Args>(args)...);
- return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ auto *slot = CommonAccess::GetSlot(node);
+ return this->tree_.insert_unique(params_type::key(slot), slot);
}
iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
@@ -305,9 +310,12 @@ class btree_set_container : public btree_container<Tree> {
}
template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- init_type v(std::forward<Args>(args)...);
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ auto *slot = CommonAccess::GetSlot(node);
return this->tree_
- .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
+ .insert_hint_unique(iterator(hint), params_type::key(slot), slot)
.first;
}
template <typename InputIterator>
@@ -536,6 +544,7 @@ class btree_multiset_container : public btree_container<Tree> {
using params_type = typename Tree::params_type;
using init_type = typename params_type::init_type;
using is_key_compare_to = typename params_type::is_key_compare_to;
+ friend class BtreeNodePeer;
template <class K>
using key_arg = typename super_type::template key_arg<K>;
@@ -596,12 +605,18 @@ class btree_multiset_container : public btree_container<Tree> {
}
template <typename... Args>
iterator emplace(Args &&... args) {
- return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ return this->tree_.insert_multi(CommonAccess::GetSlot(node));
}
template <typename... Args>
iterator emplace_hint(const_iterator hint, Args &&... args) {
- return this->tree_.insert_hint_multi(
- iterator(hint), init_type(std::forward<Args>(args)...));
+ // Use a node handle to manage a temp slot.
+ auto node = CommonAccess::Construct<node_type>(this->get_allocator(),
+ std::forward<Args>(args)...);
+ return this->tree_.insert_hint_multi(iterator(hint),
+ CommonAccess::GetSlot(node));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
@@ -667,6 +682,7 @@ template <typename Tree>
class btree_multimap_container : public btree_multiset_container<Tree> {
using super_type = btree_multiset_container<Tree>;
using params_type = typename Tree::params_type;
+ friend class BtreeNodePeer;
public:
using mapped_type = typename params_type::mapped_type;
diff --git a/absl/container/internal/common.h b/absl/container/internal/common.h
index 030e9d4a..416d9aa3 100644
--- a/absl/container/internal/common.h
+++ b/absl/container/internal/common.h
@@ -84,10 +84,11 @@ class node_handle_base {
PolicyTraits::transfer(alloc(), slot(), s);
}
- struct move_tag_t {};
- node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ struct construct_tag_t {};
+ template <typename... Args>
+ node_handle_base(construct_tag_t, const allocator_type& a, Args&&... args)
: alloc_(a) {
- PolicyTraits::construct(alloc(), slot(), s);
+ PolicyTraits::construct(alloc(), slot(), std::forward<Args>(args)...);
}
void destroy() {
@@ -186,8 +187,8 @@ struct CommonAccess {
}
template <typename T, typename... Args>
- static T Move(Args&&... args) {
- return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ static T Construct(Args&&... args) {
+ return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}
};
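The construct_tag_t rename generalizes the old move-only path: a node handle can now be built from arbitrary constructor arguments. A self-contained analogue of the pattern (Widget, Handle, and Construct are hypothetical stand-ins for slot_type, node_handle_base, and CommonAccess::Construct):

#include <utility>

struct Widget {
  int v;
  explicit Widget(int v) : v(v) {}
};

class Handle {
 public:
  struct construct_tag_t {};
  template <typename... Args>
  explicit Handle(construct_tag_t, Args&&... args)
      : w_(std::forward<Args>(args)...) {}

 private:
  Widget w_;
};

// Mirrors CommonAccess::Construct: a factory forwards arbitrary args through
// the tag constructor, so Handle needs no public forwarding constructor.
template <typename T, typename... Args>
T Construct(Args&&... args) {
  return T(typename T::construct_tag_t{}, std::forward<Args>(args)...);
}

int main() {
  Handle h = Construct<Handle>(42);
  (void)h;
}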
diff --git a/absl/container/internal/compressed_tuple_test.cc b/absl/container/internal/compressed_tuple_test.cc
index 62a7483e..74111f97 100644
--- a/absl/container/internal/compressed_tuple_test.cc
+++ b/absl/container/internal/compressed_tuple_test.cc
@@ -403,6 +403,16 @@ TEST(CompressedTupleTest, EmptyFinalClass) {
}
#endif
+// TODO(b/214288561): enable this test.
+TEST(CompressedTupleTest, DISABLED_NestedEbo) {
+ struct Empty1 {};
+ struct Empty2 {};
+ CompressedTuple<Empty1, CompressedTuple<Empty2>, int> x;
+ CompressedTuple<Empty1, Empty2, int> y;
+ // Currently fails with sizeof(x) == 8, sizeof(y) == 4.
+ EXPECT_EQ(sizeof(x), sizeof(y));
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index e67529ec..00e9f6d7 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -174,7 +174,7 @@ decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
//
// 2. auto a = PairArgs(args...);
// std::pair<F, S> p(std::piecewise_construct,
-// std::move(p.first), std::move(p.second));
+// std::move(a.first), std::move(a.second));
inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
template <class F, class S>
std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
@@ -402,6 +402,15 @@ struct map_slot_policy {
}
}
+ // Construct this slot by copying from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot,
+ const slot_type* other) {
+ emplace(slot);
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ other->value);
+ }
+
template <class Allocator>
static void destroy(Allocator* alloc, slot_type* slot) {
if (kMutableKeys::value) {
@@ -424,33 +433,6 @@ struct map_slot_policy {
}
destroy(alloc, old_slot);
}
-
- template <class Allocator>
- static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
- if (kMutableKeys::value) {
- using std::swap;
- swap(a->mutable_value, b->mutable_value);
- } else {
- value_type tmp = std::move(a->value);
- absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
- std::move(b->value));
- absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
- std::move(tmp));
- }
- }
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
- if (kMutableKeys::value) {
- dest->mutable_value = std::move(src->mutable_value);
- } else {
- absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
- absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
- std::move(src->value));
- }
- }
};
} // namespace container_internal
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
index 927cf082..66068a5a 100644
--- a/absl/container/internal/counting_allocator.h
+++ b/absl/container/internal/counting_allocator.h
@@ -80,7 +80,15 @@ class CountingAllocator {
template <typename U>
void destroy(U* p) {
Allocator allocator;
+ // Work around a GCC 12 false-positive -Wuse-after-free warning.

+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuse-after-free"
+#endif
AllocatorTraits::destroy(allocator, p);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
if (instance_count_ != nullptr) {
*instance_count_ -= 1;
}
diff --git a/absl/container/internal/hash_function_defaults_test.cc b/absl/container/internal/hash_function_defaults_test.cc
index 59576b8e..9f0a4c72 100644
--- a/absl/container/internal/hash_function_defaults_test.cc
+++ b/absl/container/internal/hash_function_defaults_test.cc
@@ -310,7 +310,7 @@ struct StringLikeTest : public ::testing::Test {
hash_default_hash<typename T::first_type> hash;
};
-TYPED_TEST_CASE_P(StringLikeTest);
+TYPED_TEST_SUITE_P(StringLikeTest);
TYPED_TEST_P(StringLikeTest, Eq) {
EXPECT_TRUE(this->eq(this->a1, this->b1));
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 40cce047..efc1be58 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -21,33 +21,43 @@
#include <limits>
#include "absl/base/attributes.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int HashtablezInfo::kMaxStackDepth;
+#endif
namespace {
ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+std::atomic<HashtablezConfigListener> g_hashtablez_config_listener{nullptr};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
+void TriggerHashtablezConfigListener() {
+ auto* listener = g_hashtablez_config_listener.load(std::memory_order_acquire);
+ if (listener != nullptr) listener();
+}
+
} // namespace
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
HashtablezSampler& GlobalHashtablezSampler() {
@@ -55,13 +65,11 @@ HashtablezSampler& GlobalHashtablezSampler() {
return *sampler;
}
-// TODO(bradleybear): The comments at this constructors declaration say that the
-// fields are not initialized, but this definition does initialize the fields.
-// Something needs to be cleaned up.
-HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
-void HashtablezInfo::PrepareForSampling() {
+void HashtablezInfo::PrepareForSampling(int64_t stride,
+ size_t inline_element_size_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -74,11 +82,13 @@ void HashtablezInfo::PrepareForSampling() {
max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
+ weight = stride;
// The inliner makes hardcoded skip_count difficult (especially when combined
// with LTO). We use the ability to exclude stacks by regex when encoding
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
+ inline_element_size = inline_element_size_value;
}
static bool ShouldForceSampling() {
@@ -101,23 +111,32 @@ static bool ShouldForceSampling() {
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
- *next_sample = 1;
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
+ next_sample.next_sample = 1;
+ const int64_t old_stride = exchange(next_sample.sample_stride, 1);
+ HashtablezInfo* result =
+ GlobalHashtablezSampler().Register(old_stride, inline_element_size);
return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- *next_sample = std::numeric_limits<int64_t>::max();
+ next_sample = {
+ std::numeric_limits<int64_t>::max(),
+ std::numeric_limits<int64_t>::max(),
+ };
return nullptr;
#else
- bool first = *next_sample < 0;
- *next_sample = g_exponential_biased_generator.GetStride(
+ bool first = next_sample.next_sample < 0;
+
+ const int64_t next_stride = g_exponential_biased_generator.GetStride(
g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+
+ next_sample.next_sample = next_stride;
+ const int64_t old_stride = exchange(next_sample.sample_stride, next_stride);
// Small values of interval are equivalent to just sampling next time.
- ABSL_ASSERT(*next_sample >= 1);
+ ABSL_ASSERT(next_stride >= 1);
// g_hashtablez_enabled can be dynamically flipped, we need to set a threshold
// low enough that we will start sampling in a reasonable time, so we just use
@@ -127,13 +146,11 @@ HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
// We will only be negative on our first count, so we should just retry in
// that case.
if (first) {
- if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size);
}
- HashtablezInfo* result = GlobalHashtablezSampler().Register();
- result->inline_element_size = inline_element_size;
- return result;
+ return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
#endif
}
@@ -146,7 +163,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
@@ -163,11 +180,33 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->size.fetch_add(1, std::memory_order_relaxed);
}
+void SetHashtablezConfigListener(HashtablezConfigListener l) {
+ g_hashtablez_config_listener.store(l, std::memory_order_release);
+}
+
+bool IsHashtablezEnabled() {
+ return g_hashtablez_enabled.load(std::memory_order_acquire);
+}
+
void SetHashtablezEnabled(bool enabled) {
+ SetHashtablezEnabledInternal(enabled);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezEnabledInternal(bool enabled) {
g_hashtablez_enabled.store(enabled, std::memory_order_release);
}
+int32_t GetHashtablezSampleParameter() {
+ return g_hashtablez_sample_parameter.load(std::memory_order_acquire);
+}
+
void SetHashtablezSampleParameter(int32_t rate) {
+ SetHashtablezSampleParameterInternal(rate);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezSampleParameterInternal(int32_t rate) {
if (rate > 0) {
g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
} else {
@@ -176,7 +215,16 @@ void SetHashtablezSampleParameter(int32_t rate) {
}
}
+int32_t GetHashtablezMaxSamples() {
+ return GlobalHashtablezSampler().GetMaxSamples();
+}
+
void SetHashtablezMaxSamples(int32_t max) {
+ SetHashtablezMaxSamplesInternal(max);
+ TriggerHashtablezConfigListener();
+}
+
+void SetHashtablezMaxSamplesInternal(int32_t max) {
if (max > 0) {
GlobalHashtablezSampler().SetMaxSamples(max);
} else {
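The listener plus the new getters let a process observe sampling configuration changes; a sketch using only the functions declared above (an internal namespace, so subject to change):

#include <cstdio>
#include "absl/container/internal/hashtablez_sampler.h"

namespace ci = absl::container_internal;

void OnHashtablezConfigChange() {
  // Invoked after SetHashtablezEnabled / SetHashtablezSampleParameter /
  // SetHashtablezMaxSamples commit their new values.
  std::printf("hashtablez: enabled=%d rate=%d max=%d\n",
              ci::IsHashtablezEnabled() ? 1 : 0,
              ci::GetHashtablezSampleParameter(),
              ci::GetHashtablezMaxSamples());
}

int main() {
  ci::SetHashtablezConfigListener(&OnHashtablezConfigChange);
  ci::SetHashtablezSampleParameter(1 << 12);  // triggers the listener
}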
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index 91fcdb34..d4016d8a 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -44,9 +44,9 @@
#include <memory>
#include <vector>
+#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
-#include "absl/container/internal/have_sse.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -67,7 +67,8 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
- void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+ void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
+ ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
// thread-safe.
@@ -84,18 +85,18 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// All of the fields below are set by `PrepareForSampling`; they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
- // These are guarded by init_mu, but that is not externalized to clients, who
- // can only read them during `HashtablezSampler::Iterate` which will hold the
- // lock.
+ // These are guarded by init_mu, but that is not externalized to clients,
+ // which can read them only during `SampleRecorder::Iterate`, which holds
+ // the lock.
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
- size_t inline_element_size;
+ size_t inline_element_size; // How big is the slot?
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
@@ -144,7 +145,15 @@ inline void RecordEraseSlow(HashtablezInfo* info) {
std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+struct SamplingState {
+ int64_t next_sample;
+ // When we make a sampling decision, we record the sampling stride (the
+ // distance until the next sample) so we can weight each sample.
+ int64_t sample_stride;
+};
+
+HashtablezInfo* SampleSlow(SamplingState& next_sample,
+ size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -234,7 +243,7 @@ class HashtablezInfoHandle {
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
@@ -242,11 +251,11 @@ extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
- if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
- SampleSlow(&global_next_sample, inline_element_size));
+ SampleSlow(global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
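On the weight bookkeeping: each sampled table records the stride in force when it was chosen, so one sample stands in for roughly that many tables. Illustrative only, a Horvitz-Thompson-style total estimate from the recorded weights:

#include <cstdint>
#include <vector>

// Each HashtablezInfo records `weight` == the stride active when it was
// sampled, i.e. roughly how many tables that one sample represents.
std::int64_t EstimateTotalTables(const std::vector<std::int64_t>& weights) {
  std::int64_t total = 0;
  for (std::int64_t w : weights) total += w;  // each sample "covers" w tables
  return total;
}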
@@ -258,14 +267,23 @@ using HashtablezSampler =
// Returns a global Sampler.
HashtablezSampler& GlobalHashtablezSampler();
+using HashtablezConfigListener = void (*)();
+void SetHashtablezConfigListener(HashtablezConfigListener l);
+
// Enables or disables sampling for Swiss tables.
+bool IsHashtablezEnabled();
void SetHashtablezEnabled(bool enabled);
+void SetHashtablezEnabledInternal(bool enabled);
// Sets the rate at which Swiss tables will be sampled.
+int32_t GetHashtablezSampleParameter();
void SetHashtablezSampleParameter(int32_t rate);
+void SetHashtablezSampleParameterInternal(int32_t rate);
// Sets a soft max for the number of samples that will be kept.
+int32_t GetHashtablezMaxSamples();
void SetHashtablezMaxSamples(int32_t max);
+void SetHashtablezMaxSamplesInternal(int32_t max);
// Configuration override.
// This allows process-wide sampling without depending on order of
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 449619a3..665d518f 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -21,7 +21,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
-#include "absl/container/internal/have_sse.h"
+#include "absl/base/config.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
@@ -30,7 +30,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
@@ -70,7 +70,9 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
}
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
- auto* info = s->Register();
+ const int64_t test_stride = 123;
+ const size_t test_element_size = 17;
+ auto* info = s->Register(test_stride, test_element_size);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -78,12 +80,12 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
+ const int64_t test_stride = 123;
const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride, test_element_size);
- info.inline_element_size = test_element_size;
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -95,6 +97,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
+ EXPECT_EQ(info.weight, test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
@@ -108,7 +111,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
- info.PrepareForSampling();
+ info.PrepareForSampling(test_stride * 2, test_element_size);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,6 +122,7 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
EXPECT_EQ(info.max_reserve.load(), 0);
+ EXPECT_EQ(info.weight, 2 * test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
@@ -126,7 +130,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
TEST(HashtablezInfoTest, RecordStorageChanged) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const int64_t test_stride = 21;
+ const size_t test_element_size = 19;
+ info.PrepareForSampling(test_stride, test_element_size);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
@@ -138,7 +144,9 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
TEST(HashtablezInfoTest, RecordInsert) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const int64_t test_stride = 25;
+ const size_t test_element_size = 23;
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -158,11 +166,11 @@ TEST(HashtablezInfoTest, RecordInsert) {
}
TEST(HashtablezInfoTest, RecordErase) {
+ const int64_t test_stride = 31;
const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
- info.inline_element_size = test_element_size;
+ info.PrepareForSampling(test_stride, test_element_size);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -174,11 +182,11 @@ TEST(HashtablezInfoTest, RecordErase) {
}
TEST(HashtablezInfoTest, RecordRehash) {
+ const int64_t test_stride = 33;
const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
- info.inline_element_size = test_element_size;
+ info.PrepareForSampling(test_stride, test_element_size);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -203,7 +211,9 @@ TEST(HashtablezInfoTest, RecordRehash) {
TEST(HashtablezInfoTest, RecordReservation) {
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
- info.PrepareForSampling();
+ const int64_t test_stride = 35;
+ const size_t test_element_size = 33;
+ info.PrepareForSampling(test_stride, test_element_size);
RecordReservationSlow(&info, 3);
EXPECT_EQ(info.max_reserve.load(), 3);
@@ -223,9 +233,10 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@@ -237,9 +248,10 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
- int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
- EXPECT_GT(next_sample, 0);
+ SamplingState next_sample = {0, 0};
+ HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
+ EXPECT_GT(next_sample.next_sample, 0);
+ EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
}
@@ -266,13 +278,16 @@ TEST(HashtablezSamplerTest, Sample) {
TEST(HashtablezSamplerTest, Handle) {
auto& sampler = GlobalHashtablezSampler();
- HashtablezInfoHandle h(sampler.Register());
+ const int64_t test_stride = 41;
+ const size_t test_element_size = 39;
+ HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
bool found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
if (&h == info) {
+ EXPECT_EQ(h.weight, test_stride);
EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
found = true;
}
@@ -338,18 +353,20 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
- pool.Schedule([&sampler, &stop]() {
+ const int64_t sampling_stride = 11 + i % 3;
+ const size_t elt_size = 10 + i % 2;
+ pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(sampling_stride, elt_size));
break;
}
case 1: {
@@ -358,6 +375,7 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
HashtablezInfo* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
+ EXPECT_EQ(info->weight, sampling_stride);
sampler.Unregister(info);
break;
}
diff --git a/absl/container/internal/have_sse.h b/absl/container/internal/have_sse.h
deleted file mode 100644
index e75e1a16..00000000
--- a/absl/container/internal/have_sse.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Shared config probing for SSE instructions used in Swiss tables.
-#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#if defined(__SSE2__) || \
- (defined(_MSC_VER) && \
- (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
-#endif
-#endif
-
-#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#ifdef __SSSE3__
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
-#else
-#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
-#endif
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
- !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#error "Bad configuration!"
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
-#include <emmintrin.h>
-#endif
-
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
-#include <tmmintrin.h>
-#endif
-
-#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index 1d7d6cda..54c92a01 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -40,7 +40,6 @@ namespace inlined_vector_internal {
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
-#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
template <typename A>
@@ -94,16 +93,30 @@ struct TypeIdentity {
template <typename T>
using NoTypeDeduction = typename TypeIdentity<T>::type;
+template <typename A, bool IsTriviallyDestructible =
+ absl::is_trivially_destructible<ValueType<A>>::value>
+struct DestroyAdapter;
+
template <typename A>
-void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
- SizeType<A> destroy_size) {
- if (destroy_first != nullptr) {
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ false> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
for (SizeType<A> i = destroy_size; i != 0;) {
--i;
AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
}
-}
+};
+
+template <typename A>
+struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
+ static void DestroyElements(A& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
+ static_cast<void>(allocator);
+ static_cast<void>(destroy_first);
+ static_cast<void>(destroy_size);
+ }
+};
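
The adapter above dispatches on `absl::is_trivially_destructible` so that trivially destructible element types skip the destructor loop entirely at the type level, rather than relying on the optimizer to prove the loop is dead. A minimal standalone sketch of the same tag-dispatch pattern (all names here are hypothetical, not part of Abseil):

#include <cstddef>
#include <type_traits>

// Primary template: picks a specialization based on the destructibility trait.
template <typename T, bool kTrivial = std::is_trivially_destructible<T>::value>
struct DestroySketch;

template <typename T>
struct DestroySketch<T, /* kTrivial= */ false> {
  static void Run(T* first, size_t n) {
    for (size_t i = n; i != 0;) first[--i].~T();  // destroy in reverse order
  }
};

template <typename T>
struct DestroySketch<T, /* kTrivial= */ true> {
  static void Run(T*, size_t) {}  // trivially destructible: nothing to do
};
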
template <typename A>
struct Allocation {
@@ -133,7 +146,7 @@ void ConstructElements(NoTypeDeduction<A>& allocator,
for (SizeType<A> i = 0; i < construct_size; ++i) {
ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
ABSL_INTERNAL_CATCH_ANY {
- DestroyElements<A>(allocator, construct_first, i);
+ DestroyAdapter<A>::DestroyElements(allocator, construct_first, i);
ABSL_INTERNAL_RETHROW;
}
}
@@ -253,7 +266,7 @@ class ConstructionTransaction {
~ConstructionTransaction() {
if (DidConstruct()) {
- DestroyElements<A>(GetAllocator(), GetData(), GetSize());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), GetData(), GetSize());
}
}
@@ -297,10 +310,10 @@ class Storage {
// Storage Constructors and Destructor
// ---------------------------------------------------------------------------
- Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
+ Storage() : metadata_(A(), /* size and is_allocated */ 0u) {}
explicit Storage(const A& allocator)
- : metadata_(allocator, /* size and is_allocated */ 0) {}
+ : metadata_(allocator, /* size and is_allocated */ 0u) {}
~Storage() {
if (GetSizeAndIsAllocated() == 0) {
@@ -416,7 +429,7 @@ class Storage {
}
void SubtractSize(SizeType<A> count) {
- assert(count <= GetSize());
+ ABSL_HARDENING_ASSERT(count <= GetSize());
GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
@@ -427,7 +440,8 @@ class Storage {
}
void MemcpyFrom(const Storage& other_storage) {
- assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
+ ABSL_HARDENING_ASSERT(IsMemcpyOk<A>::value ||
+ other_storage.GetIsAllocated());
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -469,14 +483,14 @@ class Storage {
template <typename T, size_t N, typename A>
void Storage<T, N, A>::DestroyContents() {
Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
- DestroyElements<A>(GetAllocator(), data, GetSize());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), data, GetSize());
DeallocateIfAllocated();
}
template <typename T, size_t N, typename A>
void Storage<T, N, A>::InitFrom(const Storage& other) {
const SizeType<A> n = other.GetSize();
- assert(n > 0); // Empty sources handled handled in caller.
+ ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled in caller.
ConstPointer<A> src;
Pointer<A> dst;
if (!other.GetIsAllocated()) {
@@ -508,8 +522,8 @@ template <typename ValueAdapter>
auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
// Only callable from constructors!
- assert(!GetIsAllocated());
- assert(GetSize() == 0);
+ ABSL_HARDENING_ASSERT(!GetIsAllocated());
+ ABSL_HARDENING_ASSERT(GetSize() == 0);
Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
@@ -566,7 +580,8 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
construct_loop.size());
- DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), destroy_loop.data(),
+ destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
@@ -587,7 +602,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
A& alloc = GetAllocator();
if (new_size <= size) {
// Destroy extra old elements.
- DestroyElements<A>(alloc, base + new_size, size - new_size);
+ DestroyAdapter<A>::DestroyElements(alloc, base + new_size, size - new_size);
} else if (new_size <= storage_view.capacity) {
// Construct new elements in place.
ConstructElements<A>(alloc, base + size, values, new_size - size);
@@ -595,7 +610,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
// Steps:
// a. Allocate new backing store.
// b. Construct new elements in new backing store.
- // c. Move existing elements from old backing store to now.
+ // c. Move existing elements from old backing store to new backing store.
// d. Destroy all elements in old backing store.
// Use transactional wrappers for the first two steps so we can roll
// back if necessary due to exceptions.
@@ -611,7 +626,7 @@ auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
(MoveIterator<A>(base)));
ConstructElements<A>(alloc, new_data, move_values, size);
- DestroyElements<A>(alloc, base, size);
+ DestroyAdapter<A>::DestroyElements(alloc, base, size);
std::move(construction_tx).Commit();
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -650,7 +665,8 @@ auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
move_values, storage_view.size - insert_index);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
std::move(construction_tx).Commit();
std::move(move_construction_tx).Commit();
@@ -753,7 +769,8 @@ auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
ABSL_INTERNAL_RETHROW;
}
// Destroy elements in old backing store.
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -778,9 +795,9 @@ auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
- DestroyElements<A>(GetAllocator(),
- storage_view.data + (storage_view.size - erase_size),
- erase_size);
+ DestroyAdapter<A>::DestroyElements(
+ GetAllocator(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
@@ -804,7 +821,8 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
ConstructElements<A>(GetAllocator(), new_data, move_values,
storage_view.size);
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
DeallocateIfAllocated();
SetAllocation(std::move(allocation_tx).Release());
@@ -814,7 +832,7 @@ auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::ShrinkToFit() -> void {
// May only be called on allocated instances!
- assert(GetIsAllocated());
+ ABSL_HARDENING_ASSERT(GetIsAllocated());
StorageView<A> storage_view{GetAllocatedData(), GetSize(),
GetAllocatedCapacity()};
@@ -847,7 +865,8 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DestroyAdapter<A>::DestroyElements(GetAllocator(), storage_view.data,
+ storage_view.size);
MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
storage_view.capacity);
@@ -862,7 +881,7 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
template <typename T, size_t N, typename A>
auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
using std::swap;
- assert(this != other_storage_ptr);
+ ABSL_HARDENING_ASSERT(this != other_storage_ptr);
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
@@ -883,9 +902,10 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
move_values,
large_ptr->GetSize() - small_ptr->GetSize());
- DestroyElements<A>(large_ptr->GetAllocator(),
- large_ptr->GetInlinedData() + small_ptr->GetSize(),
- large_ptr->GetSize() - small_ptr->GetSize());
+ DestroyAdapter<A>::DestroyElements(
+ large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
@@ -904,23 +924,24 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocation(
- {allocated_storage_view.data, allocated_storage_view.capacity});
+ allocated_ptr->SetAllocation(Allocation<A>{
+ allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- DestroyElements<A>(inlined_ptr->GetAllocator(),
- inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
+ DestroyAdapter<A>::DestroyElements(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
- inlined_ptr->SetAllocation(
- {allocated_storage_view.data, allocated_storage_view.capacity});
+ inlined_ptr->SetAllocation(Allocation<A>{allocated_storage_view.data,
+ allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
-// End ignore "array-bounds" and "maybe-uninitialized"
+// End ignore "array-bounds"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
diff --git a/absl/container/internal/node_hash_policy.h b/absl/container/internal/node_slot_policy.h
index 4617162f..baba5743 100644
--- a/absl/container/internal/node_hash_policy.h
+++ b/absl/container/internal/node_slot_policy.h
@@ -30,8 +30,8 @@
// It may also optionally define `value()` and `apply()`. For documentation on
// these, see hash_policy_traits.h.
-#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
-#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
#include <cassert>
#include <cstddef>
@@ -46,7 +46,7 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <class Reference, class Policy>
-struct node_hash_policy {
+struct node_slot_policy {
static_assert(std::is_lvalue_reference<Reference>::value, "");
using slot_type = typename std::remove_cv<
@@ -89,4 +89,4 @@ struct node_hash_policy {
ABSL_NAMESPACE_END
} // namespace absl
-#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_
diff --git a/absl/container/internal/node_hash_policy_test.cc b/absl/container/internal/node_slot_policy_test.cc
index 84aabba9..51b7467b 100644
--- a/absl/container/internal/node_hash_policy_test.cc
+++ b/absl/container/internal/node_slot_policy_test.cc
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include <memory>
@@ -27,7 +27,7 @@ namespace {
using ::testing::Pointee;
-struct Policy : node_hash_policy<int&, Policy> {
+struct Policy : node_slot_policy<int&, Policy> {
using key_type = int;
using init_type = int;
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 687bcb8a..c63a2e02 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -23,13 +23,17 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
+#endif
// Returns "random" seed.
inline size_t RandomSeed() {
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 12682b35..ea912f83 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -53,51 +53,121 @@
//
// IMPLEMENTATION DETAILS
//
-// The table stores elements inline in a slot array. In addition to the slot
-// array the table maintains some control state per slot. The extra state is one
-// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
-// the hash of an occupied slot. The table is split into logical groups of
-// slots, like so:
+// # Table Layout
+//
+// A raw_hash_set's backing array consists of control bytes followed by slots
+// that may or may not contain objects.
+//
+// The layout of the backing array, for `capacity` slots, is thus, as a
+// pseudo-struct:
+//
+// struct BackingArray {
+// // Control bytes for the "real" slots.
+// ctrl_t ctrl[capacity];
+// // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
+// // stop and serves no other purpose.
+// ctrl_t sentinel;
+// // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
+// // that if a probe sequence picks a value near the end of `ctrl`,
+// // `Group` will have valid control bytes to look at.
+// ctrl_t clones[kWidth - 1];
+// // The actual slot data.
+// slot_type slots[capacity];
+// };
+//
+// The length of this array is computed by `AllocSize()` below.
+//
+// Control bytes (`ctrl_t`) are bytes (collected into groups of a
+// platform-specific size) that define the state of the corresponding slot in
+// the slot array. Group manipulation is tightly optimized to be as efficient
+// as possible: SSE and friends on x86, clever bit operations on other arches.
//
// Group 1 Group 2 Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
-// On lookup the hash is split into two parts:
-// - H2: 7 bits (those stored in the control bytes)
-// - H1: the rest of the bits
-// The groups are probed using H1. For each group the slots are matched to H2 in
-// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
-// is low (8 or 16) in almost all cases a match in H2 is also a lookup hit.
+// Each control byte is either a special value for empty slots, deleted slots
+// (sometimes called *tombstones*), and a special end-of-table marker used by
+// iterators, or, if occupied, seven bits (H2) from the hash of the value in the
+// corresponding slot.
+//
+// Storing control bytes in a separate array also has beneficial cache effects,
+// since more logical slots will fit into a cache line.
+//
+// # Hashing
+//
+// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
+// `H1(hash(x))` is an index into `slots`, and essentially the starting point
+// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
+// objects that cannot possibly be the one we are looking for.
+//
+// # Table operations
//
-// On insert, once the right group is found (as in lookup), its slots are
-// filled in order.
+// The key operations are `insert`, `find`, and `erase`.
//
-// On erase a slot is cleared. In case the group did not have any empty slots
-// before the erase, the erased slot is marked as deleted.
+// Since `insert` and `erase` are implemented in terms of `find`, we describe
+// `find` first. To `find` a value `x`, we compute `hash(x)`. From
+// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
+// group of slots in some interesting order.
//
-// Groups without empty slots (but maybe with deleted slots) extend the probe
-// sequence. The probing algorithm is quadratic. Given N the number of groups,
-// the probing function for the i'th probe is:
+// We now walk through these indices. At each index, we select the entire group
+// starting with that index and extract potential candidates: occupied slots
+// with a control byte equal to `H2(hash(x))`. If the group contains an empty
+// slot, the search stops: `x` is not in the table. Each candidate slot `y` is
+// compared with `x`; if `x == y`, we are done and return `&y`; otherwise we
+// continue to the next probe index. Tombstones effectively behave like full
+// slots that never match the value we're looking for.
//
-// P(0) = H1 % N
+// The `H2` bits ensure when we compare a slot to an object with `==`, we are
+// likely to have actually found the object. That is, the chance is low that
+// `==` is called and returns `false`. Thus, when we search for an object, we
+// are unlikely to call `==` many times. This likelihood can be analyzed as
+// follows (assuming that H2 is a random enough hash function).
//
-// P(i) = (P(i - 1) + i) % N
+// Let's assume that there are `k` "wrong" objects that must be examined in a
+// probe sequence. For example, when doing a `find` on an object that is in the
+// table, `k` is the number of objects between the start of the probe sequence
+// and the final found object (not including the final found object). The
+// expected number of objects with an H2 match is then `k/128`. Measurements
+// and analysis indicate that even at high load factors, `k` is less than 32,
+// meaning that the number of "false positive" comparisons we must perform is
+// less than 1/4 per `find`.
+//
+// `insert` is implemented in terms of `unchecked_insert`, which inserts a
+// value presumed to not be in the table (violating this requirement will cause
+// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
+// it, we construct a `probe_seq` once again, and use it to find the first
+// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
+// first such slot in the group and mark it as full with `x`'s H2.
//
-// This probing function guarantees that after N probes, all the groups of the
-// table will be probed exactly once.
+// To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
+// perform a `find` to see if it's already present; if it is, we're done. If
+// it's not, we may decide the table is getting overcrowded (i.e. the load
+// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
+// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
+// each element of the table into the new array (we know that no insertion here
+// will insert an already-present value), and discard the old backing array. At
+// this point, we may `unchecked_insert` the value `x`.
//
-// The control state and slot array are stored contiguously in a shared heap
-// allocation. The layout of this allocation is: `capacity()` control bytes,
-// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
-// <possible padding>, `capacity()` slots. The sentinel control byte is used in
-// iteration so we know when we reach the end of the table. The cloned control
-// bytes at the end of the table are cloned from the beginning of the table so
-// groups that begin near the end of the table can see a full group. In cases in
-// which there are more than `capacity()` cloned control bytes, the extra bytes
-// are `kEmpty`, and these ensure that we always see at least one empty slot and
-// can stop an unsuccessful search.
+// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
+// presents a viable, initialized slot pointee to the caller.
+//
+// `erase` is implemented in terms of `erase_at`, which takes an index to a
+// slot. Given an offset, we simply create a tombstone and destroy its contents.
+// If we can prove that the slot would not appear in a probe sequence, we can
+// mark the slot as empty instead. We can prove this by observing that if a
+// group has any empty slots, it has never been full (assuming we never create
+// an empty slot in a group with no empties, which this heuristic guarantees we
+// never do), so `find` would stop at this group anyway, since it does not
+// probe beyond groups with empties.
+//
+// `erase` is `erase_at` composed with `find`: if we have a value `x`, we can
+// perform a `find`, and then `erase_at` the resulting slot.
+//
+// To iterate, we simply traverse the array, skipping empty and deleted slots
+// and stopping when we hit a `kSentinel`.
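
Condensed into code, the `find` described above looks roughly like the following scalar model. It probes slot-by-slot rather than group-by-group, and `Table` is a stand-in type invented for exposition; the real implementation uses the `Group` machinery defined later in this file.

#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar model of find: walk the probe sequence, filter candidates by H2,
// and stop at the first empty control byte. All names are stand-ins.
struct Table {
  std::vector<int8_t> ctrl;  // -128 = kEmpty, otherwise a 7-bit H2 value
  std::vector<int> slots;
  size_t mask() const { return ctrl.size() - 1; }  // power-of-two size
};

const int* FindSketch(const Table& t, int x, size_t hash) {
  const int8_t h2 = static_cast<int8_t>(hash & 0x7F);
  size_t pos = (hash >> 7) & t.mask();  // H1 picks the starting point
  for (size_t i = 1;; ++i) {
    if (t.ctrl[pos] == h2 && t.slots[pos] == x) return &t.slots[pos];
    if (t.ctrl[pos] == -128) return nullptr;  // empty: x is not present
    pos = (pos + i) & t.mask();  // triangular probe step
  }
}
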
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
@@ -113,7 +183,9 @@
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/internal/prefetch.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
@@ -122,12 +194,27 @@
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
-#include "absl/container/internal/have_sse.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#include <arm_neon.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -142,14 +229,40 @@ template <typename AllocType>
void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
std::false_type /* propagate_on_container_swap */) {}
+// The state for a probe sequence.
+//
+// Currently, the sequence is a triangular progression of the form
+//
+// p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
+//
+// The use of `Width` ensures that each probe step does not overlap groups;
+// the sequence effectively outputs the addresses of *groups* (although not
+// necessarily aligned to any boundary). The `Group` machinery allows us
+// to check an entire group with minimal branching.
+//
+// Wrapping around at `mask + 1` is important, but not for the obvious reason.
+// As described above, the first few entries of the control byte array
+// are mirrored at the end of the array, which `Group` will find and use
+// for selecting candidates. However, when those candidates' slots are
+// actually inspected, there are no corresponding slots for the cloned bytes,
+// so we need to make sure we've treated those offsets as "wrapping around".
+//
+// It turns out that this probe sequence visits every group exactly once if the
+// number of groups is a power of two, since (i^2+i)/2 is a bijection in
+// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
template <size_t Width>
class probe_seq {
public:
+ // Creates a new probe sequence using `hash` as the initial value of the
+ // sequence and `mask` (usually the capacity of the table) as the mask to
+ // apply to each value in the progression.
probe_seq(size_t hash, size_t mask) {
assert(((mask + 1) & mask) == 0 && "not a mask");
mask_ = mask;
offset_ = hash & mask_;
}
+
+ // The offset within the table, i.e., the value `p(i)` above.
size_t offset() const { return offset_; }
size_t offset(size_t i) const { return (offset_ + i) & mask_; }
@@ -158,7 +271,7 @@ class probe_seq {
offset_ += index_;
offset_ &= mask_;
}
- // 0-based probe index. The i-th probe in the probe sequence.
+ // 0-based probe index, a multiple of `Width`.
size_t index() const { return index_; }
private:
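
That the triangular progression really does visit every group exactly once when the group count is a power of two can be checked directly. A small sketch, with `Width` taken as 1 so that offsets are group indices:

#include <cassert>
#include <cstddef>
#include <set>

// Enumerate p(i) = (hash + (i^2 + i)/2) mod kGroups for a power-of-two
// kGroups and check that every offset is visited exactly once.
int main() {
  const size_t kGroups = 16;  // must be a power of two
  const size_t hash = 7;      // arbitrary starting point
  std::set<size_t> seen;
  size_t offset = hash % kGroups;
  for (size_t i = 1; i <= kGroups; ++i) {
    seen.insert(offset);
    offset = (offset + i) % kGroups;  // p(i) - p(i-1) == i
  }
  assert(seen.size() == kGroups);  // each group visited exactly once
}
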
@@ -182,9 +295,9 @@ struct IsDecomposable : std::false_type {};
template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
- absl::void_t<decltype(
- Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
- std::declval<Ts>()...))>,
+ absl::void_t<decltype(Policy::apply(
+ RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
Policy, Hash, Eq, Ts...> : std::true_type {};
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
@@ -200,57 +313,84 @@ constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
template <typename T>
uint32_t TrailingZeros(T x) {
- ABSL_INTERNAL_ASSUME(x != 0);
- return countr_zero(x);
+ ABSL_ASSUME(x != 0);
+ return static_cast<uint32_t>(countr_zero(x));
}
-// An abstraction over a bitmask. It provides an easy way to iterate through the
-// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
-// this is a true bitmask. On non-SSE, platforms the arithematic used to
-// emulate the SSE behavior works in bytes (Shift=3) and leaves each bytes as
-// either 0x00 or 0x80.
+// An abstract bitmask, such as that emitted by a SIMD instruction.
//
-// For example:
-// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
-// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+// Specifically, this type implements a simple bitset whose representation is
+// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
+// of abstract bits in the bitset, while `Shift` is the log-base-two of the
+// width of an abstract bit in the representation.
+// This mask provides operations for any number of real bits set in an abstract
+// bit. To add iteration on top of that, the implementation must guarantee
+// that no more than one real bit is set in each abstract bit.
template <class T, int SignificantBits, int Shift = 0>
-class BitMask {
- static_assert(std::is_unsigned<T>::value, "");
- static_assert(Shift == 0 || Shift == 3, "");
-
+class NonIterableBitMask {
public:
- // These are useful for unit tests (gunit).
- using value_type = int;
- using iterator = BitMask;
- using const_iterator = BitMask;
+ explicit NonIterableBitMask(T mask) : mask_(mask) {}
- explicit BitMask(T mask) : mask_(mask) {}
- BitMask& operator++() {
- mask_ &= (mask_ - 1);
- return *this;
- }
- explicit operator bool() const { return mask_ != 0; }
- int operator*() const { return LowestBitSet(); }
+ explicit operator bool() const { return this->mask_ != 0; }
+
+ // Returns the index of the lowest *abstract* bit set in `self`.
uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+
+ // Returns the index of the highest *abstract* bit set in `self`.
uint32_t HighestBitSet() const {
return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
- BitMask begin() const { return *this; }
- BitMask end() const { return BitMask(0); }
-
+ // Returns the number of trailing zero *abstract* bits.
uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
+ // Returns the number of leading zero *abstract* bits.
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return countl_zero(mask_ << extra_bits) >> Shift;
+ return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
}
+ T mask_;
+};
+
+// A bitmask that can be iterated over, one abstract bit at a time.
+//
+// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
+// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
+// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
+// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
+ using Base = NonIterableBitMask<T, SignificantBits, Shift>;
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ explicit BitMask(T mask) : Base(mask) {}
+ // BitMask is an iterator over the indices of its abstract bits.
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ BitMask& operator++() {
+ this->mask_ &= (this->mask_ - 1);
+ return *this;
+ }
+
+ uint32_t operator*() const { return Base::LowestBitSet(); }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
private:
friend bool operator==(const BitMask& a, const BitMask& b) {
return a.mask_ == b.mask_;
@@ -258,15 +398,27 @@ class BitMask {
friend bool operator!=(const BitMask& a, const BitMask& b) {
return a.mask_ != b.mask_;
}
-
- T mask_;
};
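
To make the `Shift = 3` representation concrete, the following sketch reproduces the documented example by hand, assuming a GCC/Clang builtin in place of `countr_zero`:

#include <cassert>
#include <cstdint>

// With Shift = 3, each abstract bit occupies one byte and is represented by
// that byte's sign bit (0x80). Iteration repeatedly takes the lowest set
// real bit and shifts its position right by Shift.
int main() {
  uint64_t mask = 0x0000000080800000;  // abstract bits 2 and 3 are set
  int expected[] = {2, 3};
  int n = 0;
  while (mask != 0) {
    int bit = __builtin_ctzll(mask);    // position of lowest real bit
    assert(bit >> 3 == expected[n++]);  // >> Shift yields the abstract index
    mask &= mask - 1;                   // clear that bit, as operator++ does
  }
  assert(n == 2);
}
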
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
-// below for details. We use an enum class so that when strict aliasing is
-// enabled, the compiler knows ctrl_t doesn't alias other types.
+// below for details.
+
+// A `ctrl_t` is a single control byte, which can have one of four
+// states: empty, deleted, full (which has an associated seven-bit h2_t value)
+// and the sentinel. They have the following bit patterns:
+//
+// empty: 1 0 0 0 0 0 0 0
+// deleted: 1 1 1 1 1 1 1 0
+// full: 0 h h h h h h h // h represents the hash bits.
+// sentinel: 1 1 1 1 1 1 1 1
+//
+// These values are specifically tuned for SSE-flavored SIMD.
+// The static_asserts below detail the source of these choices.
+//
+// We use an enum class so that when strict aliasing is enabled, the compiler
+// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
@@ -294,15 +446,17 @@ static_assert(
static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
"ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
"shared by ctrl_t::kSentinel to make the scalar test for "
- "MatchEmptyOrDeleted() efficient");
+ "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
"ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
-// A single block of empty control bytes for tables without any slots allocated.
-// This enables removing a branch in the hot path of find().
ABSL_DLL extern const ctrl_t kEmptyGroup[16];
+
+// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {
+ // Const must be cast away here; no uses of this function will actually write
+ // to it, because it is only used for empty tables.
return const_cast<ctrl_t*>(kEmptyGroup);
}
@@ -310,28 +464,61 @@ inline ctrl_t* EmptyGroup() {
// randomize insertion order within groups.
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
-// Returns a hash seed.
+// Returns a per-table hash salt, which changes on resize. This gets mixed into
+// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
-inline size_t HashSeed(const ctrl_t* ctrl) {
+inline size_t PerTableSalt(const ctrl_t* ctrl) {
// The low bits of the pointer have little or no entropy because of
// alignment. We shift the pointer to try to use higher entropy bits. A
// good number seems to be 12 bits, because that aligns with page size.
return reinterpret_cast<uintptr_t>(ctrl) >> 12;
}
-
+// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
- return (hash >> 7) ^ HashSeed(ctrl);
+ return (hash >> 7) ^ PerTableSalt(ctrl);
}
+
+// Extracts the H2 portion of a hash: the 7 bits not used for H1.
+//
+// These are used as an occupied control byte.
inline h2_t H2(size_t hash) { return hash & 0x7F; }
+// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
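
These predicates fall straight out of the signed-byte encodings listed above; a quick sketch checking the invariants they rely on:

#include <cassert>
#include <cstdint>

// Full bytes are the only non-negative values, and kSentinel (-1) is the
// largest of the special values. Values mirror the ctrl_t enumerators.
int main() {
  const int8_t kEmpty = -128;   // 0b10000000
  const int8_t kDeleted = -2;   // 0b11111110
  const int8_t kSentinel = -1;  // 0b11111111
  const int8_t kFullH2 = 0x5a;  // any 7-bit H2: sign bit clear
  assert(kFullH2 >= 0);                                // IsFull
  assert(kEmpty < kSentinel && kDeleted < kSentinel);  // IsEmptyOrDeleted
  assert(kSentinel < 0);                               // never a full byte
}
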
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+// Quick reference guide for intrinsics used below:
+//
+// * __m128i: An XMM (128-bit) word.
+//
+// * _mm_setzero_si128: Returns a zero vector.
+// * _mm_set1_epi8: Returns a vector with the same i8 in each lane.
+//
+// * _mm_subs_epi8: Saturating-subtracts two i8 vectors.
+// * _mm_and_si128: Ands two i128s together.
+// * _mm_or_si128: Ors two i128s together.
+// * _mm_andnot_si128: And-nots two i128s together.
+//
+// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
+// filling each lane with 0x00 or 0xff.
+// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
+//
+// * _mm_loadu_si128: Performs an unaligned load of an i128.
+// * _mm_storeu_si128: Performs an unaligned store of an i128.
+//
+// * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first
+// argument if the corresponding lane of the second
+// argument is positive, negative, or zero, respectively.
+// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
+// bitmask consisting of those bits.
+// * _mm_shuffle_epi8: Selects i8s from the first argument, using the low
+// four bits of each i8 lane in the second argument as
+// indices.
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -360,30 +547,32 @@ struct GroupSse2Impl {
BitMask<uint32_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(hash);
return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
}
// Returns a bitmask representing the positions of empty slots.
- BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+ NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
// This only works because ctrl_t::kEmpty is -128.
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
- return Match(static_cast<h2_t>(ctrl_t::kEmpty));
+ auto match = _mm_set1_epi8(static_cast<h2_t>(ctrl_t::kEmpty));
+ return NonIterableBitMask<uint32_t, kWidth>(
+ static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
- BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
- return BitMask<uint32_t, kWidth>(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
+ return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ auto special = _mm_set1_epi8(static_cast<uint8_t>(ctrl_t::kSentinel));
return TrailingZeros(static_cast<uint32_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
@@ -391,7 +580,7 @@ struct GroupSse2Impl {
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -405,6 +594,63 @@ struct GroupSse2Impl {
};
#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+struct GroupAArch64Impl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupAArch64Impl(const ctrl_t* pos) {
+ ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
+ }
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ uint8x8_t dup = vdup_n_u8(hash);
+ auto mask = vceq_u8(ctrl, dup);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>(
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(
+ vceq_s8(vdup_n_s8(static_cast<h2_t>(ctrl_t::kEmpty)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
+ uint64_t mask =
+ vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
+ vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
+ vreinterpret_s8_u8(ctrl))),
+ 0);
+ return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+ // kDeleted. We mask off all other bits and count the trailing zeros.
+ // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
+ // so we should be fine.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((mask | ~(mask >> 7)) & bits) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = mask & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint8x8_t ctrl;
+};
+#endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
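
The `CountLeadingEmptyOrDeleted` bit trick (shared with the portable implementation below) rewards a worked example. A sketch, assuming the little-endian byte order the NEON code requires and a GCC/Clang builtin:

#include <cassert>
#include <cstdint>

// (ctrl | ~(ctrl >> 7)) clears bit 0 of a byte exactly when that byte is
// kEmpty (0x80) or kDeleted (0xfe); masking with 0x01...01 and counting
// trailing zero bits then gives 8 * (leading empty-or-deleted slots).
int main() {
  constexpr uint64_t bits = 0x0101010101010101ULL;
  // Slots 0-1 empty (0x80), slot 2 deleted (0xfe), slot 3 full (0x42).
  uint64_t ctrl = 0xffffffff42fe8080ULL;
  uint64_t marked = (ctrl | ~(ctrl >> 7)) & bits;
  assert((__builtin_ctzll(marked) >> 3) == 3);  // 3 leading non-full slots
}
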
+
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -431,19 +677,23 @@ struct GroupPortableImpl {
return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+ msbs);
}
- BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+ msbs);
}
uint32_t CountLeadingEmptyOrDeleted() const {
- constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
- return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
+ // kDeleted. We mask off all other bits and count the trailing zeros.
+ constexpr uint64_t bits = 0x0101010101010101ULL;
+ return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3;
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
@@ -457,32 +707,40 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
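
Likewise for the portable `MaskEmpty` above: `(ctrl & (~ctrl << 6)) & msbs` sets the high bit of a byte exactly when bit 7 is set and bit 1 is clear, a combination that among the control encodings is unique to `kEmpty` (0x80). A sketch of that arithmetic, again assuming little-endian packing:

#include <cassert>
#include <cstdint>

int main() {
  constexpr uint64_t msbs = 0x8080808080808080ULL;
  // Slot 0 = kEmpty (0x80), slot 1 = kDeleted (0xfe), slot 2 = kSentinel
  // (0xff), slot 3 = a full byte (0x2a), slots 4-7 = kEmpty.
  uint64_t ctrl = 0x808080802afffe80ULL;
  // Bit 7 of each byte of (~ctrl << 6) is bit 1 of the same byte of ~ctrl,
  // so the final mask keeps only bytes with bit 7 set and bit 1 clear.
  uint64_t empties = (ctrl & (~ctrl << 6)) & msbs;
  assert(empties == 0x8080808000000080ULL);  // only the kEmpty bytes match
}
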
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
+#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
+using Group = GroupAArch64Impl;
#else
using Group = GroupPortableImpl;
#endif
-// The number of cloned control bytes that we copy from the beginning to the
-// end of the control bytes array.
+// Returns the number of "cloned control bytes".
+//
+// This is the number of control bytes that are present both at the beginning
+// of the control byte array and at the end, such that we can create a
+// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
+// Returns whether `n` is a valid capacity (i.e., number of slots).
+//
+// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+// Applies the following mapping to every byte in the control array:
+// * kDeleted -> kEmpty
+// * kEmpty -> kEmpty
+// * _ -> kDeleted
// PRECONDITION:
// IsValidCapacity(capacity)
// ctrl[capacity] == ctrl_t::kSentinel
// ctrl[i] != ctrl_t::kSentinel for all i < capacity
-// Applies mapping for every byte in ctrl:
-// DELETED -> EMPTY
-// EMPTY -> EMPTY
-// FULL -> DELETED
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
-// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {
return n ? ~size_t{} >> countl_zero(n) : 1;
}
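
Worked through with numbers: `NormalizeCapacity` snaps any request onto the `2^m - 1` grid, and `CapacityToGrowth` below applies the 7/8 load factor. A sketch of the arithmetic, assuming 64-bit `size_t` and a GCC/Clang builtin standing in for `countl_zero`:

#include <cassert>
#include <cstddef>

int main() {
  auto normalize = [](size_t n) -> size_t {
    return n ? ~size_t{} >> __builtin_clzll(n) : 1;  // all-ones at n's width
  };
  assert(normalize(5) == 7);   // 0b101  -> 0b111
  assert(normalize(7) == 7);   // already a valid capacity
  assert(normalize(8) == 15);  // 0b1000 -> 0b1111
  // 7/8 load factor: a capacity-15 table grows once it holds 14 values.
  const size_t capacity = 15;
  assert(capacity - capacity / 8 == 14);
}
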
@@ -495,8 +753,8 @@ inline size_t NormalizeCapacity(size_t n) {
// never need to probe (the whole table fits in one group) so we don't need a
// load factor less than 1.
-// Given `capacity` of the table, returns the size (i.e. number of full slots)
-// at which we should grow the capacity.
+// Given `capacity`, applies the load factor; i.e., it returns the maximum
+// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -506,8 +764,12 @@ inline size_t CapacityToGrowth(size_t capacity) {
}
return capacity - capacity / 8;
}
-// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and requires NormalizeCapacity().
+
+// Given `growth`, "unapplies" the load factor to find how large the capacity
+// should be to stay within the load factor.
+//
+// This might not be a valid capacity and `NormalizeCapacity()` should be
+// called on this.
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -533,16 +795,15 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-inline void AssertIsFull(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
-}
+#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg)
inline void AssertIsValid(ctrl_t* ctrl) {
- ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, or the table might have rehashed.");
+ ABSL_HARDENING_ASSERT(
+ (ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, the table might have rehashed, or this may "
+ "be an end() iterator.");
}
struct FindInfo {
@@ -550,44 +811,40 @@ struct FindInfo {
size_t probe_length;
};
-// The representation of the object has two modes:
-// - small: For capacities < kWidth-1
-// - large: For the rest.
+// Whether a table is "small". A small table fits entirely into a probing
+// group, i.e., has a capacity < `Group::kWidth`.
//
-// Differences:
-// - In small mode we are able to use the whole capacity. The extra control
-// bytes give us at least one "empty" control byte to stop the iteration.
-// This is important to make 1 a valid capacity.
+// In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
//
-// - In small mode only the first `capacity()` control bytes after the
-// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
-// represent a real slot. This is important to take into account on
-// find_first_non_full(), where we never try ShouldInsertBackwards() for
-// small tables.
+// In small mode only the first `capacity` control bytes after the sentinel
+// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// `find_first_non_full()`, where we never try
+// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+// Begins a probing operation on `ctrl`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
-// Probes the raw_hash_set with the probe sequence for hash and returns the
-// pointer to the first empty or deleted slot.
-// NOTE: this function must work with tables having both ctrl_t::kEmpty and
-// ctrl_t::kDeleted in one group. Such tables appears during
-// drop_deletes_without_resize.
+// Probes an array of control bits using a probe sequence derived from `hash`,
+// and returns the offset corresponding to the first deleted or empty slot.
//
-// This function is very useful when insertions happen and:
-// - the input is already a set
-// - there are enough slots
-// - the element with the hash is not in the table
+// Behavior when the entire table is full is undefined.
+//
+// NOTE: this function must work with tables having both empty and deleted
+// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
size_t capacity) {
auto seq = probe(ctrl, hash, capacity);
while (true) {
Group g{ctrl + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
+ auto mask = g.MaskEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
// We want to add entropy even when ASLR is not enabled.
@@ -610,7 +867,8 @@ inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
// corresponding translation unit.
extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
-// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+// Sets `ctrl` to `{kEmpty, ..., kEmpty, kSentinel, kEmpty, ..., kEmpty}`,
+// marking the entire array as empty, with the sentinel at index `capacity`.
inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
size_t slot_size) {
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
@@ -619,8 +877,10 @@ inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
}
-// Sets the control byte, and if `i < NumClonedBytes()`, set the cloned byte
-// at the end too.
+// Sets `ctrl[i]` to `h`.
+//
+// Unlike setting it directly, this function will perform bounds checks and
+// mirror the value to the cloned tail if necessary.
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
assert(i < capacity);
@@ -636,25 +896,28 @@ inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
+// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
const void* slot, size_t slot_size) {
SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
}
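
The mirroring index in `SetCtrl` is compact enough to deserve a numeric check. A sketch for `capacity = 31` and 16-wide groups, so `NumClonedBytes() == 15`:

#include <cassert>
#include <cstddef>

int main() {
  const size_t capacity = 31, cloned = 15;
  auto mirror = [&](size_t i) {
    return ((i - cloned) & capacity) + (cloned & capacity);
  };
  assert(mirror(20) == 20);               // i >= cloned: writes onto itself
  assert(mirror(0) == capacity + 1 + 0);  // i < cloned: lands in cloned tail
  assert(mirror(3) == capacity + 1 + 3);  // i.e., just past the sentinel
}
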
-// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
-// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
-// SlotOffset returns the offset of the slots into the allocated block.
+// Given the capacity of a table, computes the offset (from the start of the
+// backing allocation) at which the slots begin.
inline size_t SlotOffset(size_t capacity, size_t slot_align) {
assert(IsValidCapacity(capacity));
const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
}
-// Returns the size of the allocated block. See also above comment.
+// Given the capacity of a table, computes the total size of the backing
+// array.
inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
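
Plugging numbers into the layout formulas: for a capacity-15 table of 8-byte, 8-byte-aligned slots and a 16-wide group, the backing array works out as below (a sketch of the arithmetic only):

#include <cassert>
#include <cstddef>

int main() {
  const size_t capacity = 15, slot_size = 8, slot_align = 8;
  const size_t cloned = 16 - 1;  // Group::kWidth - 1
  // capacity ctrl bytes + 1 sentinel + cloned tail = 31 control bytes,
  // rounded up to the slot alignment.
  const size_t num_control_bytes = capacity + 1 + cloned;
  const size_t slot_offset =
      (num_control_bytes + slot_align - 1) & (~slot_align + 1);
  assert(slot_offset == 32);                          // 31 rounded up to 8
  assert(slot_offset + capacity * slot_size == 152);  // total AllocSize
}
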
+// A SwissTable.
+//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -769,16 +1032,22 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- AssertIsFull(ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator*() called on invalid iterator.");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
- pointer operator->() const { return &operator*(); }
+ pointer operator->() const {
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator-> called on invalid iterator.");
+ return &operator*();
+ }
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- AssertIsFull(ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
+ "operator++ called on invalid iterator.");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -804,9 +1073,13 @@ class raw_hash_set {
iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
// This assumption helps the compiler know that any non-end iterator is
// not equal to any end iterator.
- ABSL_INTERNAL_ASSUME(ctrl != nullptr);
+ ABSL_ASSUME(ctrl != nullptr);
}
+ // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
+ // until they reach one.
+ //
+ // If a sentinel is reached, we null both of them out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -1103,8 +1376,7 @@ class raw_hash_set {
// m.insert(std::make_pair("abc", 42));
// TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
// bug.
- template <class T, RequiresInsertable<T> = 0,
- class T2 = T,
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
T* = nullptr>
std::pair<iterator, bool> insert(T&& value) {
@@ -1324,7 +1596,8 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- AssertIsFull(it.ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_,
+ "erase() called on invalid iterator.");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1358,7 +1631,8 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_);
+ ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
+ "extract() called on invalid iterator.");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1445,12 +1719,13 @@ class raw_hash_set {
template <class K = key_type>
void prefetch(const key_arg<K>& key) const {
(void)key;
-#if defined(__GNUC__)
+ // Avoid probing if we won't be able to prefetch the addresses received.
+#ifdef ABSL_INTERNAL_HAVE_PREFETCH
prefetch_heap_block();
auto seq = probe(ctrl_, hash_ref()(key), capacity_);
- __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
- __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
-#endif // __GNUC__
+ base_internal::PrefetchT0(ctrl_ + seq.offset());
+ base_internal::PrefetchT0(slots_ + seq.offset());
+#endif // ABSL_INTERNAL_HAVE_PREFETCH
}
// The API of find() has two extensions.
@@ -1465,13 +1740,13 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
@@ -1538,6 +1813,14 @@ class raw_hash_set {
return !(a == b);
}
+ template <typename H>
+ friend typename std::enable_if<H::template is_hashable<value_type>::value,
+ H>::type
+ AbslHashValue(H h, const raw_hash_set& s) {
+ return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
+ s.size());
+ }
+
friend void swap(raw_hash_set& a,
raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
a.swap(b);
@@ -1603,17 +1886,17 @@ class raw_hash_set {
slot_type&& slot;
};
- // "erases" the object from the container, except that it doesn't actually
- // destroy the object. It only updates all the metadata of the class.
- // This can be used in conjunction with Policy::transfer to move the object to
- // another place.
+ // Erases, but does not destroy, the value pointed to by `it`.
+ //
+ // This merely updates the pertinent control byte. This can be used in
+ // conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
--size_;
- const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+ const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();
// We count how many consecutive non empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
@@ -1629,6 +1912,11 @@ class raw_hash_set {
infoz().RecordErase();
}
+ // Allocates a backing array for `self` and initializes its control bytes.
+ // This reads `capacity_` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // This does not free the currently held array; `capacity_` must be nonzero.
void initialize_slots() {
assert(capacity_);
// Folks with custom allocators often make unwarranted assumptions about the
@@ -1657,6 +1945,10 @@ class raw_hash_set {
infoz().RecordStorageChanged(size_, capacity_);
}
+ // Destroys all slots in the backing array, frees the backing array, and
+ // clears all top-level book-keeping data.
+ //
+ // This essentially implements `map = raw_hash_set();`.
void destroy_slots() {
if (!capacity_) return;
for (size_t i = 0; i != capacity_; ++i) {
@@ -1707,6 +1999,9 @@ class raw_hash_set {
infoz().RecordRehash(total_probe_length);
}
+ // Prunes control bytes to remove as many tombstones as possible.
+ //
+ // See the comment on `rehash_and_grow_if_necessary()`.
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
assert(!is_small(capacity_));
@@ -1773,6 +2068,11 @@ class raw_hash_set {
infoz().RecordRehash(total_probe_length);
}
+ // Called whenever the table *might* need to conditionally grow.
+ //
+ // This function is an optimization opportunity to perform a rehash even when
+ // growth is unnecessary, because vacating tombstones is beneficial for
+ // performance in the long-run.
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
@@ -1832,12 +2132,12 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
elem))
return true;
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
@@ -1857,6 +2157,9 @@ class raw_hash_set {
}
protected:
+ // Attempts to find `key` in the table; if it isn't found, returns a slot that
+ // the value can be inserted into, with the control byte already set to
+ // `key`'s H2.
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
@@ -1864,19 +2167,23 @@ class raw_hash_set {
auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
- for (int i : g.Match(H2(hash))) {
+ for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
PolicyTraits::element(slots_ + seq.offset(i)))))
return {seq.offset(i), false};
}
- if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
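For orientation, find_or_prepare_insert() follows the usual SwissTable decomposition: H1 (the upper hash bits) seeds the probe sequence, while H2, a 7-bit fingerprint, is compared against a whole group of control bytes at once. A scalar, self-contained sketch of that group scan (hypothetical names; the real Group uses SSE2 or NEON where available):

#include <cstdint>

// Stand-in for Group::Match(): returns a bitmask with bit i set when
// control byte i holds the 7-bit fingerprint h2.
inline uint32_t MatchScalar(const int8_t* ctrl, int width, int8_t h2) {
  uint32_t mask = 0;
  for (int i = 0; i < width; ++i) {
    if (ctrl[i] == h2) mask |= uint32_t{1} << i;
  }
  return mask;
}
// The probe loop verifies every matching candidate with the real equality
// functor, then stops at the first group containing an empty slot, exactly
// as the MaskEmpty() check above does.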
+ // Given the hash of a value not currently in the table, finds the next
+ // viable slot index to insert it at.
+ //
+ // REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
@@ -1920,15 +2227,23 @@ class raw_hash_set {
growth_left() = CapacityToGrowth(capacity()) - size_;
}
+ // The number of slots we can still fill without needing to rehash.
+ //
+ // This is stored separately due to tombstones: we do not include tombstones
+ // in the growth capacity, because we'd like to rehash when the table is
+  // in the growth capacity, because we'd like to rehash when the table is
+  // filled with tombstones: otherwise, probe sequences might get
+  // unacceptably long without triggering a rehash. Callers can also force a
+ // side-effect.
+ //
+ // See `CapacityToGrowth()`.
size_t& growth_left() { return settings_.template get<0>(); }
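Concretely, the budget starts at CapacityToGrowth(capacity) - size and is consumed by inserts into empty slots; erasures that leave tombstones do not refund it, which is what forces the rehash described above. A back-of-the-envelope sketch, assuming the usual capacity - capacity / 8 rule for large tables (an assumption about the internal formula, not a quote of it):

#include <cstddef>

// Hypothetical mirror of CapacityToGrowth() for capacities >= the group
// width: the table rehashes once 7/8 of the slots are full or tombstoned.
constexpr size_t CapacityToGrowthSketch(size_t capacity) {
  return capacity - capacity / 8;
}
static_assert(CapacityToGrowthSketch(127) == 112,
              "a capacity-127 table tolerates 112 live-or-dead slots");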
+ // Prefetch the heap-allocated memory region to resolve potential TLB misses.
+ // This is intended to overlap with execution of calculating the hash for a
+ // key.
void prefetch_heap_block() const {
- // Prefetch the heap-allocated memory region to resolve potential TLB
- // misses. This is intended to overlap with execution of calculating the
- // hash for a key.
-#if defined(__GNUC__)
- __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
-#endif // __GNUC__
+ base_internal::PrefetchT2(ctrl_);
}
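PrefetchT0/PrefetchT2 wrap the compiler prefetch intrinsics behind ABSL_INTERNAL_HAVE_PREFETCH, so call sites like this one lose their #ifdef __GNUC__ guards. The overlap idea is simple enough to sketch (a sketch only, assuming the GCC/Clang __builtin_prefetch lowering):

#include <cstddef>
#include <cstdint>

// Stand-in for base_internal::PrefetchT2: a read prefetch with low
// temporal locality, lowering to __builtin_prefetch(p, 0, 1).
inline void PrefetchSketch(const void* p) {
#if defined(__GNUC__)
  __builtin_prefetch(p, /*rw=*/0, /*locality=*/1);
#else
  (void)p;  // no-op where the intrinsic is unavailable
#endif
}

size_t FirstProbe(const int8_t* ctrl, size_t capacity_mask,
                  size_t (*hash)(const char*), const char* key) {
  PrefetchSketch(ctrl);        // the TLB/cache fill starts here...
  const size_t h = hash(key);  // ...and overlaps with hashing the key.
  return h & capacity_mask;    // capacity is 2^n - 1, so this is mod.
}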
HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
@@ -1945,20 +2260,33 @@ class raw_hash_set {
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
+
+ // The control bytes (and, also, a pointer to the base of the backing array).
+ //
+ // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ ctrl_t* ctrl_ = EmptyGroup();
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `ctrl_`. May be null for empty tables.
+ slot_type* slots_ = nullptr;
+
+ // The number of filled slots.
+ size_t size_ = 0;
+
+ // The total number of available slots.
+ size_t capacity_ = 0;
absl::container_internal::CompressedTuple<size_t /* growth_left */,
HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+typename raw_hash_set<P, H, E, A>::size_type EraseIf(
+ Predicate& pred, raw_hash_set<P, H, E, A>* c) {
+ const auto initial_size = c->size();
for (auto it = c->begin(), last = c->end(); it != last;) {
if (pred(*it)) {
c->erase(it++);
@@ -1966,6 +2294,7 @@ void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
++it;
}
}
+ return initial_size - c->size();
}
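Returning the erased count lets the container-level erase_if() wrappers (updated below for node_hash_map and node_hash_set) match the C++20 std::erase_if contract. A usage sketch against flat_hash_map, which forwards to this same EraseIf():

#include <cstddef>
#include "absl/container/flat_hash_map.h"

int main() {
  absl::flat_hash_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
  // As of this change, erase_if reports how many elements it removed.
  const size_t n =
      absl::erase_if(m, [](const auto& kv) { return kv.first % 2 == 1; });
  return n == 2 ? 0 : 1;  // keys 1 and 3 are erased
}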
namespace hashtable_debug_internal {
@@ -1981,7 +2310,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
- for (int i : g.Match(container_internal::H2(hash))) {
+ for (uint32_t i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
@@ -1989,7 +2318,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
return num_probes;
++num_probes;
}
- if (g.MatchEmpty()) return num_probes;
+ if (g.MaskEmpty()) return num_probes;
seq.next();
++num_probes;
}
@@ -2031,4 +2360,6 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
ABSL_NAMESPACE_END
} // namespace absl
+#undef ABSL_INTERNAL_ASSERT_IS_FULL
+
#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index c886d3ad..47dc9048 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -330,33 +330,42 @@ void BM_Group_Match(benchmark::State& state) {
h2_t h = 1;
for (auto _ : state) {
::benchmark::DoNotOptimize(h);
+ ::benchmark::DoNotOptimize(g);
::benchmark::DoNotOptimize(g.Match(h));
}
}
BENCHMARK(BM_Group_Match);
-void BM_Group_MatchEmpty(benchmark::State& state) {
+void BM_Group_MaskEmpty(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
Iota(group.begin(), group.end(), -4);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmpty());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmpty());
+ }
}
-BENCHMARK(BM_Group_MatchEmpty);
+BENCHMARK(BM_Group_MaskEmpty);
-void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) {
+void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
Iota(group.begin(), group.end(), -4);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted());
+ }
}
-BENCHMARK(BM_Group_MatchEmptyOrDeleted);
+BENCHMARK(BM_Group_MaskEmptyOrDeleted);
void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
Iota(group.begin(), group.end(), -2);
Group g{group.data()};
- for (auto _ : state)
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
+ }
}
BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
@@ -364,7 +373,10 @@ void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
Iota(group.begin(), group.end(), -2);
Group g{group.data()};
- for (auto _ : state) ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted());
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(g);
+ ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet());
+ }
}
BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 362b3cae..f77ffbc1 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -31,6 +31,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/prefetch.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
@@ -194,35 +195,39 @@ TEST(Group, Match) {
}
}
-TEST(Group, MatchEmpty) {
+TEST(Group, MaskEmpty) {
if (Group::kWidth == 16) {
ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
- EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+ EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
} else if (Group::kWidth == 8) {
ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
ctrl_t::kSentinel, CtrlT(1)};
- EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+ EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
}
}
-TEST(Group, MatchEmptyOrDeleted) {
+TEST(Group, MaskEmptyOrDeleted) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
- ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
- CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
- CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
- EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
+ ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
} else if (Group::kWidth == 8) {
ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
ctrl_t::kSentinel, CtrlT(1)};
- EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
+ EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
}
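The rewritten tests consume the mask through LowestBitSet()/HighestBitSet() instead of iterating it with ElementsAre. Both are plain bit scans over the group-wide mask; a scalar model follows (hypothetical helpers; the real BitMask is templated over the SIMD width):

#include <cstdint>

inline int LowestBitSet(uint32_t mask) {
  for (int i = 0; i < 32; ++i)
    if (mask & (uint32_t{1} << i)) return i;
  return -1;  // no bits set
}
inline int HighestBitSet(uint32_t mask) {
  for (int i = 31; i >= 0; --i)
    if (mask & (uint32_t{1} << i)) return i;
  return -1;  // no bits set
}
// For the 16-wide group above, MaskEmpty() sets bits {0, 4}, hence
// LowestBitSet() == 0 and HighestBitSet() == 4, as the test expects.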
@@ -1244,7 +1249,7 @@ ExpectedStats XorSeedExpectedStats() {
case 16:
if (kRandomizesInserts) {
return {0.1,
- 1.0,
+ 2.0,
{{0.95, 0.1}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
@@ -1258,6 +1263,7 @@ ExpectedStats XorSeedExpectedStats() {
return {};
}
+// TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -1330,17 +1336,17 @@ ExpectedStats LinearTransformExpectedStats() {
{{0.95, 0.3}},
{{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
} else {
- return {0.15,
- 0.5,
- {{0.95, 0.3}},
- {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+ return {0.4,
+ 0.6,
+ {{0.95, 0.5}},
+ {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
}
case 16:
if (kRandomizesInserts) {
return {0.1,
0.4,
{{0.95, 0.3}},
- {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
} else {
return {0.05,
0.2,
@@ -1352,6 +1358,7 @@ ExpectedStats LinearTransformExpectedStats() {
return {};
}
+// TODO(b/80415403): Figure out why this test is so flaky.
TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
ProbeStatsPerSize stats;
std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
@@ -2028,7 +2035,7 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "Invalid operation on iterator";
+ constexpr char kDeathMsg[] = "erase.. called on invalid iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
diff --git a/absl/container/internal/unordered_map_constructor_test.h b/absl/container/internal/unordered_map_constructor_test.h
index c1d20f3c..7e84dc25 100644
--- a/absl/container/internal/unordered_map_constructor_test.h
+++ b/absl/container/internal/unordered_map_constructor_test.h
@@ -476,7 +476,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
// containers in an unspecified state (and in practice it causes a memory
// leak according to the heap checker!).
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/absl/container/internal/unordered_map_lookup_test.h b/absl/container/internal/unordered_map_lookup_test.h
index e76421e5..3713cd9a 100644
--- a/absl/container/internal/unordered_map_lookup_test.h
+++ b/absl/container/internal/unordered_map_lookup_test.h
@@ -107,8 +107,8 @@ TYPED_TEST_P(LookupTest, EqualRange) {
}
}
-REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
- EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/unordered_map_modifiers_test.h b/absl/container/internal/unordered_map_modifiers_test.h
index d3543936..4d9ab30f 100644
--- a/absl/container/internal/unordered_map_modifiers_test.h
+++ b/absl/container/internal/unordered_map_modifiers_test.h
@@ -297,11 +297,12 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertWithinCapacity,
- InsertRangeWithinCapacity, InsertOrAssign,
- InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
- TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, InsertOrAssign,
+ InsertOrAssignHint, Emplace, EmplaceHint,
+ TryEmplace, TryEmplaceHint, Erase, EraseRange,
+ EraseKey, Swap);
template <typename Type>
struct is_unique_ptr : std::false_type {};
diff --git a/absl/container/internal/unordered_set_constructor_test.h b/absl/container/internal/unordered_set_constructor_test.h
index 41165b05..af1116e6 100644
--- a/absl/container/internal/unordered_set_constructor_test.h
+++ b/absl/container/internal/unordered_set_constructor_test.h
@@ -478,7 +478,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
-REGISTER_TYPED_TEST_CASE_P(
+REGISTER_TYPED_TEST_SUITE_P(
ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
diff --git a/absl/container/internal/unordered_set_lookup_test.h b/absl/container/internal/unordered_set_lookup_test.h
index 8f2f4b20..b35f766e 100644
--- a/absl/container/internal/unordered_set_lookup_test.h
+++ b/absl/container/internal/unordered_set_lookup_test.h
@@ -82,7 +82,7 @@ TYPED_TEST_P(LookupTest, EqualRange) {
}
}
-REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+REGISTER_TYPED_TEST_SUITE_P(LookupTest, Count, Find, EqualRange);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/unordered_set_modifiers_test.h b/absl/container/internal/unordered_set_modifiers_test.h
index 6e473e45..d8864bb2 100644
--- a/absl/container/internal/unordered_set_modifiers_test.h
+++ b/absl/container/internal/unordered_set_modifiers_test.h
@@ -209,10 +209,10 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for extract.
// TODO(alkis): Write tests for merge.
-REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertWithinCapacity,
- InsertRangeWithinCapacity, Emplace, EmplaceHint,
- Erase, EraseRange, EraseKey, Swap);
+REGISTER_TYPED_TEST_SUITE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, Emplace, EmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/node_hash_map.h b/absl/container/node_hash_map.h
index 7a39f628..6868e63a 100644
--- a/absl/container/node_hash_map.h
+++ b/absl/container/node_hash_map.h
@@ -41,9 +41,10 @@
#include <utility>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
@@ -77,6 +78,10 @@ class NodeHashMapPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::node_hash_map` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// Example:
//
// // Create a node hash map of three strings (that map to strings)
@@ -347,8 +352,8 @@ class node_hash_map
// `node_hash_map`.
//
// iterator try_emplace(const_iterator hint,
- // const init_type& k, Args&&... args):
- // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
//
// Inserts (via copy or move) the element of the specified key into the
// `node_hash_map` using the position of `hint` as a non-binding suggestion
@@ -525,17 +530,19 @@ class node_hash_map
// erase_if(node_hash_map<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
-void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename node_hash_map<K, V, H, E, A>::size_type erase_if(
+ node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy
- : public absl::container_internal::node_hash_policy<
+ : public absl::container_internal::node_slot_policy<
std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
using value_type = std::pair<const Key, Value>;
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index 8f59a1e4..e941a836 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -223,33 +223,36 @@ TEST(NodeHashMap, EraseIf) {
// Erase all elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
Pair(4, 4), Pair(5, 5)));
}
// Erase specific elements.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s,
- [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_EQ(erase_if(s,
+ [](std::pair<const int, int> kvp) {
+ return kvp.first % 2 == 1;
+ }),
+ 3);
EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
}
// Predicate is function reference.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, FirstIsEven);
+ EXPECT_EQ(erase_if(s, FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
// Predicate is function pointer.
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
- erase_if(s, &FirstIsEven);
+ EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
}
diff --git a/absl/container/node_hash_set.h b/absl/container/node_hash_set.h
index 93b15f46..f2cc70c3 100644
--- a/absl/container/node_hash_set.h
+++ b/absl/container/node_hash_set.h
@@ -38,8 +38,9 @@
#include <type_traits>
#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
-#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
@@ -73,6 +74,10 @@ struct NodeHashSetPolicy;
// absl/hash/hash.h for information on extending Abseil hashing to user-defined
// types.
//
+// Using `absl::node_hash_set` at interface boundaries in dynamically loaded
+// libraries (e.g. .dll, .so) is unsupported due to the way `absl::Hash`
+// values may be randomized across dynamically loaded libraries.
+//
// Example:
//
// // Create a node hash set of three strings
@@ -433,16 +438,18 @@ class node_hash_set
// erase_if(node_hash_set<>, Pred)
//
// Erases all elements that satisfy the predicate `pred` from the container `c`.
+// Returns the number of erased elements.
template <typename T, typename H, typename E, typename A, typename Predicate>
-void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
- container_internal::EraseIf(pred, &c);
+typename node_hash_set<T, H, E, A>::size_type erase_if(
+ node_hash_set<T, H, E, A>& c, Predicate pred) {
+ return container_internal::EraseIf(pred, &c);
}
namespace container_internal {
template <class T>
struct NodeHashSetPolicy
- : absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+ : absl::container_internal::node_slot_policy<T&, NodeHashSetPolicy<T>> {
using key_type = T;
using init_type = T;
using constant_iterators = std::true_type;
diff --git a/absl/container/node_hash_set_test.cc b/absl/container/node_hash_set_test.cc
index 7ddad202..98a8dbdd 100644
--- a/absl/container/node_hash_set_test.cc
+++ b/absl/container/node_hash_set_test.cc
@@ -108,31 +108,31 @@ TEST(NodeHashSet, EraseIf) {
// Erase all elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return true; });
+ EXPECT_EQ(erase_if(s, [](int) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
// Erase no elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int) { return false; });
+ EXPECT_EQ(erase_if(s, [](int) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
}
// Erase specific elements.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3);
EXPECT_THAT(s, UnorderedElementsAre(2, 4));
}
// Predicate is function reference.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, IsEven);
+ EXPECT_EQ(erase_if(s, IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
// Predicate is function pointer.
{
node_hash_set<int> s = {1, 2, 3, 4, 5};
- erase_if(s, &IsEven);
+ EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
}
}
diff --git a/absl/copts/AbseilConfigureCopts.cmake b/absl/copts/AbseilConfigureCopts.cmake
index 942ce90a..73435e99 100644
--- a/absl/copts/AbseilConfigureCopts.cmake
+++ b/absl/copts/AbseilConfigureCopts.cmake
@@ -1,8 +1,6 @@
# See absl/copts/copts.py and absl/copts/generate_copts.py
include(GENERATED_AbseilCopts)
-set(ABSL_LSAN_LINKOPTS "")
-set(ABSL_HAVE_LSAN OFF)
set(ABSL_DEFAULT_LINKOPTS "")
if (BUILD_SHARED_LIBS AND MSVC)
@@ -12,7 +10,49 @@ else()
set(ABSL_BUILD_DLL FALSE)
endif()
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64")
+if(APPLE AND CMAKE_CXX_COMPILER_ID MATCHES [[Clang]])
+  # Some CMake targets (not known at the time of processing) could be set to
+ # compile for multiple architectures as specified by the OSX_ARCHITECTURES
+ # property, which is target-specific. We should neither inspect nor rely on
+ # any CMake property or variable to detect an architecture, in particular:
+ #
+ # - CMAKE_OSX_ARCHITECTURES
+ # is just an initial value for OSX_ARCHITECTURES; set too early.
+ #
+ # - OSX_ARCHITECTURES
+ # is a per-target property; targets could be defined later, and their
+ # properties could be modified any time later.
+ #
+ # - CMAKE_SYSTEM_PROCESSOR
+ # does not reflect multiple architectures at all.
+ #
+ # When compiling for multiple architectures, a build system can invoke a
+ # compiler either
+ #
+ # - once: a single command line for multiple architectures (Ninja build)
+  #   - twice: one command line per architecture (Xcode build system)
+ #
+  # In the case of Xcode, it would be possible to set Xcode-specific attributes
+ # like XCODE_ATTRIBUTE_OTHER_CPLUSPLUSFLAGS[arch=arm64] or similar.
+ #
+ # In both cases, the viable strategy is to pass all arguments at once, allowing
+ # the compiler to dispatch arch-specific arguments to a designated backend.
+ set(ABSL_RANDOM_RANDEN_COPTS "")
+ foreach(_arch IN ITEMS "x86_64" "arm64")
+ string(TOUPPER "${_arch}" _arch_uppercase)
+ string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase})
+ foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS)
+ list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}")
+ endforeach()
+ endforeach()
+  # If a compiler is passed an -Xarch_ flag for an architecture it is not
+  # currently targeting, it will warn about an unused command line argument.
+ option(ABSL_RANDOM_RANDEN_COPTS_WARNING OFF
+ "Warn if one of ABSL_RANDOM_RANDEN_COPTS is unused")
+ if(ABSL_RANDOM_RANDEN_COPTS AND NOT ABSL_RANDOM_RANDEN_COPTS_WARNING)
+ list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Wno-unused-command-line-argument")
+ endif()
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64|AMD64")
if (MSVC)
set(ABSL_RANDOM_RANDEN_COPTS "${ABSL_RANDOM_HWAES_MSVC_X64_FLAGS}")
else()
@@ -43,14 +83,6 @@ elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") # MATCHES so we get both Clang an
else()
set(ABSL_DEFAULT_COPTS "${ABSL_LLVM_FLAGS}")
set(ABSL_TEST_COPTS "${ABSL_LLVM_FLAGS};${ABSL_LLVM_TEST_FLAGS}")
- if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- # AppleClang doesn't have lsan
- # https://developer.apple.com/documentation/code_diagnostics
- if(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.5)
- set(ABSL_LSAN_LINKOPTS "-fsanitize=leak")
- set(ABSL_HAVE_LSAN ON)
- endif()
- endif()
endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
set(ABSL_DEFAULT_COPTS "${ABSL_MSVC_FLAGS}")
diff --git a/absl/debugging/BUILD.bazel b/absl/debugging/BUILD.bazel
index 3c4ea8dc..932a8e9f 100644
--- a/absl/debugging/BUILD.bazel
+++ b/absl/debugging/BUILD.bazel
@@ -143,7 +143,6 @@ cc_library(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
- "//absl/base:errno_saver",
"//absl/base:raw_logging_internal",
],
)
@@ -197,6 +196,7 @@ cc_library(
srcs = ["internal/demangle.cc"],
hdrs = ["internal/demangle.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
deps = [
"//absl/base",
@@ -225,6 +225,7 @@ cc_library(
name = "leak_check",
srcs = ["leak_check.cc"],
hdrs = ["leak_check.h"],
+ copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
@@ -232,98 +233,33 @@ cc_library(
],
)
-# Adding a dependency to leak_check_disable will disable
-# sanitizer leak checking (asan/lsan) in a test without
-# the need to mess around with build features.
-cc_library(
- name = "leak_check_disable",
- srcs = ["leak_check_disable.cc"],
- linkopts = ABSL_DEFAULT_LINKOPTS,
- linkstatic = 1,
- deps = ["//absl/base:config"],
- alwayslink = 1,
-)
-
-# These targets exists for use in tests only, explicitly configuring the
-# LEAK_SANITIZER macro. It must be linked with -fsanitize=leak for lsan.
-ABSL_LSAN_LINKOPTS = select({
- "//absl:clang_compiler": ["-fsanitize=leak"],
- "//conditions:default": [],
-})
-
-cc_library(
- name = "leak_check_api_enabled_for_testing",
- testonly = 1,
- srcs = ["leak_check.cc"],
- hdrs = ["leak_check.h"],
- copts = select({
- "//absl:clang_compiler": ["-DLEAK_SANITIZER"],
- "//conditions:default": [],
- }),
- linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
- deps = [
- "//absl/base:config",
- "//absl/base:core_headers",
- ],
-)
-
-cc_library(
- name = "leak_check_api_disabled_for_testing",
- testonly = 1,
- srcs = ["leak_check.cc"],
- hdrs = ["leak_check.h"],
- copts = ["-ULEAK_SANITIZER"],
- linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = ["//visibility:private"],
- deps = [
- "//absl/base:config",
- "//absl/base:core_headers",
- ],
-)
-
cc_test(
name = "leak_check_test",
srcs = ["leak_check_test.cc"],
- copts = select({
- "//absl:clang_compiler": ["-DABSL_EXPECT_LEAK_SANITIZER"],
- "//conditions:default": [],
- }),
- linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
- tags = ["notsan"],
- deps = [
- ":leak_check_api_enabled_for_testing",
- "//absl/base",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
- name = "leak_check_no_lsan_test",
- srcs = ["leak_check_test.cc"],
- copts = ["-UABSL_EXPECT_LEAK_SANITIZER"],
+ copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = ["noasan"],
+ tags = ["notsan"],
deps = [
- ":leak_check_api_disabled_for_testing",
- "//absl/base", # for raw_logging
+ ":leak_check",
+ "//absl/base:config",
+ "//absl/base:raw_logging_internal",
"@com_google_googletest//:gtest_main",
],
)
-# Test that leak checking is skipped when lsan is enabled but
-# ":leak_check_disable" is linked in.
-#
-# This test should fail in the absence of a dependency on ":leak_check_disable"
-cc_test(
- name = "disabled_leak_check_test",
+# Binary that leaks memory and is expected to fail on exit. This isn't a
+# test that is expected to pass on its own; it exists to be called by a
+# script that checks exit status and output.
+# TODO(absl-team): Write a test to run this with a script that
+# verifies that it correctly fails.
+cc_binary(
+ name = "leak_check_fail_test_binary",
srcs = ["leak_check_fail_test.cc"],
- linkopts = ABSL_LSAN_LINKOPTS + ABSL_DEFAULT_LINKOPTS,
- tags = ["notsan"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
- ":leak_check_api_enabled_for_testing",
- ":leak_check_disable",
- "//absl/base",
+ ":leak_check",
+ "//absl/base:raw_logging_internal",
"@com_google_googletest//:gtest_main",
],
)
@@ -356,3 +292,18 @@ cc_test(
"@com_google_googletest//:gtest_main",
],
)
+
+cc_binary(
+ name = "stacktrace_benchmark",
+ testonly = 1,
+ srcs = ["stacktrace_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ deps = [
+ ":stacktrace",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
diff --git a/absl/debugging/CMakeLists.txt b/absl/debugging/CMakeLists.txt
index b16fa007..d8207d6a 100644
--- a/absl/debugging/CMakeLists.txt
+++ b/absl/debugging/CMakeLists.txt
@@ -14,6 +14,8 @@
# limitations under the License.
#
+find_library(EXECINFO_LIBRARY execinfo)
+
absl_cc_library(
NAME
stacktrace
@@ -33,6 +35,8 @@ absl_cc_library(
"stacktrace.cc"
COPTS
${ABSL_DEFAULT_COPTS}
+ LINKOPTS
+ $<$<BOOL:${EXECINFO_LIBRARY}>:${EXECINFO_LIBRARY}>
DEPS
absl::debugging_internal
absl::config
@@ -93,6 +97,7 @@ absl_cc_test(
GTest::gmock
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
examine_stack
@@ -125,7 +130,6 @@ absl_cc_library(
absl::base
absl::config
absl::core_headers
- absl::errno_saver
absl::raw_logging_internal
PUBLIC
)
@@ -147,6 +151,7 @@ absl_cc_test(
GTest::gmock
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
debugging_internal
@@ -168,6 +173,7 @@ absl_cc_library(
absl::raw_logging_internal
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
demangle_internal
@@ -215,42 +221,6 @@ absl_cc_library(
PUBLIC
)
-absl_cc_library(
- NAME
- leak_check_disable
- SRCS
- "leak_check_disable.cc"
- COPTS
- ${ABSL_DEFAULT_COPTS}
- PUBLIC
-)
-
-absl_cc_library(
- NAME
- leak_check_api_enabled_for_testing
- HDRS
- "leak_check.h"
- SRCS
- "leak_check.cc"
- COPTS
- ${ABSL_DEFAULT_COPTS}
- $<$<BOOL:${ABSL_HAVE_LSAN}>:-DLEAK_SANITIZER>
- TESTONLY
-)
-
-absl_cc_library(
- NAME
- leak_check_api_disabled_for_testing
- HDRS
- "leak_check.h"
- SRCS
- "leak_check.cc"
- COPTS
- ${ABSL_DEFAULT_COPTS}
- "-ULEAK_SANITIZER"
- TESTONLY
-)
-
absl_cc_test(
NAME
leak_check_test
@@ -258,46 +228,15 @@ absl_cc_test(
"leak_check_test.cc"
COPTS
${ABSL_TEST_COPTS}
- "$<$<BOOL:${ABSL_HAVE_LSAN}>:-DABSL_EXPECT_LEAK_SANITIZER>"
LINKOPTS
- "${ABSL_LSAN_LINKOPTS}"
- DEPS
- absl::leak_check_api_enabled_for_testing
- absl::base
- GTest::gmock_main
-)
-
-absl_cc_test(
- NAME
- leak_check_no_lsan_test
- SRCS
- "leak_check_test.cc"
- COPTS
- ${ABSL_TEST_COPTS}
- "-UABSL_EXPECT_LEAK_SANITIZER"
- DEPS
- absl::leak_check_api_disabled_for_testing
- absl::base
- GTest::gmock_main
-)
-
-absl_cc_test(
- NAME
- disabled_leak_check_test
- SRCS
- "leak_check_fail_test.cc"
- COPTS
- ${ABSL_TEST_COPTS}
- LINKOPTS
- "${ABSL_LSAN_LINKOPTS}"
+ ${ABSL_DEFAULT_LINKOPTS}
DEPS
- absl::leak_check_api_enabled_for_testing
- absl::leak_check_disable
+ absl::leak_check
absl::base
- absl::raw_logging_internal
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
stack_consumption
diff --git a/absl/debugging/failure_signal_handler.cc b/absl/debugging/failure_signal_handler.cc
index 689e5979..affade3b 100644
--- a/absl/debugging/failure_signal_handler.cc
+++ b/absl/debugging/failure_signal_handler.cc
@@ -42,7 +42,6 @@
#include <ctime>
#include "absl/base/attributes.h"
-#include "absl/base/internal/errno_saver.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/debugging/internal/examine_stack.h"
@@ -52,7 +51,7 @@
#define ABSL_HAVE_SIGACTION
// Apple WatchOS and TVOS don't allow sigaltstack
#if !(defined(TARGET_OS_WATCH) && TARGET_OS_WATCH) && \
- !(defined(TARGET_OS_TV) && TARGET_OS_TV)
+ !(defined(TARGET_OS_TV) && TARGET_OS_TV) && !defined(__QNX__)
#define ABSL_HAVE_SIGALTSTACK
#endif
#endif
@@ -217,8 +216,7 @@ static void InstallOneFailureHandler(FailureSignalData* data,
#endif
static void WriteToStderr(const char* data) {
- absl::base_internal::ErrnoSaver errno_saver;
- absl::raw_logging_internal::SafeWriteToStderr(data, strlen(data));
+ absl::raw_logging_internal::AsyncSignalSafeWriteToStderr(data, strlen(data));
}
static void WriteSignalMessage(int signo, int cpu,
diff --git a/absl/debugging/internal/address_is_readable.cc b/absl/debugging/internal/address_is_readable.cc
index 329c285f..4be6256b 100644
--- a/absl/debugging/internal/address_is_readable.cc
+++ b/absl/debugging/internal/address_is_readable.cc
@@ -30,16 +30,12 @@ bool AddressIsReadable(const void* /* addr */) { return true; }
ABSL_NAMESPACE_END
} // namespace absl
-#else
+#else // __linux__ && !__ANDROID__
-#include <fcntl.h>
-#include <sys/syscall.h>
+#include <stdint.h>
+#include <syscall.h>
#include <unistd.h>
-#include <atomic>
-#include <cerrno>
-#include <cstdint>
-
#include "absl/base/internal/errno_saver.h"
#include "absl/base/internal/raw_logging.h"
@@ -47,93 +43,54 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
-// Pack a pid and two file descriptors into a 64-bit word,
-// using 16, 24, and 24 bits for each respectively.
-static uint64_t Pack(uint64_t pid, uint64_t read_fd, uint64_t write_fd) {
- ABSL_RAW_CHECK((read_fd >> 24) == 0 && (write_fd >> 24) == 0,
- "fd out of range");
- return (pid << 48) | ((read_fd & 0xffffff) << 24) | (write_fd & 0xffffff);
-}
-
-// Unpack x into a pid and two file descriptors, where x was created with
-// Pack().
-static void Unpack(uint64_t x, int *pid, int *read_fd, int *write_fd) {
- *pid = x >> 48;
- *read_fd = (x >> 24) & 0xffffff;
- *write_fd = x & 0xffffff;
-}
+// NOTE: be extra careful about adding any interposable function calls here
+// (such as open(), read(), etc.). These symbols may be interposed and will get
+// invoked in contexts they don't expect.
+//
+// NOTE: any new system calls here may also require sandbox reconfiguration.
+//
+bool AddressIsReadable(const void *addr) {
+  // Align the address on an 8-byte boundary. On aarch64, checking the last
+  // byte before an inaccessible page returned an unexpected EFAULT.
+ const uintptr_t u_addr = reinterpret_cast<uintptr_t>(addr) & ~7;
+ addr = reinterpret_cast<const void *>(u_addr);
-// Return whether the byte at *addr is readable, without faulting.
-// Save and restores errno. Returns true on systems where
-// unimplemented.
-// This is a namespace-scoped variable for correct zero-initialization.
-static std::atomic<uint64_t> pid_and_fds; // initially 0, an invalid pid.
+ // rt_sigprocmask below will succeed for this input.
+ if (addr == nullptr) return false;
-bool AddressIsReadable(const void *addr) {
absl::base_internal::ErrnoSaver errno_saver;
- // We test whether a byte is readable by using write(). Normally, this would
- // be done via a cached file descriptor to /dev/null, but linux fails to
- // check whether the byte is readable when the destination is /dev/null, so
- // we use a cached pipe. We store the pid of the process that created the
- // pipe to handle the case where a process forks, and the child closes all
- // the file descriptors and then calls this routine. This is not perfect:
- // the child could use the routine, then close all file descriptors and then
- // use this routine again. But the likely use of this routine is when
- // crashing, to test the validity of pages when dumping the stack. Beware
- // that we may leak file descriptors, but we're unlikely to leak many.
- int bytes_written;
- int current_pid = getpid() & 0xffff; // we use only the low order 16 bits
- do { // until we do not get EBADF trying to use file descriptors
- int pid;
- int read_fd;
- int write_fd;
- uint64_t local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
- Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
- while (current_pid != pid) {
- int p[2];
- // new pipe
- if (pipe(p) != 0) {
- ABSL_RAW_LOG(FATAL, "Failed to create pipe, errno=%d", errno);
- }
- fcntl(p[0], F_SETFD, FD_CLOEXEC);
- fcntl(p[1], F_SETFD, FD_CLOEXEC);
- uint64_t new_pid_and_fds = Pack(current_pid, p[0], p[1]);
- if (pid_and_fds.compare_exchange_strong(
- local_pid_and_fds, new_pid_and_fds, std::memory_order_release,
- std::memory_order_relaxed)) {
- local_pid_and_fds = new_pid_and_fds; // fds exposed to other threads
- } else { // fds not exposed to other threads; we can close them.
- close(p[0]);
- close(p[1]);
- local_pid_and_fds = pid_and_fds.load(std::memory_order_acquire);
- }
- Unpack(local_pid_and_fds, &pid, &read_fd, &write_fd);
- }
- errno = 0;
- // Use syscall(SYS_write, ...) instead of write() to prevent ASAN
- // and other checkers from complaining about accesses to arbitrary
- // memory.
- do {
- bytes_written = syscall(SYS_write, write_fd, addr, 1);
- } while (bytes_written == -1 && errno == EINTR);
- if (bytes_written == 1) { // remove the byte from the pipe
- char c;
- while (read(read_fd, &c, 1) == -1 && errno == EINTR) {
- }
- }
- if (errno == EBADF) { // Descriptors invalid.
- // If pid_and_fds contains the problematic file descriptors we just used,
- // this call will forget them, and the loop will try again.
- pid_and_fds.compare_exchange_strong(local_pid_and_fds, 0,
- std::memory_order_release,
- std::memory_order_relaxed);
- }
- } while (errno == EBADF);
- return bytes_written == 1;
+
+ // Here we probe with some syscall which
+ // - accepts an 8-byte region of user memory as input
+ // - tests for EFAULT before other validation
+ // - has no problematic side-effects
+ //
+ // rt_sigprocmask(2) works for this. It copies sizeof(kernel_sigset_t)==8
+  // bytes from the address into kernel memory before any validation.
+ //
+ // The call can never succeed, since the `how` parameter is not one of
+ // SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK.
+ //
+ // This strategy depends on Linux implementation details,
+ // so we rely on the test to alert us if it stops working.
+ //
+ // Some discarded past approaches:
+ // - msync() doesn't reject PROT_NONE regions
+ // - write() on /dev/null doesn't return EFAULT
+ // - write() on a pipe requires creating it and draining the writes
+ // - connect() works but is problematic for sandboxes and needs a valid
+ // file descriptor
+ //
+ // This can never succeed (invalid first argument to sigprocmask).
+ ABSL_RAW_CHECK(syscall(SYS_rt_sigprocmask, ~0, addr, nullptr,
+ /*sizeof(kernel_sigset_t)*/ 8) == -1,
+ "unexpected success");
+ ABSL_RAW_CHECK(errno == EFAULT || errno == EINVAL, "unexpected errno");
+ return errno != EFAULT;
}
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl
-#endif
+#endif // __linux__ && !__ANDROID__
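The new probe is easy to exercise end-to-end: a PROT_NONE mapping must read as unreadable (the kernel hits EFAULT while copying the 8-byte sigset), while ordinary stack memory must read as readable. A Linux-only usage sketch (it assumes the internal header below remains includable from a test):

#include <sys/mman.h>
#include "absl/debugging/internal/address_is_readable.h"

int main() {
  void* page = mmap(nullptr, 4096, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page != MAP_FAILED) {
    // Expect false: rt_sigprocmask faults copying from the PROT_NONE page.
    if (absl::debugging_internal::AddressIsReadable(page)) return 1;
    munmap(page, 4096);
  }
  int on_stack = 0;
  // Expect true: the stack is, of course, readable.
  return absl::debugging_internal::AddressIsReadable(&on_stack) ? 0 : 1;
}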
diff --git a/absl/debugging/internal/elf_mem_image.cc b/absl/debugging/internal/elf_mem_image.cc
index 29a28181..a9d66714 100644
--- a/absl/debugging/internal/elf_mem_image.cc
+++ b/absl/debugging/internal/elf_mem_image.cc
@@ -351,7 +351,11 @@ void ElfMemImage::SymbolIterator::Update(int increment) {
const ElfW(Versym) *version_symbol = image->GetVersym(index_);
ABSL_RAW_CHECK(symbol && version_symbol, "");
const char *const symbol_name = image->GetDynstr(symbol->st_name);
+#if defined(__NetBSD__)
+ const int version_index = version_symbol->vs_vers & VERSYM_VERSION;
+#else
const ElfW(Versym) version_index = version_symbol[0] & VERSYM_VERSION;
+#endif
const ElfW(Verdef) *version_definition = nullptr;
const char *version_name = "";
if (symbol->st_shndx == SHN_UNDEF) {
diff --git a/absl/debugging/internal/elf_mem_image.h b/absl/debugging/internal/elf_mem_image.h
index a894bd42..113071a9 100644
--- a/absl/debugging/internal/elf_mem_image.h
+++ b/absl/debugging/internal/elf_mem_image.h
@@ -31,8 +31,9 @@
#error ABSL_HAVE_ELF_MEM_IMAGE cannot be directly set
#endif
-#if defined(__ELF__) && !defined(__native_client__) && !defined(__asmjs__) && \
- !defined(__wasm__)
+#if defined(__ELF__) && !defined(__OpenBSD__) && !defined(__QNX__) && \
+ !defined(__native_client__) && !defined(__asmjs__) && \
+ !defined(__wasm__) && !defined(__HAIKU__)
#define ABSL_HAVE_ELF_MEM_IMAGE 1
#endif
diff --git a/absl/debugging/internal/examine_stack.cc b/absl/debugging/internal/examine_stack.cc
index 589a3ef3..5bdd341e 100644
--- a/absl/debugging/internal/examine_stack.cc
+++ b/absl/debugging/internal/examine_stack.cc
@@ -20,7 +20,13 @@
#include <unistd.h>
#endif
-#ifdef __APPLE__
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_MMAP
+#include <sys/mman.h>
+#endif
+
+#if defined(__linux__) || defined(__APPLE__)
#include <sys/ucontext.h>
#endif
@@ -37,10 +43,115 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
+namespace {
+constexpr int kDefaultDumpStackFramesLimit = 64;
+// The %p field width for printf() functions is two characters per byte,
+// and two extra for the leading "0x".
+constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
+
+ABSL_CONST_INIT SymbolizeUrlEmitter debug_stack_trace_hook = nullptr;
+
+// Async-signal safe mmap allocator.
+void* Allocate(size_t num_bytes) {
+#ifdef ABSL_HAVE_MMAP
+ void* p = ::mmap(nullptr, num_bytes, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ return p == MAP_FAILED ? nullptr : p;
+#else
+ (void)num_bytes;
+ return nullptr;
+#endif // ABSL_HAVE_MMAP
+}
+
+void Deallocate(void* p, size_t size) {
+#ifdef ABSL_HAVE_MMAP
+ ::munmap(p, size);
+#else
+ (void)p;
+ (void)size;
+#endif // ABSL_HAVE_MMAP
+}
+
+// Print a program counter only.
+void DumpPC(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth, pc);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding stack frame size.
+void DumpPCAndFrameSize(OutputWriter* writer, void* writer_arg, void* const pc,
+ int framesize, const char* const prefix) {
+ char buf[100];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
+ kPrintfPointerFieldWidth, pc);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize);
+ }
+ writer(buf, writer_arg);
+}
+
+// Print a program counter and the corresponding symbol.
+void DumpPCAndSymbol(OutputWriter* writer, void* writer_arg, void* const pc,
+ const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ // Symbolizes the previous address of pc because pc may be in the
+ // next function. The overrun happens when the function ends with
+ // a call to a function annotated noreturn (e.g. CHECK).
+ // If symbolization of pc-1 fails, also try pc on the off-chance
+ // that we crashed on the first instruction of a function (that
+ // actually happens very often for e.g. __restore_rt).
+ const uintptr_t prev_pc = reinterpret_cast<uintptr_t>(pc) - 1;
+ if (absl::Symbolize(reinterpret_cast<const char*>(prev_pc), tmp,
+ sizeof(tmp)) ||
+ absl::Symbolize(pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix, kPrintfPointerFieldWidth,
+ pc, symbol);
+ writer(buf, writer_arg);
+}
+
+// Print a program counter, its stack frame size, and its symbol name.
+// Note that there is a separate symbolize_pc argument. Return addresses may be
+// at the end of the function, and this allows the caller to back up from pc if
+// appropriate.
+void DumpPCAndFrameSizeAndSymbol(OutputWriter* writer, void* writer_arg,
+ void* const pc, void* const symbolize_pc,
+ int framesize, const char* const prefix) {
+ char tmp[1024];
+ const char* symbol = "(unknown)";
+ if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
+ symbol = tmp;
+ }
+ char buf[1024];
+ if (framesize <= 0) {
+ snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, symbol);
+ } else {
+ snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
+ kPrintfPointerFieldWidth, pc, framesize, symbol);
+ }
+ writer(buf, writer_arg);
+}
+
+} // namespace
+
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook) {
+ debug_stack_trace_hook = hook;
+}
+
+SymbolizeUrlEmitter GetDebugStackTraceHook() { return debug_stack_trace_hook; }
+
// Returns the program counter from signal context, or nullptr if
// unknown. vuc is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc) {
+void* GetProgramCounter(void* const vuc) {
#ifdef __linux__
if (vuc != nullptr) {
ucontext_t* context = reinterpret_cast<ucontext_t*>(vuc);
@@ -82,6 +193,8 @@ void* GetProgramCounter(void* vuc) {
return reinterpret_cast<void*>(context->uc_mcontext.gregs[16]);
#elif defined(__e2k__)
return reinterpret_cast<void*>(context->uc_mcontext.cr0_hi);
+#elif defined(__loongarch__)
+ return reinterpret_cast<void*>(context->uc_mcontext.__pc);
#else
#error "Undefined Architecture."
#endif
@@ -120,59 +233,17 @@ void* GetProgramCounter(void* vuc) {
return nullptr;
}
-// The %p field width for printf() functions is two characters per byte,
-// and two extra for the leading "0x".
-static constexpr int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
-
-// Print a program counter, its stack frame size, and its symbol name.
-// Note that there is a separate symbolize_pc argument. Return addresses may be
-// at the end of the function, and this allows the caller to back up from pc if
-// appropriate.
-static void DumpPCAndFrameSizeAndSymbol(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc,
- void* symbolize_pc, int framesize,
- const char* const prefix) {
- char tmp[1024];
- const char* symbol = "(unknown)";
- if (absl::Symbolize(symbolize_pc, tmp, sizeof(tmp))) {
- symbol = tmp;
- }
- char buf[1024];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown) %s\n", prefix,
- kPrintfPointerFieldWidth, pc, symbol);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d %s\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize, symbol);
- }
- writerfn(buf, writerfn_arg);
-}
-
-// Print a program counter and the corresponding stack frame size.
-static void DumpPCAndFrameSize(void (*writerfn)(const char*, void*),
- void* writerfn_arg, void* pc, int framesize,
- const char* const prefix) {
- char buf[100];
- if (framesize <= 0) {
- snprintf(buf, sizeof(buf), "%s@ %*p (unknown)\n", prefix,
- kPrintfPointerFieldWidth, pc);
- } else {
- snprintf(buf, sizeof(buf), "%s@ %*p %9d\n", prefix,
- kPrintfPointerFieldWidth, pc, framesize);
- }
- writerfn(buf, writerfn_arg);
-}
-
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg) {
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg) {
if (pc != nullptr) {
// We don't know the stack frame size for PC, use 0.
if (symbolize_stacktrace) {
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, pc, pc, 0, "PC: ");
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, pc, pc, 0, "PC: ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, pc, 0, "PC: ");
+ DumpPCAndFrameSize(writer, writer_arg, pc, 0, "PC: ");
}
}
for (int i = 0; i < depth; i++) {
@@ -182,20 +253,61 @@ void DumpPCAndFrameSizesAndStackTrace(
// call to a function annotated noreturn (e.g. CHECK). Note that we don't
// do this for pc above, as the adjustment is only correct for return
// addresses.
- DumpPCAndFrameSizeAndSymbol(writerfn, writerfn_arg, stack[i],
+ DumpPCAndFrameSizeAndSymbol(writer, writer_arg, stack[i],
reinterpret_cast<char*>(stack[i]) - 1,
frame_sizes[i], " ");
} else {
- DumpPCAndFrameSize(writerfn, writerfn_arg, stack[i], frame_sizes[i],
- " ");
+ DumpPCAndFrameSize(writer, writer_arg, stack[i], frame_sizes[i], " ");
}
}
if (min_dropped_frames > 0) {
char buf[100];
snprintf(buf, sizeof(buf), " @ ... and at least %d more frames\n",
min_dropped_frames);
- writerfn(buf, writerfn_arg);
+ writer(buf, writer_arg);
+ }
+}
+
+// Dump current stack trace as directed by writer.
+// Make sure this function is not inlined to avoid skipping too many top frames.
+ABSL_ATTRIBUTE_NOINLINE
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg) {
+ // Print stack trace
+ void* stack_buf[kDefaultDumpStackFramesLimit];
+ void** stack = stack_buf;
+ int num_stack = kDefaultDumpStackFramesLimit;
+ int allocated_bytes = 0;
+
+ if (num_stack >= max_num_frames) {
+ // User requested fewer frames than we already have space for.
+ num_stack = max_num_frames;
+ } else {
+ const size_t needed_bytes = max_num_frames * sizeof(stack[0]);
+ void* p = Allocate(needed_bytes);
+ if (p != nullptr) { // We got the space.
+ num_stack = max_num_frames;
+ stack = reinterpret_cast<void**>(p);
+ allocated_bytes = needed_bytes;
+ }
}
+
+ size_t depth = absl::GetStackTrace(stack, num_stack, min_dropped_frames + 1);
+ for (size_t i = 0; i < depth; i++) {
+ if (symbolize_stacktrace) {
+ DumpPCAndSymbol(writer, writer_arg, stack[i], " ");
+ } else {
+ DumpPC(writer, writer_arg, stack[i], " ");
+ }
+ }
+
+ auto hook = GetDebugStackTraceHook();
+ if (hook != nullptr) {
+ (*hook)(stack, depth, writer, writer_arg);
+ }
+
+ if (allocated_bytes != 0) Deallocate(stack, allocated_bytes);
}
} // namespace debugging_internal
diff --git a/absl/debugging/internal/examine_stack.h b/absl/debugging/internal/examine_stack.h
index 39336913..190af87f 100644
--- a/absl/debugging/internal/examine_stack.h
+++ b/absl/debugging/internal/examine_stack.h
@@ -23,17 +23,39 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
+// Type of function used for printing in stack trace dumping, etc.
+// We avoid closures to keep things simple.
+typedef void OutputWriter(const char*, void*);
+
+// RegisterDebugStackTraceHook() allows registering a single routine
+// `hook` that is called each time DumpStackTrace() is called.
+// `hook` may be called from a signal handler.
+typedef void (*SymbolizeUrlEmitter)(void* const stack[], int depth,
+ OutputWriter* writer, void* writer_arg);
+
+// Registration of SymbolizeUrlEmitter for use inside of a signal handler.
+// This is inherently unsafe, so the hook must be async-signal-safe.
+void RegisterDebugStackTraceHook(SymbolizeUrlEmitter hook);
+SymbolizeUrlEmitter GetDebugStackTraceHook();
+
// Returns the program counter from signal context, or nullptr if
// unknown. `vuc` is a ucontext_t*. We use void* to avoid the use of
// ucontext_t on non-POSIX systems.
-void* GetProgramCounter(void* vuc);
+void* GetProgramCounter(void* const vuc);
-// Uses `writerfn` to dump the program counter, stack trace, and stack
+// Uses `writer` to dump the program counter, stack trace, and stack
// frame sizes.
-void DumpPCAndFrameSizesAndStackTrace(
- void* pc, void* const stack[], int frame_sizes[], int depth,
- int min_dropped_frames, bool symbolize_stacktrace,
- void (*writerfn)(const char*, void*), void* writerfn_arg);
+void DumpPCAndFrameSizesAndStackTrace(void* const pc, void* const stack[],
+ int frame_sizes[], int depth,
+ int min_dropped_frames,
+ bool symbolize_stacktrace,
+ OutputWriter* writer, void* writer_arg);
+
+// Dump current stack trace omitting the topmost `min_dropped_frames` stack
+// frames.
+void DumpStackTrace(int min_dropped_frames, int max_num_frames,
+ bool symbolize_stacktrace, OutputWriter* writer,
+ void* writer_arg);
} // namespace debugging_internal
ABSL_NAMESPACE_END
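A hedged usage sketch of the new DumpStackTrace() entry point, with a writer that forwards to stderr (OutputWriter is the plain function type declared above; the frame counts are illustrative):

#include <cstdio>
#include "absl/debugging/internal/examine_stack.h"

static void StderrWriter(const char* text, void* /*writer_arg*/) {
  std::fputs(text, stderr);
}

void ReportHere() {
  // Drop this frame, cap the dump at 32 frames, and symbolize each PC.
  absl::debugging_internal::DumpStackTrace(
      /*min_dropped_frames=*/1, /*max_num_frames=*/32,
      /*symbolize_stacktrace=*/true, StderrWriter, /*writer_arg=*/nullptr);
}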
diff --git a/absl/debugging/internal/stacktrace_aarch64-inl.inc b/absl/debugging/internal/stacktrace_aarch64-inl.inc
index f4859d7c..4f9db9d6 100644
--- a/absl/debugging/internal/stacktrace_aarch64-inl.inc
+++ b/absl/debugging/internal/stacktrace_aarch64-inl.inc
@@ -176,12 +176,17 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
frame_pointer =
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
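This same accounting fix recurs below for the arm and powerpc unwinders: previously every frame walked past max_depth counted as dropped, including the skip_count frames the caller asked to omit. A miniature of the corrected arithmetic (hypothetical helper, not the unwinder itself):

// CountDropped(10, 3) == 7: three of the ten remaining frames were
// requested skips, so only seven would actually have been returned.
inline int CountDropped(int remaining_frames, int skip_count) {
  int dropped = 0;
  for (int j = 0; j < remaining_frames; ++j) {
    if (skip_count > 0) {
      --skip_count;  // frames the caller asked to skip are not "dropped"
    } else {
      ++dropped;
    }
  }
  return dropped;
}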
diff --git a/absl/debugging/internal/stacktrace_arm-inl.inc b/absl/debugging/internal/stacktrace_arm-inl.inc
index 2a1bf2e8..102a2a12 100644
--- a/absl/debugging/internal/stacktrace_arm-inl.inc
+++ b/absl/debugging/internal/stacktrace_arm-inl.inc
@@ -112,11 +112,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
// Implementation detail: we clamp the max of frames we are willing to
// count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; sp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; sp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
sp = NextStackFrame<!IS_STACK_FRAMES>(sp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/absl/debugging/internal/stacktrace_config.h b/absl/debugging/internal/stacktrace_config.h
index ff21b719..3929b1b7 100644
--- a/absl/debugging/internal/stacktrace_config.h
+++ b/absl/debugging/internal/stacktrace_config.h
@@ -37,7 +37,8 @@
"absl/debugging/internal/stacktrace_generic-inl.inc"
#endif // defined(ABSL_HAVE_THREAD_LOCAL)
-#elif defined(__EMSCRIPTEN__)
+// Emscripten stacktraces rely on JS. Do not use them in standalone mode.
+#elif defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
#define ABSL_STACKTRACE_INL_HEADER \
"absl/debugging/internal/stacktrace_emscripten-inl.inc"
diff --git a/absl/debugging/internal/stacktrace_powerpc-inl.inc b/absl/debugging/internal/stacktrace_powerpc-inl.inc
index cf8c0516..085cef67 100644
--- a/absl/debugging/internal/stacktrace_powerpc-inl.inc
+++ b/absl/debugging/internal/stacktrace_powerpc-inl.inc
@@ -231,11 +231,16 @@ static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 1000;
- int j = 0;
- for (; next_sp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; next_sp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
next_sp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(next_sp, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/absl/debugging/internal/stacktrace_riscv-inl.inc b/absl/debugging/internal/stacktrace_riscv-inl.inc
index 8cbc7854..7123b71b 100644
--- a/absl/debugging/internal/stacktrace_riscv-inl.inc
+++ b/absl/debugging/internal/stacktrace_riscv-inl.inc
@@ -56,7 +56,7 @@ static const unsigned char *GetKernelRtSigreturnAddress() {
absl::debugging_internal::VDSOSupport::SymbolInfo symbol_info;
// Symbol versioning pulled from arch/riscv/kernel/vdso/vdso.lds at v5.10.
auto lookup = [&](int type) {
- return vdso.LookupSymbol("__kernel_rt_sigreturn", "LINUX_4.15", type,
+ return vdso.LookupSymbol("__vdso_rt_sigreturn", "LINUX_4.15", type,
&symbol_info);
};
if ((!lookup(STT_FUNC) && !lookup(STT_NOTYPE)) ||
@@ -171,26 +171,21 @@ ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // May read random elements from stack.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // May read random elements from stack.
static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
const void *ucp, int *min_dropped_frames) {
+ // The `frame_pointer` that is computed here points to the top of the frame.
+ // The two words preceding the address are the return address and the previous
+ // frame pointer.
#if defined(__GNUC__)
void **frame_pointer = reinterpret_cast<void **>(__builtin_frame_address(0));
#else
#error reading stack pointer not yet supported on this platform
#endif
- skip_count++; // Skip the frame for this function.
int n = 0;
-
- // The `frame_pointer` that is computed here points to the top of the frame.
- // The two words preceding the address are the return address and the previous
- // frame pointer. To find a PC value associated with the current frame, we
- // need to go down a level in the call chain. So we remember the return
- // address of the last frame seen. This does not work for the first stack
- // frame, which belongs to `UnwindImp()` but we skip the frame for
- // `UnwindImp()` anyway.
- void *prev_return_address = nullptr;
-
+ void *return_address = nullptr;
while (frame_pointer && n < max_depth) {
- // The absl::GetStackFrames routine si called when we are in some
+ return_address = frame_pointer[-1];
+
+ // The absl::GetStackFrames routine is called when we are in some
// informational context (the failure signal handler for example). Use the
// non-strict unwinding rules to produce a stack trace that is as complete
// as possible (even if it contains a few bogus entries in some rare cases).
@@ -200,26 +195,33 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
if (skip_count > 0) {
skip_count--;
} else {
- result[n] = prev_return_address;
+ result[n] = return_address;
if (IS_STACK_FRAMES) {
sizes[n] = ComputeStackFrameSize(frame_pointer, next_frame_pointer);
}
n++;
}
- prev_return_address = frame_pointer[-1];
+
frame_pointer = next_frame_pointer;
}
+
if (min_dropped_frames != nullptr) {
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 200;
- int j = 0;
- for (; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; frame_pointer != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
frame_pointer =
NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(frame_pointer, ucp);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
+
return n;
}
diff --git a/absl/debugging/internal/stacktrace_x86-inl.inc b/absl/debugging/internal/stacktrace_x86-inl.inc
index 847a5473..1b5d8235 100644
--- a/absl/debugging/internal/stacktrace_x86-inl.inc
+++ b/absl/debugging/internal/stacktrace_x86-inl.inc
@@ -341,12 +341,17 @@ static int UnwindImpl(void **result, int *sizes, int max_depth, int skip_count,
    // Implementation detail: we clamp the maximum number of frames we are
    // willing to count, so as not to spend too much time in the loop below.
const int kMaxUnwind = 1000;
- int j = 0;
- for (; fp != nullptr && j < kMaxUnwind; j++) {
+ int num_dropped_frames = 0;
+ for (int j = 0; fp != nullptr && j < kMaxUnwind; j++) {
+ if (skip_count > 0) {
+ skip_count--;
+ } else {
+ num_dropped_frames++;
+ }
fp = NextStackFrame<!IS_STACK_FRAMES, IS_WITH_CONTEXT>(fp, ucp, stack_low,
stack_high);
}
- *min_dropped_frames = j;
+ *min_dropped_frames = num_dropped_frames;
}
return n;
}
diff --git a/absl/debugging/internal/vdso_support.cc b/absl/debugging/internal/vdso_support.cc
index 977a9f6b..40eb055f 100644
--- a/absl/debugging/internal/vdso_support.cc
+++ b/absl/debugging/internal/vdso_support.cc
@@ -33,7 +33,7 @@
#endif
#include <unistd.h>
-#if defined(__GLIBC__) && \
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
(__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
#define ABSL_HAVE_GETAUXVAL
#endif
@@ -50,8 +50,14 @@
#define AT_SYSINFO_EHDR 33 // for crosstoolv10
#endif
+#if defined(__NetBSD__)
+using Elf32_auxv_t = Aux32Info;
+using Elf64_auxv_t = Aux64Info;
+#endif
#if defined(__FreeBSD__)
+#if defined(__ELF_WORD_SIZE) && __ELF_WORD_SIZE == 64
using Elf64_auxv_t = Elf64_Auxinfo;
+#endif
using Elf32_auxv_t = Elf32_Auxinfo;
#endif
@@ -63,7 +69,9 @@ ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
debugging_internal::ElfMemImage::kInvalidBase);
-std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
+ABSL_CONST_INIT std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(
+ &InitAndGetCPU);
+
VDSOSupport::VDSOSupport()
// If vdso_base_ is still set to kInvalidBase, we got here
// before VDSOSupport::Init has been called. Call it now.
@@ -104,8 +112,13 @@ const void *VDSOSupport::Init() {
ElfW(auxv_t) aux;
while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
if (aux.a_type == AT_SYSINFO_EHDR) {
+#if defined(__NetBSD__)
+ vdso_base_.store(reinterpret_cast<void *>(aux.a_v),
+ std::memory_order_relaxed);
+#else
vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
std::memory_order_relaxed);
+#endif
break;
}
}
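
The NetBSD/FreeBSD typedefs and the a_v versus a_un.a_val split above all feed the same discovery logic: find the AT_SYSINFO_EHDR auxiliary-vector entry carrying the vDSO base address. A hedged, Linux-only sketch of the fallback path used when getauxval is unavailable:

#include <elf.h>
#include <fcntl.h>
#include <link.h>  // ElfW
#include <unistd.h>

const void* FindVdsoBase() {
  int fd = open("/proc/self/auxv", O_RDONLY);
  if (fd < 0) return nullptr;
  const void* base = nullptr;
  ElfW(auxv_t) aux;
  while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
    if (aux.a_type == AT_SYSINFO_EHDR) {  // Address of the vDSO ELF header.
      base = reinterpret_cast<const void*>(aux.a_un.a_val);
      break;
    }
  }
  close(fd);
  return base;
}
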
diff --git a/absl/debugging/leak_check.cc b/absl/debugging/leak_check.cc
index 764ca0ad..195e82bf 100644
--- a/absl/debugging/leak_check.cc
+++ b/absl/debugging/leak_check.cc
@@ -11,29 +11,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
+//
// Wrappers around lsan_interface functions.
-// When lsan is not linked in, these functions are not available,
-// therefore Abseil code which depends on these functions is conditioned on the
-// definition of LEAK_SANITIZER.
-#include "absl/base/attributes.h"
-#include "absl/debugging/leak_check.h"
+//
+// These are always-available run-time functions manipulating the LeakSanitizer,
+// even when the lsan_interface (and LeakSanitizer) is not available. When
+// LeakSanitizer is not linked in, these functions become no-op stubs.
-#ifndef LEAK_SANITIZER
+#include "absl/debugging/leak_check.h"
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-bool HaveLeakSanitizer() { return false; }
-bool LeakCheckerIsActive() { return false; }
-void DoIgnoreLeak(const void*) { }
-void RegisterLivePointers(const void*, size_t) { }
-void UnRegisterLivePointers(const void*, size_t) { }
-LeakCheckDisabler::LeakCheckDisabler() { }
-LeakCheckDisabler::~LeakCheckDisabler() { }
-ABSL_NAMESPACE_END
-} // namespace absl
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
-#else
+#if defined(ABSL_HAVE_LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
@@ -66,4 +56,18 @@ LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
ABSL_NAMESPACE_END
} // namespace absl
-#endif // LEAK_SANITIZER
+#else // defined(ABSL_HAVE_LEAK_SANITIZER)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+bool HaveLeakSanitizer() { return false; }
+bool LeakCheckerIsActive() { return false; }
+void DoIgnoreLeak(const void*) { }
+void RegisterLivePointers(const void*, size_t) { }
+void UnRegisterLivePointers(const void*, size_t) { }
+LeakCheckDisabler::LeakCheckDisabler() { }
+LeakCheckDisabler::~LeakCheckDisabler() { }
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // defined(ABSL_HAVE_LEAK_SANITIZER)
diff --git a/absl/debugging/leak_check.h b/absl/debugging/leak_check.h
index 5fc2b052..eff162f6 100644
--- a/absl/debugging/leak_check.h
+++ b/absl/debugging/leak_check.h
@@ -24,7 +24,24 @@
// Note: this leak checking API is not yet supported in MSVC.
// Leak checking is enabled by default in all ASan builds.
//
-// See https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// GCC and Clang both automatically enable LeakSanitizer when AddressSanitizer
+// is enabled. To use the mode, simply pass `-fsanitize=address` to both the
+// compiler and linker. An example Bazel command could be
+//
+// $ bazel test --copt=-fsanitize=address --linkopt=-fsanitize=address ...
+//
+// GCC and Clang also support a standalone LeakSanitizer mode (a mode which
+// does not also use AddressSanitizer). To use the mode, simply pass
+// `-fsanitize=leak` to both the compiler and linker. Since GCC does not
+// currently provide a way of detecting this mode at compile-time, GCC users
+// must also pass -DLEAK_SANITIZER to the compiler. An example Bazel command
+// could be
+//
+// $ bazel test --copt=-DLEAK_SANITIZER --copt=-fsanitize=leak
+// --linkopt=-fsanitize=leak ...
//
// -----------------------------------------------------------------------------
#ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
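
A short usage sketch of the API this header documents (the allocations are illustrative):

#include <string>
#include "absl/debugging/leak_check.h"

void Example() {
  // Deliberately leaked, but annotated so the leak checker stays quiet.
  std::string* s = absl::IgnoreLeak(new std::string("intentional"));
  (void)s;

  // Alternatively, suppress leak reports for everything allocated in a scope.
  {
    absl::LeakCheckDisabler disabler;
    std::string* t = new std::string("also intentional");
    (void)t;
  }
}
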
diff --git a/absl/debugging/leak_check_test.cc b/absl/debugging/leak_check_test.cc
index 9fcfc8e5..6a42e31b 100644
--- a/absl/debugging/leak_check_test.cc
+++ b/absl/debugging/leak_check_test.cc
@@ -15,27 +15,24 @@
#include <string>
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/debugging/leak_check.h"
namespace {
-TEST(LeakCheckTest, DetectLeakSanitizer) {
-#ifdef ABSL_EXPECT_LEAK_SANITIZER
- EXPECT_TRUE(absl::HaveLeakSanitizer());
- EXPECT_TRUE(absl::LeakCheckerIsActive());
-#else
- EXPECT_FALSE(absl::HaveLeakSanitizer());
- EXPECT_FALSE(absl::LeakCheckerIsActive());
-#endif
-}
-
TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
+ if (!absl::LeakCheckerIsActive()) {
+ GTEST_SKIP() << "LeakChecker is not active";
+ }
auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
}
TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
+ if (!absl::LeakCheckerIsActive()) {
+ GTEST_SKIP() << "LeakChecker is not active";
+ }
absl::LeakCheckDisabler disabler;
auto foo = new std::string("some string leaked while checks are disabled");
ABSL_RAW_LOG(INFO, "Ignoring leaked string %s", foo->c_str());
diff --git a/absl/debugging/stacktrace_benchmark.cc b/absl/debugging/stacktrace_benchmark.cc
new file mode 100644
index 00000000..9360bafe
--- /dev/null
+++ b/absl/debugging/stacktrace_benchmark.cc
@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/debugging/stacktrace.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+static constexpr int kMaxStackDepth = 100;
+static constexpr int kCacheSize = (1 << 16);
+void* pcs[kMaxStackDepth];
+
+ABSL_ATTRIBUTE_NOINLINE void func(benchmark::State& state, int x, int depth) {
+ if (x <= 0) {
+    // Touch a significant amount of memory so that the stack is unlikely to
+    // be cached in the L1 cache.
+ state.PauseTiming();
+ int* arr = new int[kCacheSize];
+ for (int i = 0; i < kCacheSize; ++i) benchmark::DoNotOptimize(arr[i] = 100);
+ delete[] arr;
+ state.ResumeTiming();
+ benchmark::DoNotOptimize(absl::GetStackTrace(pcs, depth, 0));
+ return;
+ }
+ ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
+ func(state, --x, depth);
+}
+
+void BM_GetStackTrace(benchmark::State& state) {
+ int depth = state.range(0);
+ for (auto s : state) {
+ func(state, depth, depth);
+ }
+}
+
+BENCHMARK(BM_GetStackTrace)->DenseRange(10, kMaxStackDepth, 10);
+} // namespace
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/debugging/symbolize.cc b/absl/debugging/symbolize.cc
index f1abdfda..638d3954 100644
--- a/absl/debugging/symbolize.cc
+++ b/absl/debugging/symbolize.cc
@@ -23,6 +23,11 @@
#endif
#endif
+// Emscripten symbolization relies on JS. Do not use it in standalone mode.
+#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
+#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM
+#endif
+
#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
#include "absl/debugging/symbolize_elf.inc"
#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32)
@@ -31,7 +36,7 @@
#include "absl/debugging/symbolize_win32.inc"
#elif defined(__APPLE__)
#include "absl/debugging/symbolize_darwin.inc"
-#elif defined(__EMSCRIPTEN__)
+#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM)
#include "absl/debugging/symbolize_emscripten.inc"
#else
#include "absl/debugging/symbolize_unimplemented.inc"
diff --git a/absl/debugging/symbolize_elf.inc b/absl/debugging/symbolize_elf.inc
index 3ff343d6..9bfdd915 100644
--- a/absl/debugging/symbolize_elf.inc
+++ b/absl/debugging/symbolize_elf.inc
@@ -323,6 +323,7 @@ class Symbolizer {
const ptrdiff_t relocation,
char *out, int out_size,
char *tmp_buf, int tmp_buf_size);
+ const char *GetUncachedSymbol(const void *pc);
enum {
SYMBOL_BUF_SIZE = 3072,
@@ -1145,6 +1146,14 @@ bool Symbolizer::RegisterObjFile(const char *filename,
reinterpret_cast<uintptr_t>(old->end_addr), old->filename);
}
return true;
+ } else if (old->end_addr == start_addr &&
+ reinterpret_cast<uintptr_t>(old->start_addr) - old->offset ==
+ reinterpret_cast<uintptr_t>(start_addr) - offset &&
+ strcmp(old->filename, filename) == 0) {
+ // Two contiguous map entries that span a contiguous region of the file,
+ // perhaps because some part of the file was mlock()ed. Combine them.
+ old->end_addr = end_addr;
+ return true;
}
}
ObjFile *obj = impl->addr_map_.Add();
@@ -1333,13 +1342,7 @@ static bool MaybeInitializeObjFile(ObjFile *obj) {
// they are called here as well.
// To keep stack consumption low, we would like this function to not
// get inlined.
-const char *Symbolizer::GetSymbol(const void *const pc) {
- const char *entry = FindSymbolInCache(pc);
- if (entry != nullptr) {
- return entry;
- }
- symbol_buf_[0] = '\0';
-
+const char *Symbolizer::GetUncachedSymbol(const void *pc) {
ObjFile *const obj = FindObjFile(pc, 1);
ptrdiff_t relocation = 0;
int fd = -1;
@@ -1427,6 +1430,42 @@ const char *Symbolizer::GetSymbol(const void *const pc) {
return InsertSymbolInCache(pc, symbol_buf_);
}
+const char *Symbolizer::GetSymbol(const void *pc) {
+ const char *entry = FindSymbolInCache(pc);
+ if (entry != nullptr) {
+ return entry;
+ }
+ symbol_buf_[0] = '\0';
+
+#ifdef __hppa__
+ {
+ // In some contexts (e.g., return addresses), PA-RISC uses the lowest two
+ // bits of the address to indicate the privilege level. Clear those bits
+ // before trying to symbolize.
+ const auto pc_bits = reinterpret_cast<uintptr_t>(pc);
+ const auto address = pc_bits & ~0x3;
+ entry = GetUncachedSymbol(reinterpret_cast<const void *>(address));
+ if (entry != nullptr) {
+ return entry;
+ }
+
+ // In some contexts, PA-RISC also uses bit 1 of the address to indicate that
+ // this is a cross-DSO function pointer. Such function pointers actually
+ // point to a procedure label, a struct whose first 32-bit (pointer) element
+ // actually points to the function text. With no symbol found for this
+ // address so far, try interpreting it as a cross-DSO function pointer and
+ // see how that goes.
+ if (pc_bits & 0x2) {
+ return GetUncachedSymbol(*reinterpret_cast<const void *const *>(address));
+ }
+
+ return nullptr;
+ }
+#else
+ return GetUncachedSymbol(pc);
+#endif
+}
+
bool RemoveAllSymbolDecorators(void) {
if (!g_decorators_mu.TryLock()) {
// Someone else is using decorators. Get out.
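
For reference, the public entry point that eventually reaches GetSymbol() above is absl::Symbolize; a minimal sketch of a caller:

#include <cstdio>
#include "absl/debugging/symbolize.h"

void PrintSymbol(const void* pc) {
  char name[1024];
  if (absl::Symbolize(pc, name, static_cast<int>(sizeof(name)))) {
    std::printf("%p  %s\n", pc, name);
  } else {
    std::printf("%p  (unknown)\n", pc);
  }
}
// absl::InitializeSymbolizer(argv[0]) must run once, early in main().
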
diff --git a/absl/debugging/symbolize_test.cc b/absl/debugging/symbolize_test.cc
index c710a3da..3165c6ed 100644
--- a/absl/debugging/symbolize_test.cc
+++ b/absl/debugging/symbolize_test.cc
@@ -392,12 +392,14 @@ TEST(Symbolize, InstallAndRemoveSymbolDecorators) {
DummySymbolDecorator, &c_message),
0);
- char *address = reinterpret_cast<char *>(1);
- EXPECT_STREQ("abc", TrySymbolize(address++));
+ // Use addresses 4 and 8 here to ensure that we always use valid addresses
+ // even on systems that require instructions to be 32-bit aligned.
+ char *address = reinterpret_cast<char *>(4);
+ EXPECT_STREQ("abc", TrySymbolize(address));
EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b));
- EXPECT_STREQ("ac", TrySymbolize(address++));
+ EXPECT_STREQ("ac", TrySymbolize(address + 4));
// Cleanup: remove all remaining decorators so other stack traces don't
// get mystery "ac" decoration.
@@ -481,7 +483,8 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
}
}
-#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+ ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
// Test that we correctly identify bounds of Thumb functions on ARM.
//
// Thumb functions have the lowest-order bit set in their addresses in the ELF
@@ -500,6 +503,10 @@ void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
// bit in the Thumb function's entry point. It will correctly compute the end of
// the Thumb function, it will find no overlap between the Thumb and ARM
// functions, and it will return the name of the ARM function.
+//
+// Unfortunately we cannot perform this test on armv6 or lower systems that use
+// the hard float ABI because gcc refuses to compile thumb functions on such
+// systems with a "sorry, unimplemented: Thumb-1 hard-float VFP ABI" error.
__attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) {
return x * x * x;
@@ -519,7 +526,8 @@ void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() {
#endif
}
-#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#endif // defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && ((__ARM_ARCH >= 7)
+ // || !defined(__ARM_PCS_VFP))
#elif defined(_WIN32)
#if !defined(ABSL_CONSUME_DLL)
@@ -594,7 +602,8 @@ int main(int argc, char **argv) {
TestWithPCInsideInlineFunction();
TestWithPCInsideNonInlineFunction();
TestWithReturnAddress();
-#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target)
+#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
+ ((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
TestArmThumbOverlap();
#endif
#endif
diff --git a/absl/flags/BUILD.bazel b/absl/flags/BUILD.bazel
index d20deab4..4ca687ee 100644
--- a/absl/flags/BUILD.bazel
+++ b/absl/flags/BUILD.bazel
@@ -100,6 +100,7 @@ cc_library(
"//absl/base:log_severity",
"//absl/strings",
"//absl/strings:str_format",
+ "//absl/types:optional",
],
)
@@ -113,6 +114,9 @@ cc_library(
],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = [
+ "//visibility:private",
+ ],
deps = [
"//absl/base:config",
"//absl/base:fast_type_id",
@@ -204,6 +208,7 @@ cc_library(
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
@@ -321,6 +326,11 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_android",
+ "no_test_ios",
+ "no_test_wasm",
+ ],
deps = [
":commandlineflag",
":commandlineflag_internal",
@@ -357,6 +367,11 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_android",
+ "no_test_ios",
+ "no_test_wasm",
+ ],
deps = [
":config",
":flag",
@@ -418,6 +433,11 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_android",
+ "no_test_ios",
+ "no_test_wasm",
+ ],
deps = [
":flag",
":parse",
@@ -453,6 +473,7 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["no_test_wasm"],
deps = [
":program_name",
"//absl/strings",
@@ -468,6 +489,11 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_android",
+ "no_test_ios",
+ "no_test_wasm",
+ ],
deps = [
":commandlineflag_internal",
":flag",
@@ -490,6 +516,7 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
shard_count = 31,
+ tags = ["no_test_wasm"],
deps = [
":flag_internal",
"//absl/base",
@@ -524,6 +551,11 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_android",
+ "no_test_ios",
+ "no_test_wasm",
+ ],
deps = [
":config",
":flag",
diff --git a/absl/flags/CMakeLists.txt b/absl/flags/CMakeLists.txt
index 7f3298e9..3e9d5adf 100644
--- a/absl/flags/CMakeLists.txt
+++ b/absl/flags/CMakeLists.txt
@@ -87,6 +87,7 @@ absl_cc_library(
absl::config
absl::core_headers
absl::log_severity
+ absl::optional
absl::strings
absl::str_format
)
@@ -105,6 +106,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
+ absl::dynamic_annotations
absl::fast_type_id
)
@@ -464,5 +466,5 @@ absl_cc_test(
absl::flags_reflection
absl::flags_usage
absl::strings
- GTest::gtest
+ GTest::gmock
)
diff --git a/absl/flags/config.h b/absl/flags/config.h
index 5ab1f311..14c4235b 100644
--- a/absl/flags/config.h
+++ b/absl/flags/config.h
@@ -45,14 +45,6 @@
#define ABSL_FLAGS_STRIP_HELP ABSL_FLAGS_STRIP_NAMES
#endif
-// ABSL_FLAGS_INTERNAL_HAS_RTTI macro is used for selecting if we can use RTTI
-// for flag type identification.
-#ifdef ABSL_FLAGS_INTERNAL_HAS_RTTI
-#error ABSL_FLAGS_INTERNAL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
-#define ABSL_FLAGS_INTERNAL_HAS_RTTI 1
-#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
-
// These macros represent the "source of truth" for the list of supported
// built-in types.
#define ABSL_FLAGS_INTERNAL_BUILTIN_TYPES(A) \
diff --git a/absl/flags/declare.h b/absl/flags/declare.h
index b9794d8b..d1437bb9 100644
--- a/absl/flags/declare.h
+++ b/absl/flags/declare.h
@@ -60,6 +60,14 @@ ABSL_NAMESPACE_END
// The ABSL_DECLARE_FLAG(type, name) macro expands to:
//
// extern absl::Flag<type> FLAGS_name;
-#define ABSL_DECLARE_FLAG(type, name) extern ::absl::Flag<type> FLAGS_##name
+#define ABSL_DECLARE_FLAG(type, name) ABSL_DECLARE_FLAG_INTERNAL(type, name)
+
+// Internal implementation of ABSL_DECLARE_FLAG to allow macro expansion of its
+// arguments. Clients must use ABSL_DECLARE_FLAG instead.
+#define ABSL_DECLARE_FLAG_INTERNAL(type, name) \
+ extern absl::Flag<type> FLAGS_##name; \
+ namespace absl /* block flags in namespaces */ {} \
+ /* second redeclaration is to allow applying attributes */ \
+ extern absl::Flag<type> FLAGS_##name
#endif // ABSL_FLAGS_DECLARE_H_
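
The flag_test.cc hunk further below exercises this macro expansion; in isolation the pattern looks like the following (the prefix and flag name are hypothetical):

// Hypothetical macro-generated flag name.
#define MY_FLAG_NAME(name) myprefix_##name

ABSL_DECLARE_FLAG(int, MY_FLAG_NAME(verbosity));
// After expansion this declares:
//   extern absl::Flag<int> FLAGS_myprefix_verbosity;
// and the matching definition elsewhere would be:
//   ABSL_FLAG(int, MY_FLAG_NAME(verbosity), 0, "Verbosity level");
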
diff --git a/absl/flags/flag.h b/absl/flags/flag.h
index a724ccc9..b7f94be7 100644
--- a/absl/flags/flag.h
+++ b/absl/flags/flag.h
@@ -67,6 +67,10 @@ ABSL_NAMESPACE_BEGIN
// ABSL_FLAG(int, count, 0, "Count of items to process");
//
// No public methods of `absl::Flag<T>` are part of the Abseil Flags API.
+//
+// For type support of Abseil Flags, see the marshalling.h header file, which
+// discusses supported standard types, optional flags, and additional Abseil
+// type support.
#if !defined(_MSC_VER) || defined(__clang__)
template <typename T>
using Flag = flags_internal::Flag<T>;
@@ -265,6 +269,7 @@ ABSL_NAMESPACE_END
// global name for FLAGS_no<flag_name> symbol, thus preventing the possibility
// of defining two flags with names foo and nofoo.
#define ABSL_FLAG_IMPL(Type, name, default_value, help) \
+ extern ::absl::Flag<Type> FLAGS_##name; \
namespace absl /* block flags in namespaces */ {} \
ABSL_FLAG_IMPL_DECLARE_DEF_VAL_WRAPPER(name, Type, default_value) \
ABSL_FLAG_IMPL_DECLARE_HELP_WRAPPER(name, help) \
diff --git a/absl/flags/flag_test.cc b/absl/flags/flag_test.cc
index 6e974a5b..845b4eba 100644
--- a/absl/flags/flag_test.cc
+++ b/absl/flags/flag_test.cc
@@ -854,7 +854,9 @@ ABSL_RETIRED_FLAG(bool, old_bool_flag, true, "old descr");
ABSL_RETIRED_FLAG(int, old_int_flag, (int)std::sqrt(10), "old descr");
ABSL_RETIRED_FLAG(std::string, old_str_flag, "", absl::StrCat("old ", "descr"));
-bool initializaion_order_fiasco_test = [] {
+namespace {
+
+bool initialization_order_fiasco_test ABSL_ATTRIBUTE_UNUSED = [] {
// Iterate over all the flags during static initialization.
// This should not trigger ASan's initialization-order-fiasco.
auto* handle1 = absl::FindCommandLineFlag("flag_on_separate_file");
@@ -865,8 +867,6 @@ bool initializaion_order_fiasco_test = [] {
return true;
}();
-namespace {
-
TEST_F(FlagTest, TestRetiredFlagRegistration) {
auto* handle = absl::FindCommandLineFlag("old_bool_flag");
EXPECT_TRUE(handle->IsOfType<bool>());
@@ -977,3 +977,190 @@ TEST_F(FlagTest, TesTypeWrappingEnum) {
value = absl::GetFlag(FLAGS_test_enum_wrapper_flag);
EXPECT_EQ(value.e, B);
}
+
+// This is a compile test to ensure macros are expanded within ABSL_FLAG and
+// ABSL_DECLARE_FLAG.
+#define FLAG_NAME_MACRO(name) prefix_ ## name
+ABSL_DECLARE_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag));
+ABSL_FLAG(int, FLAG_NAME_MACRO(test_macro_named_flag), 0,
+ "Testing macro expansion within ABSL_FLAG");
+
+TEST_F(FlagTest, MacroWithinAbslFlag) {
+ EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 0);
+ absl::SetFlag(&FLAGS_prefix_test_macro_named_flag, 1);
+ EXPECT_EQ(absl::GetFlag(FLAGS_prefix_test_macro_named_flag), 1);
+}
+
+// --------------------------------------------------------------------
+
+#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ <= 5
+#define ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+#endif
+
+#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+ABSL_FLAG(absl::optional<bool>, optional_bool, absl::nullopt, "help");
+#endif
+ABSL_FLAG(absl::optional<int>, optional_int, {}, "help");
+ABSL_FLAG(absl::optional<double>, optional_double, 9.3, "help");
+ABSL_FLAG(absl::optional<std::string>, optional_string, absl::nullopt, "help");
+ABSL_FLAG(absl::optional<absl::Duration>, optional_duration, absl::nullopt,
+ "help");
+ABSL_FLAG(absl::optional<absl::optional<int>>, optional_optional_int,
+ absl::nullopt, "help");
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+ABSL_FLAG(std::optional<int64_t>, std_optional_int64, std::nullopt, "help");
+#endif
+
+namespace {
+
+#ifndef ABSL_SKIP_OPTIONAL_BOOL_TEST_DUE_TO_GCC_BUG
+TEST_F(FlagTest, TestOptionalBool) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt);
+
+ absl::SetFlag(&FLAGS_optional_bool, false);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), false);
+
+ absl::SetFlag(&FLAGS_optional_bool, true);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_bool).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), true);
+
+ absl::SetFlag(&FLAGS_optional_bool, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_bool).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_bool), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+#endif
+
+TEST_F(FlagTest, TestOptionalInt) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt);
+
+ absl::SetFlag(&FLAGS_optional_int, 0);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 0);
+
+ absl::SetFlag(&FLAGS_optional_int, 10);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), 10);
+
+ absl::SetFlag(&FLAGS_optional_int, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_int), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalDouble) {
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+ EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 9.3);
+
+ absl::SetFlag(&FLAGS_optional_double, 0.0);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), 0.0);
+
+ absl::SetFlag(&FLAGS_optional_double, 1.234);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_double).has_value());
+ EXPECT_DOUBLE_EQ(*absl::GetFlag(FLAGS_optional_double), 1.234);
+
+ absl::SetFlag(&FLAGS_optional_double, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_double).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_double), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalString) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt);
+
+ // Setting optional string to "" leads to undefined behavior.
+
+ absl::SetFlag(&FLAGS_optional_string, " ");
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), " ");
+
+ absl::SetFlag(&FLAGS_optional_string, "QWERTY");
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_string).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), "QWERTY");
+
+ absl::SetFlag(&FLAGS_optional_string, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_string).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_string), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalDuration) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt);
+
+ absl::SetFlag(&FLAGS_optional_duration, absl::ZeroDuration());
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Seconds(0));
+
+ absl::SetFlag(&FLAGS_optional_duration, absl::Hours(3));
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_duration).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::Hours(3));
+
+ absl::SetFlag(&FLAGS_optional_duration, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_duration).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_duration), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+TEST_F(FlagTest, TestOptionalOptional) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt);
+
+ absl::optional<int> nullint{absl::nullopt};
+
+ absl::SetFlag(&FLAGS_optional_optional_int, nullint);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+ EXPECT_NE(absl::GetFlag(FLAGS_optional_optional_int), nullint);
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int),
+ absl::optional<absl::optional<int>>{nullint});
+
+ absl::SetFlag(&FLAGS_optional_optional_int, 0);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0);
+
+ absl::SetFlag(&FLAGS_optional_optional_int, absl::optional<int>{0});
+ EXPECT_TRUE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), 0);
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::optional<int>{0});
+
+ absl::SetFlag(&FLAGS_optional_optional_int, absl::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_optional_optional_int).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_optional_optional_int), absl::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+
+TEST_F(FlagTest, TestStdOptional) {
+ EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt);
+
+ absl::SetFlag(&FLAGS_std_optional_int64, 0);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0);
+
+ absl::SetFlag(&FLAGS_std_optional_int64, 0xFFFFFFFFFF16);
+ EXPECT_TRUE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), 0xFFFFFFFFFF16);
+
+ absl::SetFlag(&FLAGS_std_optional_int64, std::nullopt);
+ EXPECT_FALSE(absl::GetFlag(FLAGS_std_optional_int64).has_value());
+ EXPECT_EQ(absl::GetFlag(FLAGS_std_optional_int64), std::nullopt);
+}
+
+// --------------------------------------------------------------------
+
+#endif
+
+} // namespace
diff --git a/absl/flags/internal/flag.cc b/absl/flags/internal/flag.cc
index 1515022d..55892d77 100644
--- a/absl/flags/internal/flag.cc
+++ b/absl/flags/internal/flag.cc
@@ -30,6 +30,7 @@
#include "absl/base/call_once.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
+#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/flags/config.h"
#include "absl/flags/internal/commandlineflag.h"
@@ -160,6 +161,8 @@ void FlagImpl::Init() {
std::memcpy(buf.data() + Sizeof(op_), &initialized,
sizeof(initialized));
}
+ // Type can contain valid uninitialized bits, e.g. padding.
+ ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf.data(), buf.size());
OneWordValue().store(absl::bit_cast<int64_t>(buf),
std::memory_order_release);
break;
@@ -205,7 +208,7 @@ void FlagImpl::AssertValidType(FlagFastTypeId rhs_type_id,
if (lhs_runtime_type_id == rhs_runtime_type_id) return;
-#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef ABSL_INTERNAL_HAS_RTTI
if (*lhs_runtime_type_id == *rhs_runtime_type_id) return;
#endif
diff --git a/absl/flags/internal/flag.h b/absl/flags/internal/flag.h
index 124a2f1c..6154638c 100644
--- a/absl/flags/internal/flag.h
+++ b/absl/flags/internal/flag.h
@@ -163,7 +163,7 @@ inline ptrdiff_t ValueOffset(FlagOpFn op) {
// Returns an address of RTTI's typeid(T).
template <typename T>
inline const std::type_info* GenRuntimeTypeId() {
-#if defined(ABSL_FLAGS_INTERNAL_HAS_RTTI)
+#ifdef ABSL_INTERNAL_HAS_RTTI
return &typeid(T);
#else
return nullptr;
@@ -303,7 +303,9 @@ constexpr FlagDefaultArg DefaultArg(char) {
///////////////////////////////////////////////////////////////////////////////
// Flag current value auxiliary structs.
-constexpr int64_t UninitializedFlagValue() { return 0xababababababababll; }
+constexpr int64_t UninitializedFlagValue() {
+ return static_cast<int64_t>(0xababababababababll);
+}
template <typename T>
using FlagUseValueAndInitBitStorage = std::integral_constant<
@@ -755,8 +757,8 @@ void* FlagOps(FlagOp op, const void* v1, void* v2, void* v3) {
case FlagOp::kValueOffset: {
// Round sizeof(FlagImp) to a multiple of alignof(FlagValue<T>) to get the
// offset of the data.
- ptrdiff_t round_to = alignof(FlagValue<T>);
- ptrdiff_t offset =
+ size_t round_to = alignof(FlagValue<T>);
+ size_t offset =
(sizeof(FlagImpl) + round_to - 1) / round_to * round_to;
return reinterpret_cast<void*>(offset);
}
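
The UninitializedFlagValue() change above exists because 0xabababababababab exceeds INT64_MAX: even with the ll suffix the hex literal's type ends up unsigned long long, so converting it to int64_t warrants an explicit cast. A small demonstration:

#include <cstdint>
#include <cstdio>

int main() {
  // The cast makes the two's-complement wraparound explicit and warning-clean.
  constexpr int64_t kUninitialized =
      static_cast<int64_t>(0xababababababababULL);
  std::printf("%lld\n", static_cast<long long>(kUninitialized));
  // Prints -6076574518398440533.
}
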
diff --git a/absl/flags/internal/usage_test.cc b/absl/flags/internal/usage_test.cc
index 044d71c8..209a7be9 100644
--- a/absl/flags/internal/usage_test.cc
+++ b/absl/flags/internal/usage_test.cc
@@ -20,6 +20,7 @@
#include <sstream>
#include <string>
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/parse.h"
@@ -47,8 +48,10 @@ struct UDT {
UDT(const UDT&) = default;
UDT& operator=(const UDT&) = default;
};
-bool AbslParseFlag(absl::string_view, UDT*, std::string*) { return true; }
-std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
+static bool AbslParseFlag(absl::string_view, UDT*, std::string*) {
+ return true;
+}
+static std::string AbslUnparseFlag(const UDT&) { return "UDT{}"; }
ABSL_FLAG(UDT, usage_reporting_test_flag_05, {},
"usage_reporting_test_flag_05 help message");
@@ -103,14 +106,19 @@ class UsageReportingTest : public testing::Test {
using UsageReportingDeathTest = UsageReportingTest;
TEST_F(UsageReportingDeathTest, TestSetProgramUsageMessage) {
+#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
+ // Check for kTestUsageMessage set in main() below.
EXPECT_EQ(absl::ProgramUsageMessage(), kTestUsageMessage);
+#else
+ // Check for part of the usage message set by GoogleTest.
+ EXPECT_THAT(absl::ProgramUsageMessage(),
+ ::testing::HasSubstr(
+ "This program contains tests written using Google Test"));
+#endif
-#ifndef _WIN32
- // TODO(rogeeff): figure out why this does not work on Windows.
EXPECT_DEATH_IF_SUPPORTED(
absl::SetProgramUsageMessage("custom usage message"),
- ".*SetProgramUsageMessage\\(\\) called twice.*");
-#endif
+ ::testing::HasSubstr("SetProgramUsageMessage() called twice"));
}
// --------------------------------------------------------------------
@@ -487,8 +495,10 @@ path.
int main(int argc, char* argv[]) {
(void)absl::GetFlag(FLAGS_undefok); // Force linking of parse.cc
flags::SetProgramInvocationName("usage_test");
+#if !defined(GTEST_HAS_ABSL) || !GTEST_HAS_ABSL
+ // GoogleTest calls absl::SetProgramUsageMessage() already.
absl::SetProgramUsageMessage(kTestUsageMessage);
+#endif
::testing::InitGoogleTest(&argc, argv);
-
return RUN_ALL_TESTS();
}
diff --git a/absl/flags/marshalling.h b/absl/flags/marshalling.h
index 7cbc136d..b1e2ffa5 100644
--- a/absl/flags/marshalling.h
+++ b/absl/flags/marshalling.h
@@ -33,6 +33,7 @@
// * `double`
// * `std::string`
// * `std::vector<std::string>`
+// * `std::optional<T>`
// * `absl::LogSeverity` (provided natively for layering reasons)
//
// Note that support for integral types is implemented using overloads for
@@ -65,6 +66,42 @@
// below.)
//
// -----------------------------------------------------------------------------
+// Optional Flags
+// -----------------------------------------------------------------------------
+//
+// The Abseil flags library supports flags of type `std::optional<T>` where
+// `T` is one of the supported flag types. We refer to this flag type as an
+// "optional flag." An optional flag is either "valueless", holding no value
+// of type `T` (indicating that the flag has not been set), or it holds a
+// value of type `T`. The valueless state is represented in C++ code by a
+// value of `std::nullopt` for the optional flag.
+//
+// Using `std::nullopt` as an optional flag's default value allows you to check
+// whether such a flag was ever specified on the command line:
+//
+// if (absl::GetFlag(FLAGS_foo).has_value()) {
+// // flag was set on command line
+// } else {
+// // flag was not passed on command line
+// }
+//
+// Using an optional flag in this manner avoids common workarounds for
+// indicating such an unset flag (such as using sentinel values to indicate
+// this state).
+//
+// An optional flag also allows a developer to pass a flag in an "unset"
+// valueless state on the command line, allowing the flag to be set later
+// within the binary's logic. An optional flag's valueless state is indicated
+// by the special notation of passing the value as an empty string through the
+// syntax `--flag=` or `--flag ""`.
+//
+// $ binary_with_optional --flag_in_unset_state=
+// $ binary_with_optional --flag_in_unset_state ""
+//
+// Note: as a result of the above syntax requirements, an optional flag cannot
+// be set to any value of `T` which unparses to the empty string.
+//
+// -----------------------------------------------------------------------------
// Adding Type Support for Abseil Flags
// -----------------------------------------------------------------------------
//
@@ -162,14 +199,27 @@
#ifndef ABSL_FLAGS_MARSHALLING_H_
#define ABSL_FLAGS_MARSHALLING_H_
+#include "absl/base/config.h"
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+#include <optional>
+#endif
#include <string>
#include <vector>
-#include "absl/base/config.h"
#include "absl/strings/string_view.h"
+#include "absl/types/optional.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
+
+// Forward declarations to be used inside composable flag parse/unparse
+// implementations.
+template <typename T>
+inline bool ParseFlag(absl::string_view input, T* dst, std::string* error);
+template <typename T>
+inline std::string UnparseFlag(const T& v);
+
namespace flags_internal {
// Overloads of `AbslParseFlag()` and `AbslUnparseFlag()` for fundamental types.
@@ -189,6 +239,36 @@ bool AbslParseFlag(absl::string_view, std::string*, std::string*);
bool AbslParseFlag(absl::string_view, std::vector<std::string>*, std::string*);
template <typename T>
+bool AbslParseFlag(absl::string_view text, absl::optional<T>* f,
+ std::string* err) {
+ if (text.empty()) {
+ *f = absl::nullopt;
+ return true;
+ }
+ T value;
+ if (!absl::ParseFlag(text, &value, err)) return false;
+
+ *f = std::move(value);
+ return true;
+}
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+template <typename T>
+bool AbslParseFlag(absl::string_view text, std::optional<T>* f,
+ std::string* err) {
+ if (text.empty()) {
+ *f = std::nullopt;
+ return true;
+ }
+ T value;
+ if (!absl::ParseFlag(text, &value, err)) return false;
+
+ *f = std::move(value);
+ return true;
+}
+#endif
+
+template <typename T>
bool InvokeParseFlag(absl::string_view input, T* dst, std::string* err) {
// Comment on next line provides a good compiler error message if T
// does not have AbslParseFlag(absl::string_view, T*, std::string*).
@@ -202,6 +282,18 @@ std::string AbslUnparseFlag(absl::string_view v);
std::string AbslUnparseFlag(const std::vector<std::string>&);
template <typename T>
+std::string AbslUnparseFlag(const absl::optional<T>& f) {
+ return f.has_value() ? absl::UnparseFlag(*f) : "";
+}
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+template <typename T>
+std::string AbslUnparseFlag(const std::optional<T>& f) {
+ return f.has_value() ? absl::UnparseFlag(*f) : "";
+}
+#endif
+
+template <typename T>
std::string Unparse(const T& v) {
// Comment on next line provides a good compiler error message if T does not
// have UnparseFlag.
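
Putting the optional-flag support above together, a hedged end-to-end sketch (the flag name is illustrative):

#include <iostream>
#include "absl/flags/flag.h"
#include "absl/types/optional.h"

// absl::nullopt as the default means "not passed on the command line".
ABSL_FLAG(absl::optional<int>, port, absl::nullopt, "Port to listen on");

void Report() {
  absl::optional<int> port = absl::GetFlag(FLAGS_port);
  if (port.has_value()) {
    std::cout << "listening on port " << *port << "\n";
  } else {
    std::cout << "port not set; choosing one automatically\n";
  }
}
// --port=8080 sets the value; a bare --port= resets it to absl::nullopt.
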
diff --git a/absl/flags/marshalling_test.cc b/absl/flags/marshalling_test.cc
index 4a64ce11..7b6d2ad5 100644
--- a/absl/flags/marshalling_test.cc
+++ b/absl/flags/marshalling_test.cc
@@ -659,6 +659,88 @@ TEST(MarshallingTest, TestVectorOfStringParsing) {
// --------------------------------------------------------------------
+TEST(MarshallingTest, TestOptionalBoolParsing) {
+ std::string err;
+ absl::optional<bool> value;
+
+ EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(value.has_value());
+
+ EXPECT_TRUE(absl::ParseFlag("true", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_TRUE(*value);
+
+ EXPECT_TRUE(absl::ParseFlag("false", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_FALSE(*value);
+
+ EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalIntParsing) {
+ std::string err;
+ absl::optional<int> value;
+
+ EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(value.has_value());
+
+ EXPECT_TRUE(absl::ParseFlag("10", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, 10);
+
+ EXPECT_TRUE(absl::ParseFlag("0x1F", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, 31);
+
+ EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalDoubleParsing) {
+ std::string err;
+ absl::optional<double> value;
+
+ EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(value.has_value());
+
+ EXPECT_TRUE(absl::ParseFlag("1.11", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, 1.11);
+
+ EXPECT_TRUE(absl::ParseFlag("-0.12", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, -0.12);
+
+ EXPECT_FALSE(absl::ParseFlag("nullopt", &value, &err));
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalStringParsing) {
+ std::string err;
+ absl::optional<std::string> value;
+
+ EXPECT_TRUE(absl::ParseFlag("", &value, &err));
+ EXPECT_FALSE(value.has_value());
+
+ EXPECT_TRUE(absl::ParseFlag(" ", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, " ");
+
+ EXPECT_TRUE(absl::ParseFlag("aqswde", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, "aqswde");
+
+ EXPECT_TRUE(absl::ParseFlag("nullopt", &value, &err));
+ EXPECT_TRUE(value.has_value());
+ EXPECT_EQ(*value, "nullopt");
+}
+
+// --------------------------------------------------------------------
+
TEST(MarshallingTest, TestBoolUnparsing) {
EXPECT_EQ(absl::UnparseFlag(true), "true");
EXPECT_EQ(absl::UnparseFlag(false), "false");
@@ -808,6 +890,90 @@ TEST(MarshallingTest, TestStringUnparsing) {
// --------------------------------------------------------------------
+TEST(MarshallingTest, TestOptionalBoolUnparsing) {
+ absl::optional<bool> value;
+
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+ value = true;
+ EXPECT_EQ(absl::UnparseFlag(value), "true");
+ value = false;
+ EXPECT_EQ(absl::UnparseFlag(value), "false");
+ value = absl::nullopt;
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalIntUnparsing) {
+ absl::optional<int> value;
+
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+ value = 0;
+ EXPECT_EQ(absl::UnparseFlag(value), "0");
+ value = -12;
+ EXPECT_EQ(absl::UnparseFlag(value), "-12");
+ value = absl::nullopt;
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalDoubleUnparsing) {
+ absl::optional<double> value;
+
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+ value = 1.;
+ EXPECT_EQ(absl::UnparseFlag(value), "1");
+ value = -1.23;
+ EXPECT_EQ(absl::UnparseFlag(value), "-1.23");
+ value = absl::nullopt;
+ EXPECT_EQ(absl::UnparseFlag(value), "");
+}
+
+// --------------------------------------------------------------------
+
+TEST(MarshallingTest, TestOptionalStringUnparsing) {
+ absl::optional<std::string> strvalue;
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+ strvalue = "asdfg";
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg");
+
+ strvalue = " ";
+ EXPECT_EQ(absl::UnparseFlag(strvalue), " ");
+
+ strvalue = ""; // It is UB to set an optional string flag to ""
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+}
+
+// --------------------------------------------------------------------
+
+#if defined(ABSL_HAVE_STD_OPTIONAL) && !defined(ABSL_USES_STD_OPTIONAL)
+
+TEST(MarshallingTest, TestStdOptionalUnparsing) {
+ std::optional<std::string> strvalue;
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+ strvalue = "asdfg";
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "asdfg");
+
+ strvalue = " ";
+ EXPECT_EQ(absl::UnparseFlag(strvalue), " ");
+
+ strvalue = ""; // It is UB to set an optional string flag to ""
+ EXPECT_EQ(absl::UnparseFlag(strvalue), "");
+
+ std::optional<int> intvalue;
+ EXPECT_EQ(absl::UnparseFlag(intvalue), "");
+
+ intvalue = 10;
+ EXPECT_EQ(absl::UnparseFlag(intvalue), "10");
+}
+
+// --------------------------------------------------------------------
+
+#endif
+
template <typename T>
void TestRoundtrip(T v) {
T new_v;
diff --git a/absl/functional/BUILD.bazel b/absl/functional/BUILD.bazel
index f9f2b9c2..c4fbce98 100644
--- a/absl/functional/BUILD.bazel
+++ b/absl/functional/BUILD.bazel
@@ -26,6 +26,40 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
cc_library(
+ name = "any_invocable",
+ srcs = ["internal/any_invocable.h"],
+ hdrs = ["any_invocable.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:base_internal",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/meta:type_traits",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "any_invocable_test",
+ srcs = [
+ "any_invocable_test.cc",
+ "internal/any_invocable.h",
+ ],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":any_invocable",
+ "//absl/base:base_internal",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/meta:type_traits",
+ "//absl/utility",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
name = "bind_front",
srcs = ["internal/front_binder.h"],
hdrs = ["bind_front.h"],
@@ -78,14 +112,15 @@ cc_test(
)
cc_test(
- name = "function_ref_benchmark",
+ name = "function_type_benchmark",
srcs = [
- "function_ref_benchmark.cc",
+ "function_type_benchmark.cc",
],
copts = ABSL_TEST_COPTS,
tags = ["benchmark"],
visibility = ["//visibility:private"],
deps = [
+ ":any_invocable",
":function_ref",
"//absl/base:core_headers",
"@com_github_google_benchmark//:benchmark_main",
diff --git a/absl/functional/CMakeLists.txt b/absl/functional/CMakeLists.txt
index 338ddc6c..c0f6eaaa 100644
--- a/absl/functional/CMakeLists.txt
+++ b/absl/functional/CMakeLists.txt
@@ -16,6 +16,42 @@
absl_cc_library(
NAME
+ any_invocable
+ SRCS
+ "internal/any_invocable.h"
+ HDRS
+ "any_invocable.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::base_internal
+ absl::config
+ absl::core_headers
+ absl::type_traits
+ absl::utility
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ any_invocable_test
+ SRCS
+ "any_invocable_test.cc"
+ "internal/any_invocable.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::any_invocable
+ absl::base_internal
+ absl::config
+ absl::core_headers
+ absl::type_traits
+ absl::utility
+ GTest::gmock_main
+)
+
+absl_cc_library(
+ NAME
bind_front
SRCS
"internal/front_binder.h"
diff --git a/absl/functional/any_invocable.h b/absl/functional/any_invocable.h
new file mode 100644
index 00000000..0c5faca0
--- /dev/null
+++ b/absl/functional/any_invocable.h
@@ -0,0 +1,313 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: any_invocable.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines an `absl::AnyInvocable` type that assumes ownership
+// and wraps an object of an invocable type. (Invocable types adhere to the
+// concept specified in https://en.cppreference.com/w/cpp/concepts/invocable.)
+//
+// In general, prefer `absl::AnyInvocable` when you need a type-erased
+// function parameter that needs to take ownership of the type.
+//
+// NOTE: `absl::AnyInvocable` is similar to the C++23 `std::move_only_function`
+// abstraction, but has a slightly different API and is not designed to be a
+// drop-in replacement or C++11-compatible backfill of that type.
+
+#ifndef ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
+#define ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
+
+#include <cstddef>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/functional/internal/any_invocable.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::AnyInvocable
+//
+// `absl::AnyInvocable` is a functional wrapper type, like `std::function`, that
+// assumes ownership of an invocable object. Unlike `std::function`, an
+// `absl::AnyInvocable` is more type-safe and provides the following additional
+// benefits:
+//
+// * Properly adheres to const correctness of the underlying type
+// * Is move-only, so it avoids concurrency problems with copied invocables
+//   and unnecessary copies in general.
+// * Supports reference qualifiers allowing it to perform unique actions (noted
+// below).
+//
+// `absl::AnyInvocable` is a template, and an `absl::AnyInvocable` instantiation
+// may wrap any invocable object with a compatible function signature, e.g.
+// having arguments and return types convertible to types matching the
+// `absl::AnyInvocable` signature, and also matching any stated reference
+// qualifiers, as long as that type is moveable. It therefore provides broad
+// type erasure for functional objects.
+//
+// An `absl::AnyInvocable` is typically used as a type-erased function parameter
+// for accepting various functional objects:
+//
+// // Define a function taking an AnyInvocable parameter.
+// void my_func(absl::AnyInvocable<int()> f) {
+// ...
+// };
+//
+// // That function can accept any invocable type:
+//
+// // Accept a function reference. We don't need to move a reference.
+// int func1() { return 0; };
+// my_func(func1);
+//
+// // Accept a lambda. We use std::move here because otherwise my_func would
+// // copy the lambda.
+// auto lambda = []() { return 0; };
+// my_func(std::move(lambda));
+//
+//   // Accept a function pointer. We don't need to move a function pointer.
+//   auto func2 = &func1;
+//   my_func(func2);
+//
+// // Accept an std::function by moving it. Note that the lambda is copyable
+// // (satisfying std::function requirements) and moveable (satisfying
+// // absl::AnyInvocable requirements).
+// std::function<int()> func6 = []() { return 0; };
+// my_func(std::move(func6));
+//
+// `AnyInvocable` also properly respects `const` qualifiers, reference
+// qualifiers, and the `noexcept` specification (only in C++17 and beyond) as
+// part of the user-specified function type (e.g.
+// `AnyInvocable<void() const && noexcept>`). These qualifiers will be applied to
+// the `AnyInvocable` object's `operator()`, and the underlying invocable must
+// be compatible with those qualifiers.
+//
+// Comparison of const and non-const function types:
+//
+// // Store a closure inside of `func` with the function type `int()`.
+// // Note that we have made `func` itself `const`.
+// const AnyInvocable<int()> func = [](){ return 0; };
+//
+// func(); // Compile-error: the passed type `int()` isn't `const`.
+//
+// // Store a closure inside of `const_func` with the function type
+// // `int() const`.
+// // Note that we have also made `const_func` itself `const`.
+// const AnyInvocable<int() const> const_func = [](){ return 0; };
+//
+// const_func(); // Fine: `int() const` is `const`.
+//
+// In the above example, the call `func()` would have compiled if
+// `std::function` were used, even though the types are not const-compatible.
+// That is a bug; `absl::AnyInvocable` correctly rejects it at compile time.
+//
+// In addition to affecting the signature of `operator()`, the `const` and
+// reference qualifiers of the function type also appropriately constrain which
+// kinds of invocable objects you are allowed to place into the `AnyInvocable`
+// instance. If you specify a function type that is const-qualified, then
+// anything that you attempt to put into the `AnyInvocable` must be callable on
+// a `const` instance of that type.
+//
+// Constraint example:
+//
+// // Fine because the lambda is callable when `const`.
+// AnyInvocable<int() const> func = [=](){ return 0; };
+//
+// // This is a compile-error because the lambda isn't callable when `const`.
+// AnyInvocable<int() const> error = [=]() mutable { return 0; };
+//
+// An `&&` qualifier can be used to express that an `absl::AnyInvocable`
+// instance should be invoked at most once:
+//
+// // Invokes `continuation` with the logical result of an operation when
+// // that operation completes (common in asynchronous code).
+// void CallOnCompletion(AnyInvocable<void(int)&&> continuation) {
+// int result_of_foo = foo();
+//
+// // `std::move` is required because the `operator()` of `continuation` is
+// // rvalue-reference qualified.
+// std::move(continuation)(result_of_foo);
+// }
+//
+// Credits to Matt Calabrese (https://github.com/mattcalabrese) for the original
+// implementation.
+template <class Sig>
+class AnyInvocable : private internal_any_invocable::Impl<Sig> {
+ private:
+ static_assert(
+ std::is_function<Sig>::value,
+ "The template argument of AnyInvocable must be a function type.");
+
+ using Impl = internal_any_invocable::Impl<Sig>;
+
+ public:
+ // The return type of Sig
+ using result_type = typename Impl::result_type;
+
+ // Constructors
+
+ // Constructs the `AnyInvocable` in an empty state.
+ AnyInvocable() noexcept = default;
+ AnyInvocable(std::nullptr_t) noexcept {} // NOLINT
+
+ // Constructs the `AnyInvocable` from an existing `AnyInvocable` by a move.
+ // Note that `f` is not guaranteed to be empty after move-construction,
+ // although it may be.
+ AnyInvocable(AnyInvocable&& /*f*/) noexcept = default;
+
+ // Constructs an `AnyInvocable` from an invocable object.
+ //
+ // Upon construction, `*this` is only empty if `f` is a function pointer or
+ // member pointer type and is null, or if `f` is an `AnyInvocable` that is
+ // empty.
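+  //
+  // For example (an illustrative sketch; the names are hypothetical):
+  //
+  //   int (*null_fp)() = nullptr;
+  //   absl::AnyInvocable<int()> a = null_fp;           // `a` is empty.
+  //   absl::AnyInvocable<int()> b = [] { return 1; };  // `b` is nonempty.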
+ template <class F, typename = absl::enable_if_t<
+ internal_any_invocable::CanConvert<Sig, F>::value>>
+ AnyInvocable(F&& f) // NOLINT
+ : Impl(internal_any_invocable::ConversionConstruct(),
+ std::forward<F>(f)) {}
+
+ // Constructs an `AnyInvocable` that holds an invocable object of type `T`,
+ // which is constructed in-place from the given arguments.
+ //
+ // Example:
+ //
+ // AnyInvocable<int(int)> func(
+ // absl::in_place_type<PossiblyImmovableType>, arg1, arg2);
+ //
+ template <class T, class... Args,
+ typename = absl::enable_if_t<
+ internal_any_invocable::CanEmplace<Sig, T, Args...>::value>>
+ explicit AnyInvocable(absl::in_place_type_t<T>, Args&&... args)
+ : Impl(absl::in_place_type<absl::decay_t<T>>,
+ std::forward<Args>(args)...) {
+ static_assert(std::is_same<T, absl::decay_t<T>>::value,
+ "The explicit template argument of in_place_type is required "
+ "to be an unqualified object type.");
+ }
+
+ // Overload of the above constructor to support list-initialization.
+ template <class T, class U, class... Args,
+ typename = absl::enable_if_t<internal_any_invocable::CanEmplace<
+ Sig, T, std::initializer_list<U>&, Args...>::value>>
+ explicit AnyInvocable(absl::in_place_type_t<T>,
+ std::initializer_list<U> ilist, Args&&... args)
+ : Impl(absl::in_place_type<absl::decay_t<T>>, ilist,
+ std::forward<Args>(args)...) {
+ static_assert(std::is_same<T, absl::decay_t<T>>::value,
+ "The explicit template argument of in_place_type is required "
+ "to be an unqualified object type.");
+ }
+
+ // Assignment Operators
+
+ // Assigns an `AnyInvocable` through move-assignment.
+  // Note that `f` is not guaranteed to be empty after move-assignment,
+  // although it may be.
+ AnyInvocable& operator=(AnyInvocable&& /*f*/) noexcept = default;
+
+ // Assigns an `AnyInvocable` from a nullptr, clearing the `AnyInvocable`. If
+ // not empty, destroys the target, putting `*this` into an empty state.
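+  //
+  // For example (an illustrative sketch):
+  //
+  //   absl::AnyInvocable<void()> f = [] {};
+  //   f = nullptr;  // `f` is now empty, and `f == nullptr` holds.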
+ AnyInvocable& operator=(std::nullptr_t) noexcept {
+ this->Clear();
+ return *this;
+ }
+
+ // Assigns an `AnyInvocable` from an existing `AnyInvocable` instance.
+ //
+ // Upon assignment, `*this` is only empty if `f` is a function pointer or
+ // member pointer type and is null, or if `f` is an `AnyInvocable` that is
+ // empty.
+ template <class F, typename = absl::enable_if_t<
+ internal_any_invocable::CanAssign<Sig, F>::value>>
+ AnyInvocable& operator=(F&& f) {
+ *this = AnyInvocable(std::forward<F>(f));
+ return *this;
+ }
+
+ // Assigns an `AnyInvocable` from a reference to an invocable object.
+ // Upon assignment, stores a reference to the invocable object in the
+ // `AnyInvocable` instance.
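+  //
+  // For example (an illustrative sketch; assumes `fn` outlives the wrapper):
+  //
+  //   auto fn = [] { return 1; };
+  //   absl::AnyInvocable<int()> f;
+  //   f = std::ref(fn);  // Stores a reference to `fn` rather than a copy.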
+ template <
+ class F,
+ typename = absl::enable_if_t<
+ internal_any_invocable::CanAssignReferenceWrapper<Sig, F>::value>>
+ AnyInvocable& operator=(std::reference_wrapper<F> f) noexcept {
+ *this = AnyInvocable(f);
+ return *this;
+ }
+
+ // Destructor
+
+ // If not empty, destroys the target.
+ ~AnyInvocable() = default;
+
+ // absl::AnyInvocable::swap()
+ //
+ // Exchanges the targets of `*this` and `other`.
+ void swap(AnyInvocable& other) noexcept { std::swap(*this, other); }
+
+  // absl::AnyInvocable::operator bool()
+ //
+ // Returns `true` if `*this` is not empty.
+ explicit operator bool() const noexcept { return this->HasValue(); }
+
+ // Invokes the target object of `*this`. `*this` must not be empty.
+ //
+ // Note: The signature of this function call operator is the same as the
+ // template parameter `Sig`.
+ using Impl::operator();
+
+ // Equality operators
+
+ // Returns `true` if `*this` is empty.
+ friend bool operator==(const AnyInvocable& f, std::nullptr_t) noexcept {
+ return !f.HasValue();
+ }
+
+ // Returns `true` if `*this` is empty.
+ friend bool operator==(std::nullptr_t, const AnyInvocable& f) noexcept {
+ return !f.HasValue();
+ }
+
+ // Returns `false` if `*this` is empty.
+ friend bool operator!=(const AnyInvocable& f, std::nullptr_t) noexcept {
+ return f.HasValue();
+ }
+
+ // Returns `false` if `*this` is empty.
+ friend bool operator!=(std::nullptr_t, const AnyInvocable& f) noexcept {
+ return f.HasValue();
+ }
+
+ // swap()
+ //
+ // Exchanges the targets of `f1` and `f2`.
+ friend void swap(AnyInvocable& f1, AnyInvocable& f2) noexcept { f1.swap(f2); }
+
+ private:
+ // Friending other instantiations is necessary for conversions.
+ template <bool /*SigIsNoexcept*/, class /*ReturnType*/, class... /*P*/>
+ friend class internal_any_invocable::CoreImpl;
+};
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_FUNCTIONAL_ANY_INVOCABLE_H_
diff --git a/absl/functional/any_invocable_test.cc b/absl/functional/any_invocable_test.cc
new file mode 100644
index 00000000..fb5e7792
--- /dev/null
+++ b/absl/functional/any_invocable_test.cc
@@ -0,0 +1,1696 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/functional/any_invocable.h"
+
+#include <cstddef>
+#include <initializer_list>
+#include <numeric>
+#include <type_traits>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+static_assert(absl::internal_any_invocable::kStorageSize >= sizeof(void*),
+ "These tests assume that the small object storage is at least "
+ "the size of a pointer.");
+
+namespace {
+
+// Helper macro used to avoid spelling `noexcept` in language versions older
+// than C++17, where it is not part of the type system, in order to avoid
+// compilation failures and internal compiler errors.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
+#else
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
+#endif
+
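+// For exposition, with a hypothetical declaration: under C++17,
+//
+//   void f() ABSL_INTERNAL_NOEXCEPT_SPEC(true);
+//
+// declares `void f() noexcept(true)`, while under earlier standards it
+// declares plain `void f()`.
+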
+// A dummy type we use when passing qualifiers to metafunctions
+struct _ {};
+
+template <class T>
+struct Wrapper {
+ template <class U,
+ class = absl::enable_if_t<std::is_convertible<U, T>::value>>
+ Wrapper(U&&); // NOLINT
+};
+
+// This will cause a recursive trait instantiation if the SFINAE checks are
+// not ordered correctly for constructibility.
+static_assert(std::is_constructible<Wrapper<absl::AnyInvocable<void()>>,
+ Wrapper<absl::AnyInvocable<void()>>>::value,
+ "");
+
+// A metafunction that takes the cv and reference qualifiers that were
+// associated with a function type (here passed via qualifiers of an object
+// type), and applies them to an object type to yield the matching `*this`
+// parameter type.
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl {
+ static_assert(std::is_object<This>::value, "");
+ using type =
+ absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&;
+};
+
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl<Qualifiers&, This>
+ : QualifiersForThisImpl<Qualifiers, This> {};
+
+template <class Qualifiers, class This>
+struct QualifiersForThisImpl<Qualifiers&&, This> {
+ static_assert(std::is_object<This>::value, "");
+ using type =
+ absl::conditional_t<std::is_const<Qualifiers>::value, const This, This>&&;
+};
+
+template <class Qualifiers, class This>
+using QualifiersForThis =
+ typename QualifiersForThisImpl<Qualifiers, This>::type;
+
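+// Illustrative checks of the mapping (added for exposition; `_` is the dummy
+// object type above):
+static_assert(std::is_same<QualifiersForThis<const _, _>, const _&>::value,
+              "const qualifiers should yield a const lvalue `*this` type.");
+static_assert(std::is_same<QualifiersForThis<_&&, _>, _&&>::value,
+              "rvalue-reference qualifiers should yield an rvalue `*this`.");
+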
+// A metafunction that takes the cv and l-value reference qualifier of T and
+// applies them to U's function type qualifiers.
+template <class T, class Fun>
+struct GiveQualifiersToFunImpl;
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T, R(P...)> {
+ using type =
+ absl::conditional_t<std::is_const<T>::value, R(P...) const, R(P...)>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&, R(P...)> {
+ using type =
+ absl::conditional_t<std::is_const<T>::value, R(P...) const&, R(P...)&>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&&, R(P...)> {
+ using type =
+ absl::conditional_t<std::is_const<T>::value, R(P...) const&&, R(P...) &&>;
+};
+
+// If noexcept is a part of the type system, then provide the noexcept forms.
+#if defined(__cpp_noexcept_function_type)
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T, R(P...) noexcept> {
+ using type = absl::conditional_t<std::is_const<T>::value,
+ R(P...) const noexcept, R(P...) noexcept>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&, R(P...) noexcept> {
+ using type =
+ absl::conditional_t<std::is_const<T>::value, R(P...) const & noexcept,
+ R(P...) & noexcept>;
+};
+
+template <class T, class R, class... P>
+struct GiveQualifiersToFunImpl<T&&, R(P...) noexcept> {
+ using type =
+ absl::conditional_t<std::is_const<T>::value, R(P...) const && noexcept,
+ R(P...) && noexcept>;
+};
+
+#endif // defined(__cpp_noexcept_function_type)
+
+template <class T, class Fun>
+using GiveQualifiersToFun = typename GiveQualifiersToFunImpl<T, Fun>::type;
+
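+// Illustrative checks (added for exposition):
+static_assert(
+    std::is_same<GiveQualifiersToFun<const _, int()>, int() const>::value,
+    "a const tag should yield a const-qualified function type.");
+static_assert(std::is_same<GiveQualifiersToFun<_&&, int()>, int() &&>::value,
+              "an rvalue tag should yield an &&-qualified function type.");
+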
+// This is used in template parameters to decide whether or not to use a type
+// that fits in the small object optimization storage.
+enum class ObjSize { small, large };
+
+// A base type that is used with classes as a means to insert an
+// appropriately-sized dummy data member when Size is ObjSize::large so that
+// the user's class type is guaranteed to not fit in small object storage.
+template <ObjSize Size>
+struct TypeErasedPadding;
+
+template <>
+struct TypeErasedPadding<ObjSize::small> {};
+
+template <>
+struct TypeErasedPadding<ObjSize::large> {
+ char dummy_data[absl::internal_any_invocable::kStorageSize + 1] = {};
+};
+
+struct Int {
+ Int(int v) noexcept : value(v) {} // NOLINT
+#ifndef _MSC_VER
+ Int(Int&&) noexcept {
+    // NOTE: Prior to C++17, copy elision is not guaranteed, so not reaching
+    // this call relies on optimizations performed during the top-level
+    // invocation. In practice, most supported compilers perform that
+    // optimization even before C++17.
+ std::abort();
+ }
+#else
+ Int(Int&& v) noexcept = default;
+#endif
+ operator int() && noexcept { return value; } // NOLINT
+
+ int MemberFunctionAdd(int const& b, int c) noexcept { // NOLINT
+ return value + b + c;
+ }
+
+ int value;
+};
+
+enum class Movable { no, yes, nothrow, trivial };
+
+enum class NothrowCall { no, yes };
+
+enum class Destructible { nothrow, trivial };
+
+enum class ObjAlign : std::size_t {
+ normal = absl::internal_any_invocable::kAlignment,
+ large = absl::internal_any_invocable::kAlignment * 2,
+};
+
+// A function-object template that has knobs for each property that can affect
+// how the object is stored in AnyInvocable.
+template <Movable Movability, Destructible Destructibility, class Qual,
+ NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add;
+
+#define ABSL_INTERNALS_ADD(qual) \
+ template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment> \
+ struct alignas(static_cast<std::size_t>(Alignment)) \
+ add<Movable::trivial, Destructible::trivial, _ qual, CallExceptionSpec, \
+ Size, Alignment> : TypeErasedPadding<Size> { \
+ explicit add(int state_init) : state(state_init) {} \
+ explicit add(std::initializer_list<int> state_init, int tail) \
+ : state(std::accumulate(std::begin(state_init), std::end(state_init), \
+ 0) + \
+ tail) {} \
+ add(add&& other) = default; /*NOLINT*/ \
+ Int operator()(int a, int b, int c) qual \
+ ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \
+ return state + a + b + c; \
+ } \
+ int state; \
+ }; \
+ \
+ template <NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment> \
+ struct alignas(static_cast<std::size_t>(Alignment)) \
+ add<Movable::trivial, Destructible::nothrow, _ qual, CallExceptionSpec, \
+ Size, Alignment> : TypeErasedPadding<Size> { \
+ explicit add(int state_init) : state(state_init) {} \
+ explicit add(std::initializer_list<int> state_init, int tail) \
+ : state(std::accumulate(std::begin(state_init), std::end(state_init), \
+ 0) + \
+ tail) {} \
+ ~add() noexcept {} \
+ add(add&& other) = default; /*NOLINT*/ \
+ Int operator()(int a, int b, int c) qual \
+ ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes) { \
+ return state + a + b + c; \
+ } \
+ int state; \
+ }
+
+// Explicitly specify an empty argument: MSVC (at least up to _MSC_VER 1931,
+// if not beyond) warns that a bare ABSL_INTERNALS_ADD() invocation names an
+// undefined zero-arg overload of the macro.
+#define ABSL_INTERNALS_NOARG
+ABSL_INTERNALS_ADD(ABSL_INTERNALS_NOARG);
+#undef ABSL_INTERNALS_NOARG
+
+ABSL_INTERNALS_ADD(const);
+ABSL_INTERNALS_ADD(&);
+ABSL_INTERNALS_ADD(const&);
+ABSL_INTERNALS_ADD(&&); // NOLINT
+ABSL_INTERNALS_ADD(const&&); // NOLINT
+
+#undef ABSL_INTERNALS_ADD
+
+template <Destructible Destructibility, class Qual,
+ NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::no, Destructibility, Qual, CallExceptionSpec, Size,
+ Alignment> : private add<Movable::trivial, Destructibility, Qual,
+ CallExceptionSpec, Size, Alignment> {
+ using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+ Size, Alignment>;
+
+ explicit add(int state_init) : Base(state_init) {}
+
+ explicit add(std::initializer_list<int> state_init, int tail)
+ : Base(state_init, tail) {}
+
+ add(add&&) = delete;
+
+ using Base::operator();
+ using Base::state;
+};
+
+template <Destructible Destructibility, class Qual,
+ NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::yes, Destructibility, Qual, CallExceptionSpec, Size,
+ Alignment> : private add<Movable::trivial, Destructibility, Qual,
+ CallExceptionSpec, Size, Alignment> {
+ using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+ Size, Alignment>;
+
+ explicit add(int state_init) : Base(state_init) {}
+
+ explicit add(std::initializer_list<int> state_init, int tail)
+ : Base(state_init, tail) {}
+
+ add(add&& other) noexcept(false) : Base(other.state) {} // NOLINT
+
+ using Base::operator();
+ using Base::state;
+};
+
+template <Destructible Destructibility, class Qual,
+ NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct add<Movable::nothrow, Destructibility, Qual, CallExceptionSpec, Size,
+ Alignment> : private add<Movable::trivial, Destructibility, Qual,
+ CallExceptionSpec, Size, Alignment> {
+ using Base = add<Movable::trivial, Destructibility, Qual, CallExceptionSpec,
+ Size, Alignment>;
+
+ explicit add(int state_init) : Base(state_init) {}
+
+ explicit add(std::initializer_list<int> state_init, int tail)
+ : Base(state_init, tail) {}
+
+ add(add&& other) noexcept : Base(other.state) {}
+
+ using Base::operator();
+ using Base::state;
+};
+
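+// For exposition, one hypothetical configuration of the functor behaves as:
+//
+//   add<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+//       ObjSize::small, ObjAlign::normal> adder(5);
+//   adder(1, 2, 3);  // Yields Int{11}: the stored state plus the arguments.
+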
+// Actual non-member functions rather than function objects
+Int add_function(Int&& a, int b, int c) noexcept { return a.value + b + c; }
+
+Int mult_function(Int&& a, int b, int c) noexcept { return a.value * b * c; }
+
+Int square_function(Int const&& a) noexcept { return a.value * a.value; }
+
+template <class Sig>
+using AnyInvocable = absl::AnyInvocable<Sig>;
+
+// Each instantiation of this template bundles all of the compile-time
+// parameters for one configuration of the AnyInvocable test suite.
+template <Movable Movability, Destructible Destructibility, class Qual,
+ NothrowCall CallExceptionSpec, ObjSize Size, ObjAlign Alignment>
+struct TestParams {
+ static constexpr Movable kMovability = Movability;
+ static constexpr Destructible kDestructibility = Destructibility;
+ using Qualifiers = Qual;
+ static constexpr NothrowCall kCallExceptionSpec = CallExceptionSpec;
+ static constexpr bool kIsNoexcept = kCallExceptionSpec == NothrowCall::yes;
+ static constexpr bool kIsRvalueQualified =
+ std::is_rvalue_reference<Qual>::value;
+ static constexpr ObjSize kSize = Size;
+ static constexpr ObjAlign kAlignment = Alignment;
+
+  // These types are used when testing with member-object-pointer invocables.
+ using UnqualifiedUnaryFunType = int(Int const&&)
+ ABSL_INTERNAL_NOEXCEPT_SPEC(CallExceptionSpec == NothrowCall::yes);
+ using UnaryFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedUnaryFunType>;
+ using MemObjPtrType = int(Int::*);
+ using UnaryAnyInvType = AnyInvocable<UnaryFunType>;
+ using UnaryThisParamType = QualifiersForThis<Qualifiers, UnaryAnyInvType>;
+
+ template <class T>
+ static UnaryThisParamType ToUnaryThisParam(T&& fun) {
+ return static_cast<UnaryThisParamType>(fun);
+ }
+
+ // This function type intentionally uses 3 "kinds" of parameter types.
+ // - A user-defined type
+ // - A reference type
+ // - A scalar type
+ //
+  // These were chosen because internal forwarding takes place on parameters
+  // differently depending on type properties (scalars are forwarded by
+  // value).
+ using ResultType = Int;
+ using AnyInvocableFunTypeNotNoexcept = Int(Int, const int&, int);
+ using UnqualifiedFunType =
+ typename std::conditional<kIsNoexcept, Int(Int, const int&, int) noexcept,
+ Int(Int, const int&, int)>::type;
+ using FunType = GiveQualifiersToFun<Qualifiers, UnqualifiedFunType>;
+ using MemFunPtrType =
+ typename std::conditional<kIsNoexcept,
+ Int (Int::*)(const int&, int) noexcept,
+ Int (Int::*)(const int&, int)>::type;
+ using AnyInvType = AnyInvocable<FunType>;
+ using AddType = add<kMovability, kDestructibility, Qualifiers,
+ kCallExceptionSpec, kSize, kAlignment>;
+ using ThisParamType = QualifiersForThis<Qualifiers, AnyInvType>;
+
+ template <class T>
+ static ThisParamType ToThisParam(T&& fun) {
+ return static_cast<ThisParamType>(fun);
+ }
+
+ // These typedefs are used when testing void return type covariance.
+ using UnqualifiedVoidFunType =
+ typename std::conditional<kIsNoexcept,
+ void(Int, const int&, int) noexcept,
+ void(Int, const int&, int)>::type;
+ using VoidFunType = GiveQualifiersToFun<Qualifiers, UnqualifiedVoidFunType>;
+ using VoidAnyInvType = AnyInvocable<VoidFunType>;
+ using VoidThisParamType = QualifiersForThis<Qualifiers, VoidAnyInvType>;
+
+ template <class T>
+ static VoidThisParamType ToVoidThisParam(T&& fun) {
+ return static_cast<VoidThisParamType>(fun);
+ }
+
+ using CompatibleAnyInvocableFunType =
+ absl::conditional_t<std::is_rvalue_reference<Qual>::value,
+ GiveQualifiersToFun<const _&&, UnqualifiedFunType>,
+ GiveQualifiersToFun<const _&, UnqualifiedFunType>>;
+
+ using CompatibleAnyInvType = AnyInvocable<CompatibleAnyInvocableFunType>;
+
+ using IncompatibleInvocable =
+ absl::conditional_t<std::is_rvalue_reference<Qual>::value,
+ GiveQualifiersToFun<_&, UnqualifiedFunType>(_::*),
+ GiveQualifiersToFun<_&&, UnqualifiedFunType>(_::*)>;
+};
+
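+// An illustrative instantiation of the parameter bundle (for exposition only;
+// the real suites are instantiated further below via TypeList metafunctions):
+using ExampleTestParams =
+    TestParams<Movable::trivial, Destructible::trivial, const _,
+               NothrowCall::no, ObjSize::small, ObjAlign::normal>;
+static_assert(!ExampleTestParams::kIsRvalueQualified,
+              "a `const _` tag carries no rvalue-reference qualifier.");
+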
+// Given a member-pointer type, this metafunction yields the target type of the
+// pointer, not including the class-type. It is used to verify that the function
+// call operator of AnyInvocable has the proper signature, corresponding to the
+// function type that the user provided.
+template <class MemberPtrType>
+struct MemberTypeOfImpl;
+
+template <class Class, class T>
+struct MemberTypeOfImpl<T(Class::*)> {
+ using type = T;
+};
+
+template <class MemberPtrType>
+using MemberTypeOf = typename MemberTypeOfImpl<MemberPtrType>::type;
+
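+// Illustrative check (for exposition): the member type of `int (Int::*)` is
+// plain `int`.
+static_assert(std::is_same<MemberTypeOf<int(Int::*)>, int>::value, "");
+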
+template <class T, class = void>
+struct IsMemberSwappableImpl : std::false_type {
+ static constexpr bool kIsNothrow = false;
+};
+
+template <class T>
+struct IsMemberSwappableImpl<
+ T, absl::void_t<decltype(std::declval<T&>().swap(std::declval<T&>()))>>
+ : std::true_type {
+ static constexpr bool kIsNothrow =
+ noexcept(std::declval<T&>().swap(std::declval<T&>()));
+};
+
+template <class T>
+using IsMemberSwappable = IsMemberSwappableImpl<T>;
+
+template <class T>
+using IsNothrowMemberSwappable =
+ std::integral_constant<bool, IsMemberSwappableImpl<T>::kIsNothrow>;
+
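+// Illustrative checks (for exposition): AnyInvocable provides a nothrow
+// member swap, so both traits below hold for it.
+static_assert(IsMemberSwappable<absl::AnyInvocable<void()>>::value, "");
+static_assert(IsNothrowMemberSwappable<absl::AnyInvocable<void()>>::value, "");
+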
+template <class T>
+class AnyInvTestBasic : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestBasic);
+
+TYPED_TEST_P(AnyInvTestBasic, DefaultConstruction) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+
+ EXPECT_TRUE(std::is_nothrow_default_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullptr) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = nullptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+
+ EXPECT_TRUE(
+ (std::is_nothrow_constructible<AnyInvType, std::nullptr_t>::value));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullFunctionPtr) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+ UnqualifiedFunType* const null_fun_ptr = nullptr;
+ AnyInvType fun = null_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberFunctionPtr) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+ const MemFunPtrType null_mem_fun_ptr = nullptr;
+ AnyInvType fun = null_mem_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionNullMemberObjectPtr) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+ using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+ const MemObjPtrType null_mem_obj_ptr = nullptr;
+ UnaryAnyInvType fun = null_mem_obj_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberFunctionPtr) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = &Int::MemberFunctionAdd;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionMemberObjectPtr) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+ UnaryAnyInvType fun = &Int::value;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionFunctionReferenceDecay) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = add_function;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableEmpty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other;
+ AnyInvType fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+ EXPECT_EQ(other, nullptr); // NOLINT
+ EXPECT_EQ(nullptr, other); // NOLINT
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConstructionCompatibleAnyInvocableNonempty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other = &add_function;
+ AnyInvType fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+ EXPECT_EQ(other, nullptr); // NOLINT
+ EXPECT_EQ(nullptr, other); // NOLINT
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ConversionToBool) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ {
+ AnyInvType fun;
+
+ // This tests contextually-convertible-to-bool.
+ EXPECT_FALSE(fun ? true : false); // NOLINT
+
+ // Make sure that the conversion is not implicit.
+ EXPECT_TRUE(
+ (std::is_nothrow_constructible<bool, const AnyInvType&>::value));
+ EXPECT_FALSE((std::is_convertible<const AnyInvType&, bool>::value));
+ }
+
+ {
+ AnyInvType fun = &add_function;
+
+ // This tests contextually-convertible-to-bool.
+ EXPECT_TRUE(fun ? true : false); // NOLINT
+ }
+}
+
+TYPED_TEST_P(AnyInvTestBasic, Invocation) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ using FunType = typename TypeParam::FunType;
+ using AnyInvCallType = MemberTypeOf<decltype(&AnyInvType::operator())>;
+
+ // Make sure the function call operator of AnyInvocable always has the
+ // type that was specified via the template argument.
+ EXPECT_TRUE((std::is_same<AnyInvCallType, FunType>::value));
+
+ AnyInvType fun = &add_function;
+
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceConstruction) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceConstructionInitializerList) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun(absl::in_place_type<AddType>, {1, 2, 3, 4}, 5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(39, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstruction) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+ AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>, nullptr);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullFunPtrConstructionValueInit) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+ AnyInvType fun(absl::in_place_type<UnqualifiedFunType*>);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstruction) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+ AnyInvType fun(absl::in_place_type<MemFunPtrType>, nullptr);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemFunPtrConstructionValueInit) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+ AnyInvType fun(absl::in_place_type<MemFunPtrType>);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstruction) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+ using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+ UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>, nullptr);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceNullMemObjPtrConstructionValueInit) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+ using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+ UnaryAnyInvType fun(absl::in_place_type<MemObjPtrType>);
+
+ // In-place construction does not lead to empty.
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, InPlaceVoidCovarianceConstruction) {
+ using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ VoidAnyInvType fun(absl::in_place_type<AddType>, 5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromEmpty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType source_fun;
+ AnyInvType fun(std::move(source_fun));
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+
+ EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, MoveConstructionFromNonEmpty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+ AnyInvType fun(std::move(source_fun));
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(std::is_nothrow_move_constructible<AnyInvType>::value);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrEmpty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun;
+
+ EXPECT_TRUE(fun == nullptr);
+ EXPECT_TRUE(nullptr == fun);
+
+ EXPECT_FALSE(fun != nullptr);
+ EXPECT_FALSE(nullptr != fun);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ComparisonWithNullptrNonempty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+ EXPECT_FALSE(fun == nullptr);
+ EXPECT_FALSE(nullptr == fun);
+
+ EXPECT_TRUE(fun != nullptr);
+ EXPECT_TRUE(nullptr != fun);
+}
+
+TYPED_TEST_P(AnyInvTestBasic, ResultType) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using ExpectedResultType = typename TypeParam::ResultType;
+
+ EXPECT_TRUE((std::is_same<typename AnyInvType::result_type,
+ ExpectedResultType>::value));
+}
+
+template <class T>
+class AnyInvTestCombinatoric : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestCombinatoric);
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType source_fun;
+ AnyInvType fun;
+
+ fun = std::move(source_fun);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignEmptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+ AnyInvType fun;
+
+ fun = std::move(source_fun);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyEmptyLhsRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType source_fun;
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+
+ fun = std::move(source_fun);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, MoveAssignNonemptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+ AnyInvType fun(absl::in_place_type<AddType>, 20);
+
+ fun = std::move(source_fun);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignEmpty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType source_fun;
+ source_fun = std::move(source_fun);
+
+ // This space intentionally left blank.
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SelfMoveAssignNonempty) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType source_fun(absl::in_place_type<AddType>, 5);
+ source_fun = std::move(source_fun);
+
+ // This space intentionally left blank.
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun;
+ fun = nullptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+ UnqualifiedFunType* const null_fun_ptr = nullptr;
+ AnyInvType fun;
+ fun = null_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+ const MemFunPtrType null_mem_fun_ptr = nullptr;
+ AnyInvType fun;
+ fun = null_mem_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrEmptyLhs) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+ using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+ const MemObjPtrType null_mem_obj_ptr = nullptr;
+ UnaryAnyInvType fun;
+ fun = null_mem_obj_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun;
+ fun = &Int::MemberFunctionAdd;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrEmptyLhs) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+ UnaryAnyInvType fun;
+ fun = &Int::value;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun;
+ fun = add_function;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+ AssignCompatibleAnyInvocableEmptyLhsEmptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other;
+ AnyInvType fun;
+ fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+ EXPECT_EQ(other, nullptr); // NOLINT
+ EXPECT_EQ(nullptr, other); // NOLINT
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+ AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other = &add_function;
+ AnyInvType fun;
+ fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullptrNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = &mult_function;
+ fun = nullptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullFunctionPtrNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using UnqualifiedFunType = typename TypeParam::UnqualifiedFunType;
+
+ UnqualifiedFunType* const null_fun_ptr = nullptr;
+ AnyInvType fun = &mult_function;
+ fun = null_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberFunctionPtrNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using MemFunPtrType = typename TypeParam::MemFunPtrType;
+
+ const MemFunPtrType null_mem_fun_ptr = nullptr;
+ AnyInvType fun = &mult_function;
+ fun = null_mem_fun_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignNullMemberObjectPtrNonemptyLhs) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+ using MemObjPtrType = typename TypeParam::MemObjPtrType;
+
+ const MemObjPtrType null_mem_obj_ptr = nullptr;
+ UnaryAnyInvType fun = &square_function;
+ fun = null_mem_obj_ptr;
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberFunctionPtrNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = &mult_function;
+ fun = &Int::MemberFunctionAdd;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignMemberObjectPtrNonemptyLhs) {
+ using UnaryAnyInvType = typename TypeParam::UnaryAnyInvType;
+
+ UnaryAnyInvType fun = &square_function;
+ fun = &Int::value;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(13, TypeParam::ToUnaryThisParam(fun)(13));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, AssignFunctionReferenceDecayNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ AnyInvType fun = &mult_function;
+ fun = add_function;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+ AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other;
+ AnyInvType fun = &mult_function;
+ fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+ EXPECT_EQ(other, nullptr); // NOLINT
+ EXPECT_EQ(nullptr, other); // NOLINT
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric,
+ AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using CompatibleAnyInvType = typename TypeParam::CompatibleAnyInvType;
+
+ CompatibleAnyInvType other = &add_function;
+ AnyInvType fun = &mult_function;
+ fun = std::move(other);
+
+ EXPECT_FALSE(static_cast<bool>(other)); // NOLINT
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(24, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsEmptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ // Swap idiom
+ {
+ AnyInvType fun;
+ AnyInvType other;
+
+ using std::swap;
+ swap(fun, other);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+ EXPECT_FALSE(static_cast<bool>(other));
+
+ EXPECT_TRUE(
+ absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+ }
+
+ // Member swap
+ {
+ AnyInvType fun;
+ AnyInvType other;
+
+ fun.swap(other);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+ EXPECT_FALSE(static_cast<bool>(other));
+
+ EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+ }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapEmptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ // Swap idiom
+ {
+ AnyInvType fun;
+ AnyInvType other(absl::in_place_type<AddType>, 5);
+
+ using std::swap;
+ swap(fun, other);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_FALSE(static_cast<bool>(other));
+
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(
+ absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+ }
+
+ // Member swap
+ {
+ AnyInvType fun;
+ AnyInvType other(absl::in_place_type<AddType>, 5);
+
+ fun.swap(other);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_FALSE(static_cast<bool>(other));
+
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+ }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsEmptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ // Swap idiom
+ {
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+ AnyInvType other;
+
+ using std::swap;
+ swap(fun, other);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+ EXPECT_TRUE(static_cast<bool>(other));
+
+ EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+ EXPECT_TRUE(
+ absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+ }
+
+ // Member swap
+ {
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+ AnyInvType other;
+
+ fun.swap(other);
+
+ EXPECT_FALSE(static_cast<bool>(fun));
+ EXPECT_TRUE(static_cast<bool>(other));
+
+ EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+ EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+ }
+}
+
+TYPED_TEST_P(AnyInvTestCombinatoric, SwapNonemptyLhsNonemptyRhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ // Swap idiom
+ {
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+ AnyInvType other(absl::in_place_type<AddType>, 6);
+
+ using std::swap;
+ swap(fun, other);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_TRUE(static_cast<bool>(other));
+
+ EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+ EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+ EXPECT_TRUE(
+ absl::type_traits_internal::IsNothrowSwappable<AnyInvType>::value);
+ }
+
+ // Member swap
+ {
+ AnyInvType fun(absl::in_place_type<AddType>, 5);
+ AnyInvType other(absl::in_place_type<AddType>, 6);
+
+ fun.swap(other);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_TRUE(static_cast<bool>(other));
+
+ EXPECT_EQ(30, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+ EXPECT_EQ(29, TypeParam::ToThisParam(other)(7, 8, 9).value);
+
+ EXPECT_TRUE(IsNothrowMemberSwappable<AnyInvType>::value);
+ }
+}
+
+template <class T>
+class AnyInvTestMovable : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestMovable);
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionUserDefinedType) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun(AddType(5));
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionConstructionVoidCovariance) {
+ using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ VoidAnyInvType fun(AddType(5));
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun;
+ fun = AddType(5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignUserDefinedTypeNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AnyInvType fun = &add_function;
+ fun = AddType(5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestMovable, ConversionAssignVoidCovariance) {
+ using VoidAnyInvType = typename TypeParam::VoidAnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ VoidAnyInvType fun;
+ fun = AddType(5);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+}
+
+template <class T>
+class AnyInvTestNoexceptFalse : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse);
+
+TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionConstructionConstraints) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ EXPECT_TRUE((std::is_constructible<
+ AnyInvType,
+ typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+ EXPECT_FALSE((
+ std::is_constructible<AnyInvType,
+ typename TypeParam::IncompatibleInvocable>::value));
+}
+
+TYPED_TEST_P(AnyInvTestNoexceptFalse, ConversionAssignConstraints) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+ EXPECT_TRUE((std::is_assignable<
+ AnyInvType&,
+ typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+ EXPECT_FALSE(
+ (std::is_assignable<AnyInvType&,
+ typename TypeParam::IncompatibleInvocable>::value));
+}
+
+template <class T>
+class AnyInvTestNoexceptTrue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue);
+
+TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionConstructionConstraints) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+ GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
+#else
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+// TODO(b/217761454): Fix this and re-enable for MSVC.
+#ifndef _MSC_VER
+ EXPECT_FALSE((std::is_constructible<
+ AnyInvType,
+ typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+#endif
+ EXPECT_FALSE((
+ std::is_constructible<AnyInvType,
+ typename TypeParam::IncompatibleInvocable>::value));
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestNoexceptTrue, ConversionAssignConstraints) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+ GTEST_SKIP() << "Noexcept was not part of the type system before C++17.";
+#else
+ using AnyInvType = typename TypeParam::AnyInvType;
+
+// TODO(b/217761454): Fix this and re-enable for MSVC.
+#ifndef _MSC_VER
+ EXPECT_FALSE((std::is_assignable<
+ AnyInvType&,
+ typename TypeParam::AnyInvocableFunTypeNotNoexcept*>::value));
+#endif
+ EXPECT_FALSE(
+ (std::is_assignable<AnyInvType&,
+ typename TypeParam::IncompatibleInvocable>::value));
+#endif
+}
+
+template <class T>
+class AnyInvTestNonRvalue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestNonRvalue);
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionConstructionReferenceWrapper) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AddType add(4);
+ AnyInvType fun = std::ref(add);
+ add.state = 5;
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, NonMoveableResultType) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+ GTEST_SKIP() << "Copy/move elision was not standard before C++17";
+#else
+ // Define a result type that cannot be copy- or move-constructed.
+ struct Result {
+ int x;
+
+ explicit Result(const int x_in) : x(x_in) {}
+ Result(Result&&) = delete;
+ };
+
+ static_assert(!std::is_move_constructible<Result>::value, "");
+ static_assert(!std::is_copy_constructible<Result>::value, "");
+
+ // Assumption check: it should nevertheless be possible to use functors that
+ // return a Result struct according to the language rules.
+ const auto return_17 = []() noexcept { return Result(17); };
+ EXPECT_EQ(17, return_17().x);
+
+ // Just like plain functors, it should work fine to use an AnyInvocable that
+ // returns the non-moveable type.
+ using UnqualifiedFun =
+ absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
+
+ using Fun =
+ GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
+
+ AnyInvocable<Fun> any_inv(return_17);
+ EXPECT_EQ(17, any_inv().x);
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperEmptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AddType add(4);
+ AnyInvType fun;
+ fun = std::ref(add);
+ add.state = 5;
+ EXPECT_TRUE(
+ (std::is_nothrow_assignable<AnyInvType&,
+ std::reference_wrapper<AddType>>::value));
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+TYPED_TEST_P(AnyInvTestNonRvalue, ConversionAssignReferenceWrapperNonemptyLhs) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ AddType add(4);
+ AnyInvType fun = &mult_function;
+ fun = std::ref(add);
+ add.state = 5;
+ EXPECT_TRUE(
+ (std::is_nothrow_assignable<AnyInvType&,
+ std::reference_wrapper<AddType>>::value));
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(29, TypeParam::ToThisParam(fun)(7, 8, 9).value);
+
+ EXPECT_TRUE(static_cast<bool>(fun));
+ EXPECT_EQ(38, TypeParam::ToThisParam(fun)(10, 11, 12).value);
+}
+
+template <class T>
+class AnyInvTestRvalue : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(AnyInvTestRvalue);
+
+TYPED_TEST_P(AnyInvTestRvalue, ConversionConstructionReferenceWrapper) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ EXPECT_FALSE((
+ std::is_convertible<std::reference_wrapper<AddType>, AnyInvType>::value));
+}
+
+TYPED_TEST_P(AnyInvTestRvalue, NonMoveableResultType) {
+#if ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+ GTEST_SKIP() << "Copy/move elision was not standard before C++17";
+#else
+ // Define a result type that cannot be copy- or move-constructed.
+ struct Result {
+ int x;
+
+ explicit Result(const int x_in) : x(x_in) {}
+ Result(Result&&) = delete;
+ };
+
+ static_assert(!std::is_move_constructible<Result>::value, "");
+ static_assert(!std::is_copy_constructible<Result>::value, "");
+
+ // Assumption check: it should nevertheless be possible to use functors that
+ // return a Result struct according to the language rules.
+ const auto return_17 = []() noexcept { return Result(17); };
+ EXPECT_EQ(17, return_17().x);
+
+ // Just like plain functors, it should work fine to use an AnyInvocable that
+ // returns the non-moveable type.
+ using UnqualifiedFun =
+ absl::conditional_t<TypeParam::kIsNoexcept, Result() noexcept, Result()>;
+
+ using Fun =
+ GiveQualifiersToFun<typename TypeParam::Qualifiers, UnqualifiedFun>;
+
+ EXPECT_EQ(17, AnyInvocable<Fun>(return_17)().x);
+#endif
+}
+
+TYPED_TEST_P(AnyInvTestRvalue, ConversionAssignReferenceWrapper) {
+ using AnyInvType = typename TypeParam::AnyInvType;
+ using AddType = typename TypeParam::AddType;
+
+ EXPECT_FALSE((
+ std::is_assignable<AnyInvType&, std::reference_wrapper<AddType>>::value));
+}
+
+// NOTE: This test suite originally attempted to enumerate all possible
+// combinations of type properties, but the build time became too long.
+// Instead, certain parameters are now assumed to be orthogonal, and some
+// combinations are elided.
+
+// A metafunction to form a TypeList of all cv and non-rvalue ref combinations,
+// coupled with all of the other explicitly specified parameters.
+template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
+ ObjSize Size, ObjAlign Align>
+using NonRvalueQualifiedTestParams = ::testing::Types< //
+ TestParams<Mov, Dest, _, CallExceptionSpec, Size, Align>, //
+ TestParams<Mov, Dest, const _, CallExceptionSpec, Size, Align>, //
+ TestParams<Mov, Dest, _&, CallExceptionSpec, Size, Align>, //
+ TestParams<Mov, Dest, const _&, CallExceptionSpec, Size, Align>>;
+
+// A metafunction to form a TypeList of const and non-const rvalue ref
+// qualifiers, coupled with all of the other explicitly specified parameters.
+template <Movable Mov, Destructible Dest, NothrowCall CallExceptionSpec,
+ ObjSize Size, ObjAlign Align>
+using RvalueQualifiedTestParams = ::testing::Types<
+ TestParams<Mov, Dest, _&&, CallExceptionSpec, Size, Align>, //
+ TestParams<Mov, Dest, const _&&, CallExceptionSpec, Size, Align> //
+ >;
+
+// All qualifier combinations and a noexcept function type
+using TestParameterListNonRvalueQualifiersNothrowCall =
+ NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+ NothrowCall::yes, ObjSize::small,
+ ObjAlign::normal>;
+using TestParameterListRvalueQualifiersNothrowCall =
+ RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+ NothrowCall::yes, ObjSize::small,
+ ObjAlign::normal>;
+
+// All qualifier combinations and a non-noexcept function type
+using TestParameterListNonRvalueQualifiersCallMayThrow =
+ NonRvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+ NothrowCall::no, ObjSize::small,
+ ObjAlign::normal>;
+using TestParameterListRvalueQualifiersCallMayThrow =
+ RvalueQualifiedTestParams<Movable::trivial, Destructible::trivial,
+ NothrowCall::no, ObjSize::small,
+ ObjAlign::normal>;
+
+// Lists of various cases that should lead to remote storage
+using TestParameterListRemoteMovable = ::testing::Types<
+ // "Normal" aligned types that are large and have trivial destructors
+ TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+ TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+ TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+ TestParams<Movable::yes, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+
+ // Same as above but with non-trivial destructors
+ TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+ TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+ TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+ TestParams<Movable::yes, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal> //
+
+// Dynamic memory allocation for over-aligned data was introduced in C++17.
+// See https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0035r4.html
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+ // Types that must use remote storage because of a large alignment.
+ ,
+ TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::large>, //
+ TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::large>, //
+ TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::large>, //
+ TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::large> //
+#endif
+ >;
+using TestParameterListRemoteNonMovable = ::testing::Types<
+ // "Normal" aligned types that are large and have trivial destructors
+ TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+ TestParams<Movable::no, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal>, //
+ // Same as above but with non-trivial destructors
+ TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+ TestParams<Movable::no, Destructible::nothrow, _, NothrowCall::no,
+ ObjSize::large, ObjAlign::normal> //
+ >;
+
+// Parameters that lead to local storage
+using TestParameterListLocal = ::testing::Types<
+ // Types that meet the requirements and have trivial destructors
+ TestParams<Movable::trivial, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+ TestParams<Movable::nothrow, Destructible::trivial, _, NothrowCall::no,
+ ObjSize::small, ObjAlign::normal>, //
+
+ // Same as above but with non-trivial destructors
+    TestParams<Movable::trivial, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>,  //
+    TestParams<Movable::nothrow, Destructible::nothrow, _, NothrowCall::no,
+               ObjSize::small, ObjAlign::normal>  //
+ >;
+
+// All of the tests that are run for every possible combination of types.
+REGISTER_TYPED_TEST_SUITE_P(
+ AnyInvTestBasic, DefaultConstruction, ConstructionNullptr,
+ ConstructionNullFunctionPtr, ConstructionNullMemberFunctionPtr,
+ ConstructionNullMemberObjectPtr, ConstructionMemberFunctionPtr,
+ ConstructionMemberObjectPtr, ConstructionFunctionReferenceDecay,
+ ConstructionCompatibleAnyInvocableEmpty,
+ ConstructionCompatibleAnyInvocableNonempty, InPlaceConstruction,
+ ConversionToBool, Invocation, InPlaceConstructionInitializerList,
+ InPlaceNullFunPtrConstruction, InPlaceNullFunPtrConstructionValueInit,
+ InPlaceNullMemFunPtrConstruction, InPlaceNullMemFunPtrConstructionValueInit,
+ InPlaceNullMemObjPtrConstruction, InPlaceNullMemObjPtrConstructionValueInit,
+ InPlaceVoidCovarianceConstruction, MoveConstructionFromEmpty,
+ MoveConstructionFromNonEmpty, ComparisonWithNullptrEmpty,
+ ComparisonWithNullptrNonempty, ResultType);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+ NonRvalueCallMayThrow, AnyInvTestBasic,
+ TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestBasic,
+ TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestBasic,
+ TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestBasic,
+ TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestBasic, TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestBasic,
+ TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestBasic,
+ TestParameterListRvalueQualifiersNothrowCall);
+
+// Tests for functions that take two operands.
+REGISTER_TYPED_TEST_SUITE_P(
+ AnyInvTestCombinatoric, MoveAssignEmptyEmptyLhsRhs,
+ MoveAssignEmptyLhsNonemptyRhs, MoveAssignNonemptyEmptyLhsRhs,
+ MoveAssignNonemptyLhsNonemptyRhs, SelfMoveAssignEmpty,
+ SelfMoveAssignNonempty, AssignNullptrEmptyLhs,
+ AssignNullFunctionPtrEmptyLhs, AssignNullMemberFunctionPtrEmptyLhs,
+ AssignNullMemberObjectPtrEmptyLhs, AssignMemberFunctionPtrEmptyLhs,
+ AssignMemberObjectPtrEmptyLhs, AssignFunctionReferenceDecayEmptyLhs,
+ AssignCompatibleAnyInvocableEmptyLhsEmptyRhs,
+ AssignCompatibleAnyInvocableEmptyLhsNonemptyRhs, AssignNullptrNonemptyLhs,
+ AssignNullFunctionPtrNonemptyLhs, AssignNullMemberFunctionPtrNonemptyLhs,
+ AssignNullMemberObjectPtrNonemptyLhs, AssignMemberFunctionPtrNonemptyLhs,
+ AssignMemberObjectPtrNonemptyLhs, AssignFunctionReferenceDecayNonemptyLhs,
+ AssignCompatibleAnyInvocableNonemptyLhsEmptyRhs,
+ AssignCompatibleAnyInvocableNonemptyLhsNonemptyRhs, SwapEmptyLhsEmptyRhs,
+ SwapEmptyLhsNonemptyRhs, SwapNonemptyLhsEmptyRhs,
+ SwapNonemptyLhsNonemptyRhs);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+ NonRvalueCallMayThrow, AnyInvTestCombinatoric,
+ TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestCombinatoric,
+ TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestCombinatoric,
+ TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestCombinatoric,
+ TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestCombinatoric,
+ TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestCombinatoric,
+ TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestCombinatoric,
+ TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestMovable,
+ ConversionConstructionUserDefinedType,
+ ConversionConstructionVoidCovariance,
+ ConversionAssignUserDefinedTypeEmptyLhs,
+ ConversionAssignUserDefinedTypeNonemptyLhs,
+ ConversionAssignVoidCovariance);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+ NonRvalueCallMayThrow, AnyInvTestMovable,
+ TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestMovable,
+ TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestMovable,
+ TestParameterListRemoteMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestMovable,
+ TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestMovable,
+ TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestMovable,
+ TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptFalse,
+ ConversionConstructionConstraints,
+ ConversionAssignConstraints);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+ NonRvalueCallMayThrow, AnyInvTestNoexceptFalse,
+ TestParameterListNonRvalueQualifiersCallMayThrow);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestNoexceptFalse,
+ TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNoexceptFalse,
+ TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNoexceptFalse,
+ TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNoexceptFalse,
+ TestParameterListLocal);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNoexceptTrue,
+ ConversionConstructionConstraints,
+ ConversionAssignConstraints);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNoexceptTrue,
+ TestParameterListNonRvalueQualifiersNothrowCall);
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallNothrow, AnyInvTestNoexceptTrue,
+ TestParameterListRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestNonRvalue,
+ ConversionConstructionReferenceWrapper,
+ NonMoveableResultType,
+ ConversionAssignReferenceWrapperEmptyLhs,
+ ConversionAssignReferenceWrapperNonemptyLhs);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(
+ NonRvalueCallMayThrow, AnyInvTestNonRvalue,
+ TestParameterListNonRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteMovable, AnyInvTestNonRvalue,
+ TestParameterListRemoteMovable);
+INSTANTIATE_TYPED_TEST_SUITE_P(RemoteNonMovable, AnyInvTestNonRvalue,
+ TestParameterListRemoteNonMovable);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(Local, AnyInvTestNonRvalue,
+ TestParameterListLocal);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NonRvalueCallNothrow, AnyInvTestNonRvalue,
+ TestParameterListNonRvalueQualifiersNothrowCall);
+
+REGISTER_TYPED_TEST_SUITE_P(AnyInvTestRvalue,
+ ConversionConstructionReferenceWrapper,
+ NonMoveableResultType,
+ ConversionAssignReferenceWrapper);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(RvalueCallMayThrow, AnyInvTestRvalue,
+ TestParameterListRvalueQualifiersCallMayThrow);
+
+INSTANTIATE_TYPED_TEST_SUITE_P(CallNothrowRvalue, AnyInvTestRvalue,
+ TestParameterListRvalueQualifiersNothrowCall);
+
+// Minimal SFINAE testing for platforms on which we can't run the tests but can
+// still build binaries.
+static_assert(
+ std::is_convertible<void (*)(), absl::AnyInvocable<void() &&>>::value, "");
+static_assert(!std::is_convertible<void*, absl::AnyInvocable<void() &&>>::value,
+ "");
+
+#undef ABSL_INTERNAL_NOEXCEPT_SPEC
+
+} // namespace
diff --git a/absl/functional/bind_front.h b/absl/functional/bind_front.h
index 5b47970e..f9075bd1 100644
--- a/absl/functional/bind_front.h
+++ b/absl/functional/bind_front.h
@@ -30,6 +30,10 @@
#ifndef ABSL_FUNCTIONAL_BIND_FRONT_H_
#define ABSL_FUNCTIONAL_BIND_FRONT_H_
+#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+#include <functional> // For std::bind_front.
+#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+
#include "absl/functional/internal/front_binder.h"
#include "absl/utility/utility.h"
@@ -46,7 +50,8 @@ ABSL_NAMESPACE_BEGIN
// specified. More importantly, it provides more reliable correctness guarantees
// than `std::bind()`; while `std::bind()` will silently ignore passing more
// parameters than expected, for example, `absl::bind_front()` will report such
-// mis-uses as errors.
+// misuses as errors. In C++20, `absl::bind_front` is replaced by
+// `std::bind_front`.
//
// absl::bind_front(a...) can be seen as storing the results of
// std::make_tuple(a...).
@@ -170,6 +175,9 @@ ABSL_NAMESPACE_BEGIN
// // Doesn't copy "hi".
// absl::bind_front(Print, absl::string_view(hi))("Chuk");
//
+#if defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
+using std::bind_front;
+#else // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
template <class F, class... BoundArgs>
constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
F&& func, BoundArgs&&... args) {
@@ -177,6 +185,7 @@ constexpr functional_internal::bind_front_t<F, BoundArgs...> bind_front(
absl::in_place, absl::forward<F>(func),
absl::forward<BoundArgs>(args)...);
}
+#endif // defined(__cpp_lib_bind_front) && __cpp_lib_bind_front >= 201907L
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/functional/function_ref.h b/absl/functional/function_ref.h
index 824e3cea..f9779607 100644
--- a/absl/functional/function_ref.h
+++ b/absl/functional/function_ref.h
@@ -69,7 +69,8 @@ class FunctionRef;
// An `absl::FunctionRef` is a lightweight wrapper to any invokable object with
// a compatible signature. Generally, an `absl::FunctionRef` should only be used
// as an argument type and should be preferred as an argument over a const
-// reference to a `std::function`.
+// reference to a `std::function`. `absl::FunctionRef` itself does not allocate,
+// although the wrapped invokable may.
//
// Example:
//
diff --git a/absl/functional/function_ref_test.cc b/absl/functional/function_ref_test.cc
index 3aa59745..412027cd 100644
--- a/absl/functional/function_ref_test.cc
+++ b/absl/functional/function_ref_test.cc
@@ -14,6 +14,7 @@
#include "absl/functional/function_ref.h"
+#include <functional>
#include <memory>
#include "gmock/gmock.h"
diff --git a/absl/functional/function_ref_benchmark.cc b/absl/functional/function_type_benchmark.cc
index 045305bf..03dc31d8 100644
--- a/absl/functional/function_ref_benchmark.cc
+++ b/absl/functional/function_type_benchmark.cc
@@ -1,4 +1,4 @@
-// Copyright 2019 The Abseil Authors.
+// Copyright 2022 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-#include "absl/functional/function_ref.h"
-
+#include <functional>
#include <memory>
+#include <string>
#include "benchmark/benchmark.h"
#include "absl/base/attributes.h"
+#include "absl/functional/any_invocable.h"
+#include "absl/functional/function_ref.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -61,6 +63,12 @@ void BM_TrivialFunctionRef(benchmark::State& state) {
}
BENCHMARK(BM_TrivialFunctionRef);
+void BM_TrivialAnyInvocable(benchmark::State& state) {
+ ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state,
+ TrivialFunctor{});
+}
+BENCHMARK(BM_TrivialAnyInvocable);
+
void BM_LargeStdFunction(benchmark::State& state) {
ConstructAndCallFunctionBenchmark<std::function<void()>>(state,
LargeFunctor{});
@@ -72,6 +80,13 @@ void BM_LargeFunctionRef(benchmark::State& state) {
}
BENCHMARK(BM_LargeFunctionRef);
+
+void BM_LargeAnyInvocable(benchmark::State& state) {
+ ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state,
+ LargeFunctor{});
+}
+BENCHMARK(BM_LargeAnyInvocable);
+
void BM_FunPtrStdFunction(benchmark::State& state) {
ConstructAndCallFunctionBenchmark<std::function<void()>>(state, FreeFunction);
}
@@ -82,6 +97,11 @@ void BM_FunPtrFunctionRef(benchmark::State& state) {
}
BENCHMARK(BM_FunPtrFunctionRef);
+void BM_FunPtrAnyInvocable(benchmark::State& state) {
+ ConstructAndCallFunctionBenchmark<AnyInvocable<void()>>(state, FreeFunction);
+}
+BENCHMARK(BM_FunPtrAnyInvocable);
+
// Doesn't include construction or copy overhead in the loop.
template <typename Function, typename Callable, typename... Args>
void CallFunctionBenchmark(benchmark::State& state, const Callable& c,
@@ -113,6 +133,12 @@ void BM_TrivialArgsFunctionRef(benchmark::State& state) {
}
BENCHMARK(BM_TrivialArgsFunctionRef);
+void BM_TrivialArgsAnyInvocable(benchmark::State& state) {
+ CallFunctionBenchmark<AnyInvocable<void(int, int, int)>>(
+ state, FunctorWithTrivialArgs{}, 1, 2, 3);
+}
+BENCHMARK(BM_TrivialArgsAnyInvocable);
+
struct FunctorWithNonTrivialArgs {
void operator()(std::string a, std::string b, std::string c) const {
benchmark::DoNotOptimize(&a);
@@ -137,6 +163,14 @@ void BM_NonTrivialArgsFunctionRef(benchmark::State& state) {
}
BENCHMARK(BM_NonTrivialArgsFunctionRef);
+void BM_NonTrivialArgsAnyInvocable(benchmark::State& state) {
+ std::string a, b, c;
+ CallFunctionBenchmark<
+ AnyInvocable<void(std::string, std::string, std::string)>>(
+ state, FunctorWithNonTrivialArgs{}, a, b, c);
+}
+BENCHMARK(BM_NonTrivialArgsAnyInvocable);
+
} // namespace
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/functional/internal/any_invocable.h b/absl/functional/internal/any_invocable.h
new file mode 100644
index 00000000..f353139c
--- /dev/null
+++ b/absl/functional/internal/any_invocable.h
@@ -0,0 +1,857 @@
+// Copyright 2022 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Implementation details for `absl::AnyInvocable`
+
+#ifndef ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
+#define ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
+
+////////////////////////////////////////////////////////////////////////////////
+// //
+// This implementation of the proposed `any_invocable` uses an approach that //
+// chooses between local storage and remote storage for the contained target //
+// object based on the target object's size, alignment requirements, and //
+// whether or not it has a nothrow move constructor. Additional optimizations //
+// are performed when the object is a trivially copyable type [basic.types]. //
+// //
+// There are three data members per `AnyInvocable` instance                   //
+// //
+// 1) A union containing either //
+// - A pointer to the target object referred to via a void*, or //
+// - the target object, emplaced into a raw char buffer //
+// //
+// 2) A function pointer to a "manager" function operation that takes a //
+// discriminator and logically branches to either perform a move operation //
+// or destroy operation based on that discriminator. //
+// //
+// 3) A function pointer to an "invoker" function operation that invokes the //
+// target object, directly returning the result. //
+// //
+// When in the logically empty state, the manager function is an empty //
+// function and the invoker function is one that would be undefined-behavior //
+// to call. //
+// //
+// An additional optimization is performed when converting from one //
+// AnyInvocable to another where only the noexcept specification and/or the //
+// cv/ref qualifiers of the function type differ. In these cases, the //
+// conversion works by "moving the guts", similar to if they were the same //
+// exact type, as opposed to having to perform an additional layer of //
+// wrapping through remote storage. //
+// //
+////////////////////////////////////////////////////////////////////////////////
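+
+// As a rough conceptual sketch (for illustration only; the real members are
+// declared in `CoreImpl` below), each `AnyInvocable` can be pictured as:
+//
+//   union State {
+//     void* remote;                                  // pointer to the target
+//     alignas(kAlignment) char local[kStorageSize];  // or the target itself
+//   } state;
+//   void (*manager)(FunctionToCall, State*, State*);  // move/dispose
+//   R (*invoker)(State*, Args...);                    // type-erased call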
+
+// IWYU pragma: private, include "absl/functional/any_invocable.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <functional>
+#include <initializer_list>
+#include <memory>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/invoke.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// Helper macro used to prevent spelling `noexcept` in language versions older
+// than C++17, where it is not part of the type system, in order to avoid
+// compilation failures and internal compiler errors.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex) noexcept(noex)
+#else
+#define ABSL_INTERNAL_NOEXCEPT_SPEC(noex)
+#endif
+
+// Defined in functional/any_invocable.h
+template <class Sig>
+class AnyInvocable;
+
+namespace internal_any_invocable {
+
+// Constants relating to the small-object-storage for AnyInvocable
+enum StorageProperty : std::size_t {
+ kAlignment = alignof(std::max_align_t), // The alignment of the storage
+ kStorageSize = sizeof(void*) * 2 // The size of the storage
+};
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction for checking if a type is an AnyInvocable instantiation.
+// This is used during conversion operations.
+template <class T>
+struct IsAnyInvocable : std::false_type {};
+
+template <class Sig>
+struct IsAnyInvocable<AnyInvocable<Sig>> : std::true_type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A type trait that tells us whether or not a target function type should be
+// stored locally in the small object optimization storage
+template <class T>
+using IsStoredLocally = std::integral_constant<
+ bool, sizeof(T) <= kStorageSize && alignof(T) <= kAlignment &&
+ kAlignment % alignof(T) == 0 &&
+ std::is_nothrow_move_constructible<T>::value>;
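+
+// For illustration, with the constants above (two pointers' worth of storage
+// and max_align_t alignment), a lambda capturing a single pointer satisfies
+// `IsStoredLocally`, while one whose captures exceed `kStorageSize` does not:
+//
+//   auto small = [p] { return *p; };        // stored locally
+//   auto large = [p, q, r] { return *p; };  // stored remotely (too big)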
+
+// An implementation of std::remove_cvref_t of C++20.
+template <class T>
+using RemoveCVRef =
+ typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// An implementation of the C++ standard INVOKE<R> pseudo-macro; the operation
+// is equivalent to std::invoke except that it forces an implicit conversion to
+// the specified return type. If "R" is void, the function is executed and the
+// return value is simply ignored.
+template <class ReturnType, class F, class... P,
+ typename = absl::enable_if_t<std::is_void<ReturnType>::value>>
+void InvokeR(F&& f, P&&... args) {
+ absl::base_internal::invoke(std::forward<F>(f), std::forward<P>(args)...);
+}
+
+template <class ReturnType, class F, class... P,
+ absl::enable_if_t<!std::is_void<ReturnType>::value, int> = 0>
+ReturnType InvokeR(F&& f, P&&... args) {
+ return absl::base_internal::invoke(std::forward<F>(f),
+ std::forward<P>(args)...);
+}
+
+//
+////////////////////////////////////////////////////////////////////////////////
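+
+// For example (illustrative), if `f()` returns `int`, then `InvokeR<long>(f)`
+// yields the result implicitly converted to `long`, while `InvokeR<void>(f)`
+// invokes `f` and discards the result.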
+
+////////////////////////////////////////////////////////////////////////////////
+///
+// A metafunction that takes a "T" corresponding to a parameter type of the
+// user's specified function type, and yields the parameter type to use for the
+// type-erased invoker. In order to prevent observable moves, this must be
+// either a reference or, if the type is trivial, the original parameter type
+// itself. Since the parameter type may be incomplete at the point that this
+// metafunction is used, we can only do this optimization for scalar types
+// rather than for any trivial type.
+template <typename T>
+T ForwardImpl(std::true_type);
+
+template <typename T>
+T&& ForwardImpl(std::false_type);
+
+// NOTE: We deliberately use an intermediate struct instead of a direct alias,
+// as a workaround for b/206991861 on MSVC versions < 1924.
+template <class T>
+struct ForwardedParameter {
+ using type = decltype((
+ ForwardImpl<T>)(std::integral_constant<bool,
+ std::is_scalar<T>::value>()));
+};
+
+template <class T>
+using ForwardedParameterType = typename ForwardedParameter<T>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
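+
+// For example (illustrative), `ForwardedParameterType<int>` is `int` (scalars
+// are passed by value), while `ForwardedParameterType<std::string>` is
+// `std::string&&`, so no extra move of the argument is observable.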
+
+// A discriminator, passed when calling the "manager" function, that describes
+// which type-erased operation should be invoked.
+//
+// "relocate_from_to" specifies that the manager should perform a move.
+//
+// "dispose" specifies that the manager should perform a destroy.
+enum class FunctionToCall : bool { relocate_from_to, dispose };
+
+// The portion of `AnyInvocable` state that contains either a pointer to the
+// target object or the object itself in local storage
+union TypeErasedState {
+ struct {
+ // A pointer to the type-erased object when remotely stored
+ void* target;
+ // The size of the object for `RemoteManagerTrivial`
+ std::size_t size;
+ } remote;
+
+ // Local-storage for the type-erased object when small and trivial enough
+ alignas(kAlignment) char storage[kStorageSize];
+};
+
+// A typed accessor for the object in `TypeErasedState` storage
+template <class T>
+T& ObjectInLocalStorage(TypeErasedState* const state) {
+ // We launder here because the storage may be reused with the same type.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+ return *std::launder(reinterpret_cast<T*>(&state->storage));
+#elif ABSL_HAVE_BUILTIN(__builtin_launder)
+ return *__builtin_launder(reinterpret_cast<T*>(&state->storage));
+#else
+
+ // When `std::launder` or equivalent are not available, we rely on undefined
+ // behavior, which works as intended on Abseil's officially supported
+ // platforms as of Q2 2022.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic push
+#endif
+ return *reinterpret_cast<T*>(&state->storage);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
+#endif
+}
+
+// The type for functions issuing lifetime-related operations: move and
+// dispose. A pointer to such a function is contained in each `AnyInvocable`
+// instance.
+// NOTE: When specifying `FunctionToCall::dispose`, the same state must be
+// passed as both "from" and "to".
+using ManagerType = void(FunctionToCall /*operation*/,
+ TypeErasedState* /*from*/, TypeErasedState* /*to*/)
+ ABSL_INTERNAL_NOEXCEPT_SPEC(true);
+
+// The type for functions issuing the actual invocation of the object.
+// A pointer to such a function is contained in each AnyInvocable instance.
+template <bool SigIsNoexcept, class ReturnType, class... P>
+using InvokerType = ReturnType(TypeErasedState*, ForwardedParameterType<P>...)
+ ABSL_INTERNAL_NOEXCEPT_SPEC(SigIsNoexcept);
+
+// The manager that is used when AnyInvocable is empty
+inline void EmptyManager(FunctionToCall /*operation*/,
+ TypeErasedState* /*from*/,
+ TypeErasedState* /*to*/) noexcept {}
+
+// The manager that is used when a target function is in local storage and is
+// a trivially copyable type.
+inline void LocalManagerTrivial(FunctionToCall /*operation*/,
+ TypeErasedState* const from,
+ TypeErasedState* const to) noexcept {
+ // This single statement without branching handles both possible operations.
+ //
+ // For FunctionToCall::dispose, "from" and "to" point to the same state, and
+ // so this assignment logically would do nothing.
+ //
+ // Note: Correctness here relies on http://wg21.link/p0593, which has only
+ // become standard in C++20, though implementations do not break it in
+ // practice for earlier versions of C++.
+ //
+ // The correct way to do this without that paper is to first placement-new a
+ // default-constructed T in "to->storage" prior to the memmove, but doing so
+ // requires a different function to be created for each T that is stored
+ // locally, which can cause unnecessary bloat and be less cache friendly.
+ *to = *from;
+
+ // Note: Because the type is trivially copyable, the destructor does not need
+ // to be called ("trivially copyable" requires a trivial destructor).
+}
+
+// The manager that is used when a target function is in local storage and is
+// not a trivially copyable type.
+template <class T>
+void LocalManagerNontrivial(FunctionToCall operation,
+ TypeErasedState* const from,
+ TypeErasedState* const to) noexcept {
+ static_assert(IsStoredLocally<T>::value,
+ "Local storage must only be used for supported types.");
+  static_assert(!std::is_trivially_copyable<T>::value,
+                "This manager must only be used for types that are not "
+                "trivially copyable.");
+
+ T& from_object = (ObjectInLocalStorage<T>)(from);
+
+ switch (operation) {
+ case FunctionToCall::relocate_from_to:
+ // NOTE: Requires that the left-hand operand is already empty.
+ ::new (static_cast<void*>(&to->storage)) T(std::move(from_object));
+ ABSL_FALLTHROUGH_INTENDED;
+ case FunctionToCall::dispose:
+ from_object.~T(); // Must not throw. // NOLINT
+ return;
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+}
+
+// The invoker that is used when a target function is in local storage
+// Note: QualTRef here is the target function type along with cv and reference
+// qualifiers that must be used when calling the function.
+template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
+ReturnType LocalInvoker(
+ TypeErasedState* const state,
+ ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
+ using RawT = RemoveCVRef<QualTRef>;
+ static_assert(
+ IsStoredLocally<RawT>::value,
+ "Target object must be in local storage in order to be invoked from it.");
+
+ auto& f = (ObjectInLocalStorage<RawT>)(state);
+ return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
+ static_cast<ForwardedParameterType<P>>(args)...);
+}
+
+// The manager that is used when a target function is in remote storage and it
+// has a trivial destructor
+inline void RemoteManagerTrivial(FunctionToCall operation,
+ TypeErasedState* const from,
+ TypeErasedState* const to) noexcept {
+ switch (operation) {
+ case FunctionToCall::relocate_from_to:
+ // NOTE: Requires that the left-hand operand is already empty.
+ to->remote = from->remote;
+ return;
+ case FunctionToCall::dispose:
+#if defined(__cpp_sized_deallocation)
+ ::operator delete(from->remote.target, from->remote.size);
+#else // __cpp_sized_deallocation
+ ::operator delete(from->remote.target);
+#endif // __cpp_sized_deallocation
+ return;
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+}
+
+// The manager that is used when a target function is in remote storage and the
+// destructor of the type is not trivial
+template <class T>
+void RemoteManagerNontrivial(FunctionToCall operation,
+ TypeErasedState* const from,
+ TypeErasedState* const to) noexcept {
+ static_assert(!IsStoredLocally<T>::value,
+ "Remote storage must only be used for types that do not "
+ "qualify for local storage.");
+
+ switch (operation) {
+ case FunctionToCall::relocate_from_to:
+ // NOTE: Requires that the left-hand operand is already empty.
+ to->remote.target = from->remote.target;
+ return;
+ case FunctionToCall::dispose:
+ ::delete static_cast<T*>(from->remote.target); // Must not throw.
+ return;
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+}
+
+// The invoker that is used when a target function is in remote storage
+template <bool SigIsNoexcept, class ReturnType, class QualTRef, class... P>
+ReturnType RemoteInvoker(
+ TypeErasedState* const state,
+ ForwardedParameterType<P>... args) noexcept(SigIsNoexcept) {
+ using RawT = RemoveCVRef<QualTRef>;
+ static_assert(!IsStoredLocally<RawT>::value,
+ "Target object must be in remote storage in order to be "
+ "invoked from it.");
+
+ auto& f = *static_cast<RawT*>(state->remote.target);
+ return (InvokeR<ReturnType>)(static_cast<QualTRef>(f),
+ static_cast<ForwardedParameterType<P>>(args)...);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that checks if a type T is an instantiation of
+// absl::in_place_type_t (needed for constructor constraints of AnyInvocable).
+template <class T>
+struct IsInPlaceType : std::false_type {};
+
+template <class T>
+struct IsInPlaceType<absl::in_place_type_t<T>> : std::true_type {};
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A constructor name-tag used with CoreImpl (below) to request the
+// conversion-constructor. QualDecayedTRef is the decayed-type of the object to
+// wrap, along with the cv and reference qualifiers that must be applied when
+// performing an invocation of the wrapped object.
+template <class QualDecayedTRef>
+struct TypedConversionConstruct {};
+
+// A helper base class for all core operations of AnyInvocable. Most notably,
+// this class creates the function call operator and constraint-checkers so that
+// the top-level class does not have to be a series of partial specializations.
+//
+// Note: This definition exists (as opposed to being a declaration) so that if
+// the user of the top-level template accidentally passes a template argument
+// that is not a function type, they will get a static_assert in AnyInvocable's
+// class body rather than an error stating that Impl is not defined.
+template <class Sig>
+class Impl {}; // Note: This is partially-specialized later.
+
+// A std::unique_ptr deleter that deletes memory allocated via ::operator new.
+#if defined(__cpp_sized_deallocation)
+class TrivialDeleter {
+ public:
+ explicit TrivialDeleter(std::size_t size) : size_(size) {}
+
+ void operator()(void* target) const {
+ ::operator delete(target, size_);
+ }
+
+ private:
+ std::size_t size_;
+};
+#else // __cpp_sized_deallocation
+class TrivialDeleter {
+ public:
+ explicit TrivialDeleter(std::size_t) {}
+
+ void operator()(void* target) const { ::operator delete(target); }
+};
+#endif // __cpp_sized_deallocation
+
+template <bool SigIsNoexcept, class ReturnType, class... P>
+class CoreImpl;
+
+constexpr bool IsCompatibleConversion(void*, void*) { return false; }
+template <bool NoExceptSrc, bool NoExceptDest, class... T>
+constexpr bool IsCompatibleConversion(CoreImpl<NoExceptSrc, T...>*,
+ CoreImpl<NoExceptDest, T...>*) {
+ return !NoExceptDest || NoExceptSrc;
+}
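+
+// For example (illustrative), by the rule above a noexcept source may donate
+// its guts to a potentially-throwing destination, but not vice versa:
+//
+//   // AnyInvocable<void() noexcept> -> AnyInvocable<void()>: compatible
+//   static_assert((IsCompatibleConversion)(
+//       static_cast<CoreImpl<true, void>*>(nullptr),
+//       static_cast<CoreImpl<false, void>*>(nullptr)), "");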
+
+// A helper base class for all core operations of AnyInvocable that do not
+// depend on the cv/ref qualifiers of the function type.
+template <bool SigIsNoexcept, class ReturnType, class... P>
+class CoreImpl {
+ public:
+ using result_type = ReturnType;
+
+ CoreImpl() noexcept : manager_(EmptyManager), invoker_(nullptr) {}
+
+ enum class TargetType : int {
+ kPointer = 0,
+ kCompatibleAnyInvocable = 1,
+ kIncompatibleAnyInvocable = 2,
+ kOther = 3,
+ };
+
+ // Note: QualDecayedTRef here includes the cv-ref qualifiers associated with
+ // the invocation of the Invocable. The unqualified type is the target object
+ // type to be stored.
+ template <class QualDecayedTRef, class F>
+ explicit CoreImpl(TypedConversionConstruct<QualDecayedTRef>, F&& f) {
+ using DecayedT = RemoveCVRef<QualDecayedTRef>;
+
+ constexpr TargetType kTargetType =
+ (std::is_pointer<DecayedT>::value ||
+ std::is_member_pointer<DecayedT>::value)
+ ? TargetType::kPointer
+ : IsCompatibleAnyInvocable<DecayedT>::value
+ ? TargetType::kCompatibleAnyInvocable
+ : IsAnyInvocable<DecayedT>::value
+ ? TargetType::kIncompatibleAnyInvocable
+ : TargetType::kOther;
+ // NOTE: We only use integers instead of enums as template parameters in
+ // order to work around a bug on C++14 under MSVC 2017.
+ // See b/236131881.
+ Initialize<static_cast<int>(kTargetType), QualDecayedTRef>(
+ std::forward<F>(f));
+ }
+
+ // Note: QualTRef here includes the cv-ref qualifiers associated with the
+ // invocation of the Invocable. The unqualified type is the target object
+ // type to be stored.
+ template <class QualTRef, class... Args>
+ explicit CoreImpl(absl::in_place_type_t<QualTRef>, Args&&... args) {
+ InitializeStorage<QualTRef>(std::forward<Args>(args)...);
+ }
+
+ CoreImpl(CoreImpl&& other) noexcept {
+ other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
+ manager_ = other.manager_;
+ invoker_ = other.invoker_;
+ other.manager_ = EmptyManager;
+ other.invoker_ = nullptr;
+ }
+
+ CoreImpl& operator=(CoreImpl&& other) noexcept {
+ // Put the left-hand operand in an empty state.
+ //
+ // Note: A full reset that leaves us with an object that has its invariants
+ // intact is necessary in order to handle self-move. This is required by
+ // types that are used with certain operations of the standard library, such
+ // as the default definition of std::swap when both operands target the same
+ // object.
+ Clear();
+
+    // Perform the actual move/destroy operation on the target function.
+ other.manager_(FunctionToCall::relocate_from_to, &other.state_, &state_);
+ manager_ = other.manager_;
+ invoker_ = other.invoker_;
+ other.manager_ = EmptyManager;
+ other.invoker_ = nullptr;
+
+ return *this;
+ }
+
+ ~CoreImpl() { manager_(FunctionToCall::dispose, &state_, &state_); }
+
+ // Check whether or not the AnyInvocable is in the empty state.
+ bool HasValue() const { return invoker_ != nullptr; }
+
+ // Effects: Puts the object into its empty state.
+ void Clear() {
+ manager_(FunctionToCall::dispose, &state_, &state_);
+ manager_ = EmptyManager;
+ invoker_ = nullptr;
+ }
+
+ template <int target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<target_type == 0, int> = 0>
+ void Initialize(F&& f) {
+// This condition handles types that decay into pointers, which includes
+// function references. Since function references cannot be null, GCC warns
+// against comparing their decayed form with nullptr.
+// Since this is template-heavy code, we prefer to disable these warnings
+// locally instead of adding yet another overload of this function.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wpragmas"
+#pragma GCC diagnostic ignored "-Waddress"
+#pragma GCC diagnostic ignored "-Wnonnull-compare"
+#pragma GCC diagnostic push
+#endif
+ if (static_cast<RemoveCVRef<QualDecayedTRef>>(f) == nullptr) {
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ manager_ = EmptyManager;
+ invoker_ = nullptr;
+ return;
+ }
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ }
+
+ template <int target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<target_type == 1, int> = 0>
+ void Initialize(F&& f) {
+ // In this case we can "steal the guts" of the other AnyInvocable.
+ f.manager_(FunctionToCall::relocate_from_to, &f.state_, &state_);
+ manager_ = f.manager_;
+ invoker_ = f.invoker_;
+
+ f.manager_ = EmptyManager;
+ f.invoker_ = nullptr;
+ }
+
+ template <int target_type, class QualDecayedTRef, class F,
+ absl::enable_if_t<target_type == 2, int> = 0>
+ void Initialize(F&& f) {
+ if (f.HasValue()) {
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ } else {
+ manager_ = EmptyManager;
+ invoker_ = nullptr;
+ }
+ }
+
+ template <int target_type, class QualDecayedTRef, class F,
+ typename = absl::enable_if_t<target_type == 3>>
+ void Initialize(F&& f) {
+ InitializeStorage<QualDecayedTRef>(std::forward<F>(f));
+ }
+
+ // Use local (inline) storage for applicable target object types.
+ template <class QualTRef, class... Args,
+ typename = absl::enable_if_t<
+ IsStoredLocally<RemoveCVRef<QualTRef>>::value>>
+ void InitializeStorage(Args&&... args) {
+ using RawT = RemoveCVRef<QualTRef>;
+ ::new (static_cast<void*>(&state_.storage))
+ RawT(std::forward<Args>(args)...);
+
+ invoker_ = LocalInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+ // We can simplify our manager if we know the type is trivially copyable.
+ InitializeLocalManager<RawT>();
+ }
+
+ // Use remote storage for target objects that cannot be stored locally.
+ template <class QualTRef, class... Args,
+ absl::enable_if_t<!IsStoredLocally<RemoveCVRef<QualTRef>>::value,
+ int> = 0>
+ void InitializeStorage(Args&&... args) {
+ InitializeRemoteManager<RemoveCVRef<QualTRef>>(std::forward<Args>(args)...);
+ // This is set after everything else in case an exception is thrown in an
+ // earlier step of the initialization.
+ invoker_ = RemoteInvoker<SigIsNoexcept, ReturnType, QualTRef, P...>;
+ }
+
+ template <class T,
+ typename = absl::enable_if_t<std::is_trivially_copyable<T>::value>>
+ void InitializeLocalManager() {
+ manager_ = LocalManagerTrivial;
+ }
+
+ template <class T,
+ absl::enable_if_t<!std::is_trivially_copyable<T>::value, int> = 0>
+ void InitializeLocalManager() {
+ manager_ = LocalManagerNontrivial<T>;
+ }
+
+ template <class T>
+ using HasTrivialRemoteStorage =
+ std::integral_constant<bool, std::is_trivially_destructible<T>::value &&
+ alignof(T) <=
+ ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT>;
+
+ template <class T, class... Args,
+ typename = absl::enable_if_t<HasTrivialRemoteStorage<T>::value>>
+ void InitializeRemoteManager(Args&&... args) {
+ // unique_ptr is used for exception-safety in case construction throws.
+ std::unique_ptr<void, TrivialDeleter> uninitialized_target(
+ ::operator new(sizeof(T)), TrivialDeleter(sizeof(T)));
+ ::new (uninitialized_target.get()) T(std::forward<Args>(args)...);
+ state_.remote.target = uninitialized_target.release();
+ state_.remote.size = sizeof(T);
+ manager_ = RemoteManagerTrivial;
+ }
+
+ template <class T, class... Args,
+ absl::enable_if_t<!HasTrivialRemoteStorage<T>::value, int> = 0>
+ void InitializeRemoteManager(Args&&... args) {
+ state_.remote.target = ::new T(std::forward<Args>(args)...);
+ manager_ = RemoteManagerNontrivial<T>;
+ }
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // Type trait to determine if the template argument is an AnyInvocable whose
+ // function type is compatible enough with ours such that we can
+ // "move the guts" out of it when moving, rather than having to place a new
+ // object into remote storage.
+
+ template <typename Other>
+ struct IsCompatibleAnyInvocable {
+ static constexpr bool value = false;
+ };
+
+ template <typename Sig>
+ struct IsCompatibleAnyInvocable<AnyInvocable<Sig>> {
+ static constexpr bool value =
+ (IsCompatibleConversion)(static_cast<
+ typename AnyInvocable<Sig>::CoreImpl*>(
+ nullptr),
+ static_cast<CoreImpl*>(nullptr));
+ };
+
+ //
+ //////////////////////////////////////////////////////////////////////////////
+
+ TypeErasedState state_;
+ ManagerType* manager_;
+ InvokerType<SigIsNoexcept, ReturnType, P...>* invoker_;
+};
+
+// A constructor name-tag used with Impl to request the
+// conversion-constructor
+struct ConversionConstruct {};
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// A metafunction that is normally an identity metafunction except that when
+// given a std::reference_wrapper<T>, it yields T&. This is necessary because
+// currently std::reference_wrapper's operator() is not conditionally noexcept,
+// so when checking if such an Invocable is nothrow-invocable, we must pull out
+// the underlying type.
+template <class T>
+struct UnwrapStdReferenceWrapperImpl {
+ using type = T;
+};
+
+template <class T>
+struct UnwrapStdReferenceWrapperImpl<std::reference_wrapper<T>> {
+ using type = T&;
+};
+
+template <class T>
+using UnwrapStdReferenceWrapper =
+ typename UnwrapStdReferenceWrapperImpl<T>::type;
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// An alias that always yields std::true_type; it is used with constraints so
+// that any substitution failure happens while forming the template arguments.
+template <class... T>
+using True =
+ std::integral_constant<bool, sizeof(absl::void_t<T...>*) != 0>;
+
+/*SFINAE constraints for the conversion-constructor.*/
+template <class Sig, class F,
+ class = absl::enable_if_t<
+ !std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
+using CanConvert =
+ True<absl::enable_if_t<!IsInPlaceType<RemoveCVRef<F>>::value>,
+ absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+ absl::enable_if_t<
+ Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+ absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
+
+/*SFINAE constraints for the std::in_place constructors.*/
+template <class Sig, class F, class... Args>
+using CanEmplace = True<
+ absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+ absl::enable_if_t<
+ Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+ absl::enable_if_t<std::is_constructible<absl::decay_t<F>, Args...>::value>>;
+
+/*SFINAE constraints for the conversion-assign operator.*/
+template <class Sig, class F,
+ class = absl::enable_if_t<
+ !std::is_same<RemoveCVRef<F>, AnyInvocable<Sig>>::value>>
+using CanAssign =
+ True<absl::enable_if_t<Impl<Sig>::template CallIsValid<F>::value>,
+ absl::enable_if_t<
+ Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<F>::value>,
+ absl::enable_if_t<std::is_constructible<absl::decay_t<F>, F>::value>>;
+
+/*SFINAE constraints for the reference-wrapper conversion-assign operator.*/
+template <class Sig, class F>
+using CanAssignReferenceWrapper =
+ True<absl::enable_if_t<
+ Impl<Sig>::template CallIsValid<std::reference_wrapper<F>>::value>,
+ absl::enable_if_t<Impl<Sig>::template CallIsNoexceptIfSigIsNoexcept<
+ std::reference_wrapper<F>>::value>>;
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// The constraint for checking whether or not a call meets the noexcept
+// callability requirements. This is a preprocessor macro because specifying it
+// this way as opposed to a disjunction/branch can improve the user-side error
+// messages and avoid an instantiation of std::is_nothrow_invocable_r in the
+// cases where the user did not specify a noexcept function type.
+//
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, noex) \
+ ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_##noex(inv_quals)
+
+// The disjunction below is because we can't rely on std::is_nothrow_invocable_r
+// to give the right result when ReturnType is non-moveable in toolchains that
+// don't treat non-moveable result types correctly. For example, this was the
+// case in libc++ before commit c3a24882 (2022-05).
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true(inv_quals) \
+ absl::enable_if_t<absl::disjunction< \
+ std::is_nothrow_invocable_r< \
+ ReturnType, UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
+ P...>, \
+ std::conjunction< \
+ std::is_nothrow_invocable< \
+ UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, P...>, \
+ std::is_same< \
+ ReturnType, \
+ absl::base_internal::invoke_result_t< \
+ UnwrapStdReferenceWrapper<absl::decay_t<F>> inv_quals, \
+ P...>>>>::value>
+
+#define ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false(inv_quals)
+//
+////////////////////////////////////////////////////////////////////////////////
+
+// A macro to generate partial specializations of Impl with the different
+// combinations of supported cv/reference qualifiers and noexcept specifier.
+//
+// Here, `cv` are the cv-qualifiers if any, `ref` is the ref-qualifier if any,
+// inv_quals is the reference type to be used when invoking the target, and
+// noex is "true" if the function type is noexcept, or false if it is not.
+//
+// The CallIsValid condition is more complicated than simply using
+// absl::base_internal::is_invocable_r because we can't rely on it to give the
+// right result when ReturnType is non-moveable in toolchains that don't treat
+// non-moveable result types correctly. For example, this was the case in
+// libc++ before commit c3a24882 (2022-05).
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, noex) \
+ template <class ReturnType, class... P> \
+ class Impl<ReturnType(P...) cv ref ABSL_INTERNAL_NOEXCEPT_SPEC(noex)> \
+ : public CoreImpl<noex, ReturnType, P...> { \
+ public: \
+    /*The base class, which contains the data members and core operations*/   \
+ using Core = CoreImpl<noex, ReturnType, P...>; \
+ \
+ /*SFINAE constraint to check if F is invocable with the proper signature*/ \
+ template <class F> \
+ using CallIsValid = True<absl::enable_if_t<absl::disjunction< \
+ absl::base_internal::is_invocable_r<ReturnType, \
+ absl::decay_t<F> inv_quals, P...>, \
+ std::is_same<ReturnType, \
+ absl::base_internal::invoke_result_t< \
+ absl::decay_t<F> inv_quals, P...>>>::value>>; \
+ \
+ /*SFINAE constraint to check if F is nothrow-invocable when necessary*/ \
+ template <class F> \
+ using CallIsNoexceptIfSigIsNoexcept = \
+ True<ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT(inv_quals, \
+ noex)>; \
+ \
+ /*Put the AnyInvocable into an empty state.*/ \
+ Impl() = default; \
+ \
+    /*The implementation of a conversion-constructor from "f"*/               \
+ /*This forwards to Core, attaching inv_quals so that the base class*/ \
+ /*knows how to properly type-erase the invocation.*/ \
+ template <class F> \
+ explicit Impl(ConversionConstruct, F&& f) \
+ : Core(TypedConversionConstruct< \
+ typename std::decay<F>::type inv_quals>(), \
+ std::forward<F>(f)) {} \
+ \
+ /*Forward along the in-place construction parameters.*/ \
+ template <class T, class... Args> \
+ explicit Impl(absl::in_place_type_t<T>, Args&&... args) \
+ : Core(absl::in_place_type<absl::decay_t<T> inv_quals>, \
+ std::forward<Args>(args)...) {} \
+ \
+ /*The actual invocation operation with the proper signature*/ \
+ ReturnType operator()(P... args) cv ref noexcept(noex) { \
+ assert(this->invoker_ != nullptr); \
+ return this->invoker_(const_cast<TypeErasedState*>(&this->state_), \
+ static_cast<ForwardedParameterType<P>>(args)...); \
+ } \
+ }
+
+// Define the `noexcept(true)` specialization only for C++17 and beyond, when
+// `noexcept` is part of the type system.
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+// A convenience macro that defines specializations for the noexcept(true) and
+// noexcept(false) forms, given the other properties.
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
+ ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false); \
+ ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, true)
+#else
+#define ABSL_INTERNAL_ANY_INVOCABLE_IMPL(cv, ref, inv_quals) \
+ ABSL_INTERNAL_ANY_INVOCABLE_IMPL_(cv, ref, inv_quals, false)
+#endif
+
+// Non-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, , &);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, , const&);
+
+// Lvalue-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &, &);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &, const&);
+
+// Rvalue-ref-qualified partial specializations
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(, &&, &&);
+ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&);
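+
+// As an example, `ABSL_INTERNAL_ANY_INVOCABLE_IMPL(const, &&, const&&)` above
+// defines `Impl<R(P...) const&&>` (and, in C++17 and later,
+// `Impl<R(P...) const&& noexcept>`), whose call operator invokes the stored
+// target as if by `static_cast<const T&&>(target)(args...)`.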
+
+// Undef the detail-only macros.
+#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL
+#undef ABSL_INTERNAL_ANY_INVOCABLE_IMPL_
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_false
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT_true
+#undef ABSL_INTERNAL_ANY_INVOCABLE_NOEXCEPT_CONSTRAINT
+#undef ABSL_INTERNAL_NOEXCEPT_SPEC
+
+} // namespace internal_any_invocable
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_FUNCTIONAL_INTERNAL_ANY_INVOCABLE_H_
diff --git a/absl/hash/BUILD.bazel b/absl/hash/BUILD.bazel
index f0640d34..bcc316f9 100644
--- a/absl/hash/BUILD.bazel
+++ b/absl/hash/BUILD.bazel
@@ -41,6 +41,7 @@ cc_library(
"//absl/base:core_headers",
"//absl/base:endian",
"//absl/container:fixed_array",
+ "//absl/functional:function_ref",
"//absl/meta:type_traits",
"//absl/numeric:int128",
"//absl/strings",
@@ -74,7 +75,11 @@ cc_test(
":hash_testing",
":spy_hash_state",
"//absl/base:core_headers",
+ "//absl/container:btree",
+ "//absl/container:flat_hash_map",
"//absl/container:flat_hash_set",
+ "//absl/container:node_hash_map",
+ "//absl/container:node_hash_set",
"//absl/meta:type_traits",
"//absl/numeric:int128",
"//absl/strings:cord_test_helpers",
@@ -93,6 +98,7 @@ cc_binary(
deps = [
":hash",
"//absl/base:core_headers",
+ "//absl/container:flat_hash_set",
"//absl/random",
"//absl/strings",
"//absl/strings:cord",
diff --git a/absl/hash/CMakeLists.txt b/absl/hash/CMakeLists.txt
index 5916ae3c..423b74b5 100644
--- a/absl/hash/CMakeLists.txt
+++ b/absl/hash/CMakeLists.txt
@@ -30,6 +30,7 @@ absl_cc_library(
absl::core_headers
absl::endian
absl::fixed_array
+ absl::function_ref
absl::meta
absl::int128
absl::strings
@@ -68,13 +69,18 @@ absl_cc_test(
absl::hash
absl::hash_testing
absl::core_headers
+ absl::btree
+ absl::flat_hash_map
absl::flat_hash_set
+ absl::node_hash_map
+ absl::node_hash_set
absl::spy_hash_state
absl::meta
absl::int128
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
spy_hash_state
@@ -89,6 +95,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
city
@@ -116,6 +123,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
low_level_hash
diff --git a/absl/hash/hash.h b/absl/hash/hash.h
index 8282ea53..74e2d7c0 100644
--- a/absl/hash/hash.h
+++ b/absl/hash/hash.h
@@ -26,9 +26,9 @@
// support Abseil hashing without requiring you to define a hashing
// algorithm.
// * `HashState`, a type-erased class which implements the manipulation of the
-// hash state (H) itself, contains member functions `combine()` and
-// `combine_contiguous()`, which you can use to contribute to an existing
-// hash state when hashing your types.
+//    hash state (H) itself; it contains member functions `combine()`,
+//    `combine_contiguous()`, and `combine_unordered()`, which you can use
+//    to contribute to an existing hash state when hashing your types.
//
// Unlike `std::hash` or other hashing frameworks, the Abseil hashing framework
// provides most of its utility by abstracting away the hash algorithm (and its
@@ -40,6 +40,11 @@
// each process. E.g., `absl::Hash<int>{}(9)` in one process and
// `absl::Hash<int>{}(9)` in another process are likely to differ.
//
+// `absl::Hash` may also produce different values from different dynamically
+// loaded libraries. For this reason, `absl::Hash` values must never cross
+// boundaries between dynamically loaded libraries (including when used in
+// types like hash containers).
+//
// `absl::Hash` is intended to strongly mix input bits with a target of passing
// an [Avalanche Test](https://en.wikipedia.org/wiki/Avalanche_effect).
//
@@ -74,7 +79,9 @@
#define ABSL_HASH_HASH_H_
#include <tuple>
+#include <utility>
+#include "absl/functional/function_ref.h"
#include "absl/hash/internal/hash.h"
namespace absl {
@@ -107,14 +114,27 @@ ABSL_NAMESPACE_BEGIN
// * std::string_view (as well as any instance of std::basic_string that
// uses char and std::char_traits)
// * All the standard sequence containers (provided the elements are hashable)
-// * All the standard ordered associative containers (provided the elements are
+// * All the standard associative containers (provided the elements are
// hashable)
// * absl types such as the following:
// * absl::string_view
-// * absl::InlinedVector
-// * absl::FixedArray
// * absl::uint128
// * absl::Time, absl::Duration, and absl::TimeZone
+// * absl containers (provided the elements are hashable) such as the
+// following:
+// * absl::flat_hash_set, absl::node_hash_set, absl::btree_set
+// * absl::flat_hash_map, absl::node_hash_map, absl::btree_map
+// * absl::btree_multiset, absl::btree_multimap
+// * absl::InlinedVector
+// * absl::FixedArray
+//
+// When absl::Hash is used to hash an unordered container with a custom hash
+// functor, the elements are hashed using default absl::Hash semantics, not
+// the custom hash functor. This is consistent with the behavior of
+// operator==() on unordered containers, which compares elements pairwise with
+// operator==() rather than the custom equality functor. It is usually a
+// mistake to use either operator==() or absl::Hash on unordered collections
+// that use functors incompatible with operator==() equality.
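+//
+// For example (illustrative; `BadHash` stands for a hash functor that is not
+// compatible with `operator==()` equality):
+//
+//   absl::flat_hash_set<int, BadHash> s = {1, 2, 3};
+//   size_t h = absl::HashOf(s);  // elements hashed via absl::Hash<int>,
+//                                // not BadHash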
//
// Note: the list above is not meant to be exhaustive. Additional type support
// may be added, in which case the above list will be updated.
@@ -153,7 +173,8 @@ ABSL_NAMESPACE_BEGIN
// that are otherwise difficult to extend using `AbslHashValue()`. (See the
// `HashState` class below.)
//
-// The "hash state" concept contains two member functions for mixing hash state:
+// The "hash state" concept contains three member functions for mixing hash
+// state:
//
// * `H::combine(state, values...)`
//
@@ -187,6 +208,15 @@ ABSL_NAMESPACE_BEGIN
// (it may perform internal optimizations). If you need this guarantee, use a
// loop instead.
//
+// * `H::combine_unordered(state, begin, end)`
+//
+// Combines a set of elements denoted by an iterator pair into a hash
+// state, returning the updated state. Note that the existing hash
+// state is move-only and must be passed by value.
+//
+// Unlike the other two methods, the hashing is order-independent.
+// This can be used to hash unordered collections.
+//
// -----------------------------------------------------------------------------
// Adding Type Support to `absl::Hash`
// -----------------------------------------------------------------------------
@@ -243,8 +273,9 @@ size_t HashOf(const Types&... values) {
// classes, virtual functions, etc.). The type erasure adds overhead so it
// should be avoided unless necessary.
//
-// Note: This wrapper will only erase calls to:
+// Note: This wrapper will only erase calls to
// combine_contiguous(H, const unsigned char*, size_t)
+// RunCombineUnordered(H, CombinerF)
//
// All other calls will be handled internally and will not invoke overloads
// provided by the wrapped class.
@@ -318,6 +349,8 @@ class HashState : public hash_internal::HashStateBase<HashState> {
private:
HashState() = default;
+ friend class HashState::HashStateBase;
+
template <typename T>
static void CombineContiguousImpl(void* p, const unsigned char* first,
size_t size) {
@@ -329,16 +362,57 @@ class HashState : public hash_internal::HashStateBase<HashState> {
void Init(T* state) {
state_ = state;
combine_contiguous_ = &CombineContiguousImpl<T>;
+ run_combine_unordered_ = &RunCombineUnorderedImpl<T>;
+ }
+
+ template <typename HS>
+ struct CombineUnorderedInvoker {
+ template <typename T, typename ConsumerT>
+ void operator()(T inner_state, ConsumerT inner_cb) {
+ f(HashState::Create(&inner_state),
+ [&](HashState& inner_erased) { inner_cb(inner_erased.Real<T>()); });
+ }
+
+ absl::FunctionRef<void(HS, absl::FunctionRef<void(HS&)>)> f;
+ };
+
+ template <typename T>
+ static HashState RunCombineUnorderedImpl(
+ HashState state,
+ absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>
+ f) {
+ // Note that this implementation assumes that inner_state and outer_state
+ // are the same type. This isn't true in the SpyHash case, but SpyHash
+ // types are move-convertible to each other, so this still works.
+ T& real_state = state.Real<T>();
+ real_state = T::RunCombineUnordered(
+ std::move(real_state), CombineUnorderedInvoker<HashState>{f});
+ return state;
+ }
+
+ template <typename CombinerT>
+ static HashState RunCombineUnordered(HashState state, CombinerT combiner) {
+ auto* run = state.run_combine_unordered_;
+ return run(std::move(state), std::ref(combiner));
}
// Do not erase an already erased state.
void Init(HashState* state) {
state_ = state->state_;
combine_contiguous_ = state->combine_contiguous_;
+ run_combine_unordered_ = state->run_combine_unordered_;
+ }
+
+ template <typename T>
+ T& Real() {
+ return *static_cast<T*>(state_);
}
void* state_;
void (*combine_contiguous_)(void*, const unsigned char*, size_t);
+ HashState (*run_combine_unordered_)(
+ HashState state,
+ absl::FunctionRef<void(HashState, absl::FunctionRef<void(HashState&)>)>);
};
ABSL_NAMESPACE_END
diff --git a/absl/hash/hash_benchmark.cc b/absl/hash/hash_benchmark.cc
index d498ac29..8712a01c 100644
--- a/absl/hash/hash_benchmark.cc
+++ b/absl/hash/hash_benchmark.cc
@@ -19,6 +19,7 @@
#include <vector>
#include "absl/base/attributes.h"
+#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/strings/cord.h"
@@ -107,6 +108,44 @@ absl::Cord FragmentedCord(size_t size) {
return result;
}
+template <typename T>
+std::vector<T> Vector(size_t count) {
+ std::vector<T> result;
+ for (size_t v = 0; v < count; ++v) {
+ result.push_back(v);
+ }
+ return result;
+}
+
+// Bogus type that replicates an unordered_set's bit mixing, but with
+// vector-speed iteration. This is intended to measure the overhead of unordered
+// hashing without counting the speed of unordered_set iteration.
+template <typename T>
+struct FastUnorderedSet {
+ explicit FastUnorderedSet(size_t count) {
+ for (size_t v = 0; v < count; ++v) {
+ values.push_back(v);
+ }
+ }
+ std::vector<T> values;
+
+ template <typename H>
+ friend H AbslHashValue(H h, const FastUnorderedSet& fus) {
+ return H::combine(H::combine_unordered(std::move(h), fus.values.begin(),
+ fus.values.end()),
+ fus.values.size());
+ }
+};
+
+template <typename T>
+absl::flat_hash_set<T> FlatHashSet(size_t count) {
+ absl::flat_hash_set<T> result;
+ for (size_t v = 0; v < count; ++v) {
+ result.insert(v);
+ }
+ return result;
+}
+
// Generates a benchmark and a codegen method for the provided types. The
// codegen method provides a well known entrypoint for dumping assembly.
#define MAKE_BENCHMARK(hash, name, ...) \
@@ -145,10 +184,22 @@ MAKE_BENCHMARK(AbslHash, Cord_Flat_200, FlatCord(200));
MAKE_BENCHMARK(AbslHash, Cord_Flat_5000, FlatCord(5000));
MAKE_BENCHMARK(AbslHash, Cord_Fragmented_200, FragmentedCord(200));
MAKE_BENCHMARK(AbslHash, Cord_Fragmented_5000, FragmentedCord(5000));
-MAKE_BENCHMARK(AbslHash, VectorInt64_10, std::vector<int64_t>(10));
-MAKE_BENCHMARK(AbslHash, VectorInt64_100, std::vector<int64_t>(100));
-MAKE_BENCHMARK(AbslHash, VectorDouble_10, std::vector<double>(10, 1.1));
-MAKE_BENCHMARK(AbslHash, VectorDouble_100, std::vector<double>(100, 1.1));
+MAKE_BENCHMARK(AbslHash, VectorInt64_10, Vector<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, VectorInt64_100, Vector<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, VectorInt64_1000, Vector<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, VectorDouble_10, Vector<double>(10));
+MAKE_BENCHMARK(AbslHash, VectorDouble_100, Vector<double>(100));
+MAKE_BENCHMARK(AbslHash, VectorDouble_1000, Vector<double>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_10, FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_100, FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_1000, FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_10, FlatHashSet<double>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_100, FlatHashSet<double>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_1000, FlatHashSet<double>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetInt64_1000,
+ FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetDouble_1000,
+ FastUnorderedSet<double>(1000));
MAKE_BENCHMARK(AbslHash, PairStringString_0,
std::make_pair(std::string(), std::string()));
MAKE_BENCHMARK(AbslHash, PairStringString_10,
@@ -180,6 +231,24 @@ MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_10,
std::vector<double>(10, 1.1));
MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_100,
std::vector<double>(100, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_1000,
+ std::vector<double>(1000, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_10,
+ FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_100,
+ FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_1000,
+ FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_10,
+ FlatHashSet<double>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_100,
+ FlatHashSet<double>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_1000,
+ FlatHashSet<double>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetInt64_1000,
+ FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetDouble_1000,
+ FastUnorderedSet<double>(1000));
// The latency benchmark attempts to model the speed of the hash function in
// production. When a hash function is used for hashtable lookups it is rarely
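A property these new benchmarks rely on (editor's sketch, not part of the patch): hashing an absl::flat_hash_set goes through combine_unordered, so the result does not depend on iteration order.

    #include <cassert>

    #include "absl/container/flat_hash_set.h"
    #include "absl/hash/hash.h"

    void CheckOrderIndependence() {
      absl::flat_hash_set<int> a = {1, 2, 3};
      absl::flat_hash_set<int> b = {3, 2, 1};  // same elements, reversed insertion
      // Equal sets hash equally regardless of internal iteration order.
      assert(absl::Hash<absl::flat_hash_set<int>>{}(a) ==
             absl::Hash<absl::flat_hash_set<int>>{}(b));
    }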
diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc
index b3ddebdd..ffa45e6e 100644
--- a/absl/hash/hash_test.cc
+++ b/absl/hash/hash_test.cc
@@ -14,12 +14,14 @@
#include "absl/hash/hash.h"
+#include <algorithm>
#include <array>
#include <bitset>
#include <cstring>
#include <deque>
#include <forward_list>
#include <functional>
+#include <initializer_list>
#include <iterator>
#include <limits>
#include <list>
@@ -32,12 +34,18 @@
#include <tuple>
#include <type_traits>
#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
#include "absl/hash/hash_testing.h"
#include "absl/hash/internal/spy_hash_state.h"
#include "absl/meta/type_traits.h"
@@ -46,6 +54,56 @@
namespace {
+// Utility wrapper of T for the purposes of testing the `AbslHash` type erasure
+// mechanism. `TypeErasedValue<T>` can be constructed with a `T`, and can
+// be compared and hashed. However, all hashing goes through the hashing
+// type-erasure framework.
+template <typename T>
+class TypeErasedValue {
+ public:
+ TypeErasedValue() = default;
+ TypeErasedValue(const TypeErasedValue&) = default;
+ TypeErasedValue(TypeErasedValue&&) = default;
+ explicit TypeErasedValue(const T& n) : n_(n) {}
+
+ template <typename H>
+ friend H AbslHashValue(H hash_state, const TypeErasedValue& v) {
+ v.HashValue(absl::HashState::Create(&hash_state));
+ return hash_state;
+ }
+
+ void HashValue(absl::HashState state) const {
+ absl::HashState::combine(std::move(state), n_);
+ }
+
+ bool operator==(const TypeErasedValue& rhs) const { return n_ == rhs.n_; }
+ bool operator!=(const TypeErasedValue& rhs) const { return !(*this == rhs); }
+
+ private:
+ T n_;
+};
+
+// A TypeErasedValue refinement, for containers. It exposes the wrapped
+// `value_type` and is constructible from an initializer list.
+template <typename T>
+class TypeErasedContainer : public TypeErasedValue<T> {
+ public:
+ using value_type = typename T::value_type;
+ TypeErasedContainer() = default;
+ TypeErasedContainer(const TypeErasedContainer&) = default;
+ TypeErasedContainer(TypeErasedContainer&&) = default;
+ explicit TypeErasedContainer(const T& n) : TypeErasedValue<T>(n) {}
+ TypeErasedContainer(std::initializer_list<value_type> init_list)
+ : TypeErasedContainer(T(init_list.begin(), init_list.end())) {}
+ // one-argument constructor of value type T, to appease older toolchains that
+ // get confused by one-element initializer lists in some contexts
+ explicit TypeErasedContainer(const value_type& v)
+ : TypeErasedContainer(T(&v, &v + 1)) {}
+};
+
+template <typename T>
+using TypeErasedVector = TypeErasedContainer<std::vector<T>>;
+
using absl::Hash;
using absl::hash_internal::SpyHashState;
@@ -81,10 +139,10 @@ TYPED_TEST_P(HashValueIntTest, FastPath) {
absl::Hash<std::tuple<TypeParam>>{}(std::tuple<TypeParam>(n)));
}
-REGISTER_TYPED_TEST_CASE_P(HashValueIntTest, BasicUsage, FastPath);
+REGISTER_TYPED_TEST_SUITE_P(HashValueIntTest, BasicUsage, FastPath);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
uint32_t, uint64_t, size_t>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueIntTest, IntTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueIntTest, IntTypes);
enum LegacyEnum { kValue1, kValue2, kValue3 };
@@ -127,6 +185,8 @@ TEST(HashValueTest, FloatingPoint) {
TEST(HashValueTest, Pointer) {
EXPECT_TRUE((is_hashable<int*>::value));
+ EXPECT_TRUE((is_hashable<int(*)(char, float)>::value));
+ EXPECT_TRUE((is_hashable<void(*)(int, int, ...)>::value));
int i;
int* ptr = &i;
@@ -166,6 +226,85 @@ TEST(HashValueTest, PointerAlignment) {
}
}
+TEST(HashValueTest, PointerToMember) {
+ struct Bass {
+ void q() {}
+ };
+
+ struct A : Bass {
+ virtual ~A() = default;
+ virtual void vfa() {}
+
+ static auto pq() -> void (A::*)() { return &A::q; }
+ };
+
+ struct B : Bass {
+ virtual ~B() = default;
+ virtual void vfb() {}
+
+ static auto pq() -> void (B::*)() { return &B::q; }
+ };
+
+ struct Foo : A, B {
+ void f1() {}
+ void f2() const {}
+
+ int g1() & { return 0; }
+ int g2() const & { return 0; }
+ int g3() && { return 0; }
+ int g4() const && { return 0; }
+
+ int h1() & { return 0; }
+ int h2() const & { return 0; }
+ int h3() && { return 0; }
+ int h4() const && { return 0; }
+
+ int a;
+ int b;
+
+ const int c = 11;
+ const int d = 22;
+ };
+
+ EXPECT_TRUE((is_hashable<float Foo::*>::value));
+ EXPECT_TRUE((is_hashable<double (Foo::*)(int, int)&&>::value));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(&Foo::a, &Foo::b, static_cast<int Foo::*>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(&Foo::c, &Foo::d, static_cast<const int Foo::*>(nullptr),
+ &Foo::a, &Foo::b)));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::f1, static_cast<void (Foo::*)()>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::f2, static_cast<void (Foo::*)() const>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::g1, &Foo::h1, static_cast<int (Foo::*)() &>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::g2, &Foo::h2, static_cast<int (Foo::*)() const &>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::g3, &Foo::h3, static_cast<int (Foo::*)() &&>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
+ &Foo::g4, &Foo::h4, static_cast<int (Foo::*)() const &&>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(static_cast<void (Foo::*)()>(&Foo::vfa),
+ static_cast<void (Foo::*)()>(&Foo::vfb),
+ static_cast<void (Foo::*)()>(nullptr))));
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
+ std::make_tuple(static_cast<void (Foo::*)()>(Foo::A::pq()),
+ static_cast<void (Foo::*)()>(Foo::B::pq()),
+ static_cast<void (Foo::*)()>(nullptr))));
+}
+
TEST(HashValueTest, PairAndTuple) {
EXPECT_TRUE((is_hashable<std::pair<int, int>>::value));
EXPECT_TRUE((is_hashable<std::pair<const int&, const int&>>::value));
@@ -381,6 +520,52 @@ TEST(HashValueTest, StdBitset) {
std::bitset<kNumBits>(bit_strings[5].c_str())}));
} // namespace
+// Dummy type with unordered equality and hashing semantics. This preserves
+// input order internally, and is used below to ensure we get test coverage
+// for equal sequences with different iteration orders.
+template <typename T>
+class UnorderedSequence {
+ public:
+ UnorderedSequence() = default;
+ template <typename TT>
+ UnorderedSequence(std::initializer_list<TT> l)
+ : values_(l.begin(), l.end()) {}
+ template <typename ForwardIterator,
+ typename std::enable_if<!std::is_integral<ForwardIterator>::value,
+ bool>::type = true>
+ UnorderedSequence(ForwardIterator begin, ForwardIterator end)
+ : values_(begin, end) {}
+ // one-argument constructor of value type T, to appease older toolchains that
+ // get confused by one-element initializer lists in some contexts
+ explicit UnorderedSequence(const T& v) : values_(&v, &v + 1) {}
+
+ using value_type = T;
+
+ size_t size() const { return values_.size(); }
+ typename std::vector<T>::const_iterator begin() const {
+ return values_.begin();
+ }
+ typename std::vector<T>::const_iterator end() const { return values_.end(); }
+
+ friend bool operator==(const UnorderedSequence& lhs,
+ const UnorderedSequence& rhs) {
+ return lhs.size() == rhs.size() &&
+ std::is_permutation(lhs.begin(), lhs.end(), rhs.begin());
+ }
+ friend bool operator!=(const UnorderedSequence& lhs,
+ const UnorderedSequence& rhs) {
+ return !(lhs == rhs);
+ }
+ template <typename H>
+ friend H AbslHashValue(H h, const UnorderedSequence& u) {
+ return H::combine(H::combine_unordered(std::move(h), u.begin(), u.end()),
+ u.size());
+ }
+
+ private:
+ std::vector<T> values_;
+};
+
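Usage fragment (editor's illustration, in the style of the surrounding tests): two permutations of the same UnorderedSequence compare equal and must therefore hash equal.

    UnorderedSequence<int> u{1, 2, 3};
    UnorderedSequence<int> v{3, 1, 2};
    EXPECT_EQ(u, v);  // permutations compare equal via std::is_permutation...
    EXPECT_EQ(absl::Hash<UnorderedSequence<int>>{}(u),
              absl::Hash<UnorderedSequence<int>>{}(v));  // ...and hash equal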
template <typename T>
class HashValueSequenceTest : public testing::Test {
};
@@ -389,22 +574,66 @@ TYPED_TEST_SUITE_P(HashValueSequenceTest);
TYPED_TEST_P(HashValueSequenceTest, BasicUsage) {
EXPECT_TRUE((is_hashable<TypeParam>::value));
- using ValueType = typename TypeParam::value_type;
- auto a = static_cast<ValueType>(0);
- auto b = static_cast<ValueType>(23);
- auto c = static_cast<ValueType>(42);
+ using IntType = typename TypeParam::value_type;
+ auto a = static_cast<IntType>(0);
+ auto b = static_cast<IntType>(23);
+ auto c = static_cast<IntType>(42);
+
+ std::vector<TypeParam> exemplars = {
+ TypeParam(), TypeParam(), TypeParam{a, b, c},
+ TypeParam{a, c, b}, TypeParam{c, a, b}, TypeParam{a},
+ TypeParam{a, a}, TypeParam{a, a, a}, TypeParam{a, a, b},
+ TypeParam{a, b, a}, TypeParam{b, a, a}, TypeParam{a, b},
+ TypeParam{b, c}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
+
+REGISTER_TYPED_TEST_SUITE_P(HashValueSequenceTest, BasicUsage);
+using IntSequenceTypes = testing::Types<
+ std::deque<int>, std::forward_list<int>, std::list<int>, std::vector<int>,
+ std::vector<bool>, TypeErasedContainer<std::vector<int>>, std::set<int>,
+ std::multiset<int>, UnorderedSequence<int>,
+ TypeErasedContainer<UnorderedSequence<int>>, std::unordered_set<int>,
+ std::unordered_multiset<int>, absl::flat_hash_set<int>,
+ absl::node_hash_set<int>, absl::btree_set<int>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueSequenceTest, IntSequenceTypes);
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
- std::make_tuple(TypeParam(), TypeParam{}, TypeParam{a, b, c},
- TypeParam{a, b}, TypeParam{b, c})));
+template <typename T>
+class HashValueNestedSequenceTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueNestedSequenceTest);
+
+TYPED_TEST_P(HashValueNestedSequenceTest, BasicUsage) {
+ using T = TypeParam;
+ using V = typename T::value_type;
+ std::vector<T> exemplars = {
+ // empty case
+ T{},
+ // sets of empty sets
+ T{V{}}, T{V{}, V{}}, T{V{}, V{}, V{}},
+ // multisets of different values
+ T{V{1}}, T{V{1, 1}, V{1, 1}}, T{V{1, 1, 1}, V{1, 1, 1}, V{1, 1, 1}},
+ // various orderings of same nested sets
+ T{V{}, V{1, 2}}, T{V{}, V{2, 1}}, T{V{1, 2}, V{}}, T{V{2, 1}, V{}},
+ // various orderings of various nested sets, case 2
+ T{V{1, 2}, V{3, 4}}, T{V{1, 2}, V{4, 3}}, T{V{1, 3}, V{2, 4}},
+ T{V{1, 3}, V{4, 2}}, T{V{1, 4}, V{2, 3}}, T{V{1, 4}, V{3, 2}},
+ T{V{2, 3}, V{1, 4}}, T{V{2, 3}, V{4, 1}}, T{V{2, 4}, V{1, 3}},
+ T{V{2, 4}, V{3, 1}}, T{V{3, 4}, V{1, 2}}, T{V{3, 4}, V{2, 1}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
}
-REGISTER_TYPED_TEST_CASE_P(HashValueSequenceTest, BasicUsage);
-using IntSequenceTypes =
- testing::Types<std::deque<int>, std::forward_list<int>, std::list<int>,
- std::vector<int>, std::vector<bool>, std::set<int>,
- std::multiset<int>>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashValueSequenceTest, IntSequenceTypes);
+REGISTER_TYPED_TEST_SUITE_P(HashValueNestedSequenceTest, BasicUsage);
+template <typename T>
+using TypeErasedSet = TypeErasedContainer<UnorderedSequence<T>>;
+
+using NestedIntSequenceTypes = testing::Types<
+ std::vector<std::vector<int>>, std::vector<UnorderedSequence<int>>,
+ std::vector<TypeErasedSet<int>>, UnorderedSequence<std::vector<int>>,
+ UnorderedSequence<UnorderedSequence<int>>,
+ UnorderedSequence<TypeErasedSet<int>>, TypeErasedSet<std::vector<int>>,
+ TypeErasedSet<UnorderedSequence<int>>, TypeErasedSet<TypeErasedSet<int>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueNestedSequenceTest,
+ NestedIntSequenceTypes);
// Private type that only supports AbslHashValue to make sure our chosen hash
// implementation is recursive within absl::Hash.
@@ -564,23 +793,64 @@ TEST(HashValueTest, Variant) {
#endif
}
-TEST(HashValueTest, Maps) {
- EXPECT_TRUE((is_hashable<std::map<int, std::string>>::value));
+template <typename T>
+class HashValueAssociativeMapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMapTest);
+
+TYPED_TEST_P(HashValueAssociativeMapTest, BasicUsage) {
+ using M = TypeParam;
+ using V = typename M::value_type;
+ std::vector<M> exemplars{M{},
+ M{V{0, "foo"}},
+ M{V{1, "foo"}},
+ M{V{0, "bar"}},
+ M{V{1, "bar"}},
+ M{V{0, "foo"}, V{42, "bar"}},
+ M{V{42, "bar"}, V{0, "foo"}},
+ M{V{1, "foo"}, V{42, "bar"}},
+ M{V{1, "foo"}, V{43, "bar"}},
+ M{V{1, "foo"}, V{43, "baz"}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
+}
- using M = std::map<int, std::string>;
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
- M{}, M{{0, "foo"}}, M{{1, "foo"}}, M{{0, "bar"}}, M{{1, "bar"}},
- M{{0, "foo"}, {42, "bar"}}, M{{1, "foo"}, {42, "bar"}},
- M{{1, "foo"}, {43, "bar"}}, M{{1, "foo"}, {43, "baz"}})));
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMapTest, BasicUsage);
+using AssociativeMapTypes = testing::Types<
+ std::map<int, std::string>, std::unordered_map<int, std::string>,
+ absl::flat_hash_map<int, std::string>,
+ absl::node_hash_map<int, std::string>, absl::btree_map<int, std::string>,
+ UnorderedSequence<std::pair<const int, std::string>>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMapTest,
+ AssociativeMapTypes);
- using MM = std::multimap<int, std::string>;
- EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(std::make_tuple(
- MM{}, MM{{0, "foo"}}, MM{{1, "foo"}}, MM{{0, "bar"}}, MM{{1, "bar"}},
- MM{{0, "foo"}, {0, "bar"}}, MM{{0, "bar"}, {0, "foo"}},
- MM{{0, "foo"}, {42, "bar"}}, MM{{1, "foo"}, {42, "bar"}},
- MM{{1, "foo"}, {1, "foo"}, {43, "bar"}}, MM{{1, "foo"}, {43, "baz"}})));
+template <typename T>
+class HashValueAssociativeMultimapTest : public testing::Test {};
+TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest);
+
+TYPED_TEST_P(HashValueAssociativeMultimapTest, BasicUsage) {
+ using MM = TypeParam;
+ using V = typename MM::value_type;
+ std::vector<MM> exemplars{MM{},
+ MM{V{0, "foo"}},
+ MM{V{1, "foo"}},
+ MM{V{0, "bar"}},
+ MM{V{1, "bar"}},
+ MM{V{0, "foo"}, V{0, "bar"}},
+ MM{V{0, "bar"}, V{0, "foo"}},
+ MM{V{0, "foo"}, V{42, "bar"}},
+ MM{V{1, "foo"}, V{42, "bar"}},
+ MM{V{1, "foo"}, V{1, "foo"}, V{43, "bar"}},
+ MM{V{1, "foo"}, V{43, "bar"}, V{1, "foo"}},
+ MM{V{1, "foo"}, V{43, "baz"}}};
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(exemplars));
}
+REGISTER_TYPED_TEST_SUITE_P(HashValueAssociativeMultimapTest, BasicUsage);
+using AssociativeMultimapTypes =
+ testing::Types<std::multimap<int, std::string>,
+ std::unordered_multimap<int, std::string>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashValueAssociativeMultimapTest,
+ AssociativeMultimapTypes);
+
TEST(HashValueTest, ReferenceWrapper) {
EXPECT_TRUE(is_hashable<std::reference_wrapper<Private>>::value);
@@ -818,10 +1088,10 @@ TYPED_TEST_P(HashIntTest, BasicUsage) {
Hash<CombineVariadic<TypeParam>>()({}));
}
-REGISTER_TYPED_TEST_CASE_P(HashIntTest, BasicUsage);
+REGISTER_TYPED_TEST_SUITE_P(HashIntTest, BasicUsage);
using IntTypes = testing::Types<unsigned char, char, int, int32_t, int64_t,
uint32_t, uint64_t, size_t>;
-INSTANTIATE_TYPED_TEST_CASE_P(My, HashIntTest, IntTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(My, HashIntTest, IntTypes);
struct StructWithPadding {
char c;
@@ -928,29 +1198,23 @@ TEST(HashTest, SmallValueOn64ByteBoundary) {
Hash<IntAndString>()(IntAndString{0, std::string(63, '0')});
}
-struct TypeErased {
- size_t n;
-
- template <typename H>
- friend H AbslHashValue(H hash_state, const TypeErased& v) {
- v.HashValue(absl::HashState::Create(&hash_state));
- return hash_state;
- }
-
- void HashValue(absl::HashState state) const {
- absl::HashState::combine(std::move(state), n);
- }
-};
-
TEST(HashTest, TypeErased) {
- EXPECT_TRUE((is_hashable<TypeErased>::value));
- EXPECT_TRUE((is_hashable<std::pair<TypeErased, int>>::value));
+ EXPECT_TRUE((is_hashable<TypeErasedValue<size_t>>::value));
+ EXPECT_TRUE((is_hashable<std::pair<TypeErasedValue<size_t>, int>>::value));
- EXPECT_EQ(SpyHash(TypeErased{7}), SpyHash(size_t{7}));
- EXPECT_NE(SpyHash(TypeErased{7}), SpyHash(size_t{13}));
+ EXPECT_EQ(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{7}));
+ EXPECT_NE(SpyHash(TypeErasedValue<size_t>(7)), SpyHash(size_t{13}));
- EXPECT_EQ(SpyHash(std::make_pair(TypeErased{7}, 17)),
+ EXPECT_EQ(SpyHash(std::make_pair(TypeErasedValue<size_t>(7), 17)),
SpyHash(std::make_pair(size_t{7}, 17)));
+
+ absl::flat_hash_set<absl::flat_hash_set<int>> ss = {{1, 2}, {3, 4}};
+ TypeErasedContainer<absl::flat_hash_set<absl::flat_hash_set<int>>> es = {
+ absl::flat_hash_set<int>{1, 2}, {3, 4}};
+ absl::flat_hash_set<TypeErasedContainer<absl::flat_hash_set<int>>> se = {
+ {1, 2}, {3, 4}};
+ EXPECT_EQ(SpyHash(ss), SpyHash(es));
+ EXPECT_EQ(SpyHash(ss), SpyHash(se));
}
struct ValueWithBoolConversion {
diff --git a/absl/hash/internal/city_test.cc b/absl/hash/internal/city_test.cc
index 251d381d..1bbf02e0 100644
--- a/absl/hash/internal/city_test.cc
+++ b/absl/hash/internal/city_test.cc
@@ -22,6 +22,7 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
+namespace {
static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
static const uint64_t kSeed0 = 1234567;
@@ -590,6 +591,7 @@ TEST(CityHashTest, Unchanging) {
TestUnchanging(testdata[i], 0, kDataSize);
}
+} // namespace
} // namespace hash_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h
index b1e33caf..45dfdd46 100644
--- a/absl/hash/internal/hash.h
+++ b/absl/hash/internal/hash.h
@@ -23,6 +23,7 @@
#include <array>
#include <bitset>
#include <cmath>
+#include <cstddef>
#include <cstring>
#include <deque>
#include <forward_list>
@@ -36,6 +37,8 @@
#include <string>
#include <tuple>
#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -54,6 +57,9 @@
namespace absl {
ABSL_NAMESPACE_BEGIN
+
+class HashState;
+
namespace hash_internal {
// Internal detail: Large buffers are hashed in smaller chunks. This function
@@ -115,24 +121,66 @@ class PiecewiseCombiner {
size_t position_;
};
+// is_hashable()
+//
+// Trait class which returns true if T is hashable by the absl::Hash framework.
+// Used for the AbslHashValue implementations for composite types below.
+template <typename T>
+struct is_hashable;
+
// HashStateBase
//
-// A hash state object represents an intermediate state in the computation
-// of an unspecified hash algorithm. `HashStateBase` provides a CRTP style
-// base class for hash state implementations. Developers adding type support
-// for `absl::Hash` should not rely on any parts of the state object other than
-// the following member functions:
+// An internal implementation detail containing the implementation code common
+// to all of the "hash state" objects generated by Abseil. This is not
+// a public API; users should not create classes that inherit from this.
+//
+// A hash state object is the template argument `H` passed to `AbslHashValue`.
+// It represents an intermediate state in the computation of an unspecified hash
+// algorithm. `HashStateBase` provides a CRTP style base class for hash state
+// implementations. Developers adding type support for `absl::Hash` should not
+// rely on any parts of the state object other than the following member
+// functions:
//
// * HashStateBase::combine()
// * HashStateBase::combine_contiguous()
+// * HashStateBase::combine_unordered()
//
-// A derived hash state class of type `H` must provide a static member function
+// A derived hash state class of type `H` must provide a public member function
// with a signature similar to the following:
//
// `static H combine_contiguous(H state, const unsigned char*, size_t)`.
//
+// It must also provide a private template method named RunCombineUnordered.
+//
+// A "consumer" is a 1-arg functor returning void. Its argument is a reference
+// to an inner hash state object, and it may be called multiple times. When
+// called, the functor consumes the entropy from the provided state object,
+// and resets that object to its empty state.
+//
+// A "combiner" is a stateless 2-arg functor returning void. Its arguments are
+// an inner hash state object and an ElementStateConsumer functor. A combiner
+// uses the provided inner hash state object to hash each element of the
+// container, passing the inner hash state object to the consumer after hashing
+// each element.
+//
+// Given these definitions, a derived hash state class of type H
+// must provide a private template method with a signature similar to the
+// following:
+//
+// `template <typename CombinerT>`
+// `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
+//
+// This function is responsible for constructing the inner state object and
+// providing a consumer to the combiner. It uses side effects of the consumer
+// and combiner to mix the state of each element in an order-independent manner,
+// and uses this to return an updated value of `outer_state`.
+//
+// This inside-out approach generates efficient object code in the normal case,
+// but allows us to use stack storage to implement the absl::HashState type
+// erasure mechanism (avoiding heap allocations while hashing).
+//
// `HashStateBase` will provide a complete implementation for a hash state
-// object in terms of this method.
+// object in terms of these two methods.
//
// Example:
//
@@ -141,6 +189,10 @@ class PiecewiseCombiner {
// static H combine_contiguous(H state, const unsigned char*, size_t);
// using MyHashState::HashStateBase::combine;
// using MyHashState::HashStateBase::combine_contiguous;
+// using MyHashState::HashStateBase::combine_unordered;
+// private:
+// template <typename CombinerT>
+// static H RunCombineUnordered(H state, CombinerT combiner);
// };
template <typename H>
class HashStateBase {
@@ -181,7 +233,30 @@ class HashStateBase {
template <typename T>
static H combine_contiguous(H state, const T* data, size_t size);
+ template <typename I>
+ static H combine_unordered(H state, I begin, I end);
+
using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+
+ template <typename T>
+ using is_hashable = absl::hash_internal::is_hashable<T>;
+
+ private:
+ // Common implementation of the iteration step of a "combiner", as described
+ // above.
+ template <typename I>
+ struct CombineUnorderedCallback {
+ I begin;
+ I end;
+
+ template <typename InnerH, typename ElementStateConsumer>
+ void operator()(InnerH inner_state, ElementStateConsumer cb) {
+ for (; begin != end; ++begin) {
+ inner_state = H::combine(std::move(inner_state), *begin);
+ cb(inner_state);
+ }
+ }
+ };
};
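Building on the `MyHashState` example in the doc comment above, a sketch of what its private `RunCombineUnordered` could look like. The `value()` accessor exposing the accumulated entropy is hypothetical, not part of the documented contract.

    template <typename CombinerT>
    static MyHashState RunCombineUnordered(MyHashState outer_state,
                                           CombinerT combiner) {
      uint64_t unordered = 0;
      // The combiner hashes each element into the fresh inner state and then
      // hands that state to this consumer, which drains and resets it.
      combiner(MyHashState{}, [&](MyHashState& inner_state) {
        unordered += inner_state.value();  // commutative fold: order-independent
        inner_state = MyHashState{};       // reset, per the consumer contract
      });
      return MyHashState::combine(std::move(outer_state), unordered);
    }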
// is_uniquely_represented
@@ -346,17 +421,43 @@ H AbslHashValue(H hash_state, std::nullptr_t) {
return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
}
+// AbslHashValue() for hashing pointers-to-member
+template <typename H, typename T, typename C>
+H AbslHashValue(H hash_state, T C::* ptr) {
+ auto salient_ptm_size = [](std::size_t n) -> std::size_t {
+#if defined(_MSC_VER)
+ // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
+ // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
+ // padding (namely when they have 1 or 3 ints). The value below is a lower
+ // bound on the number of salient, non-padding bytes that we use for
+ // hashing.
+ if (alignof(T C::*) == alignof(int)) {
+ // No padding when all subobjects have the same size as the total
+ // alignment. This happens in 32-bit mode.
+ return n;
+ } else {
+ // Padding for 1 int (size 16) or 3 ints (size 24).
+ // With 2 ints, the size is 16 with no padding, which we pessimize.
+ return n == 24 ? 20 : n == 16 ? 12 : n;
+ }
+#else
+ // On other platforms, we assume that pointers-to-members do not have
+ // padding.
+#ifdef __cpp_lib_has_unique_object_representations
+ static_assert(std::has_unique_object_representations_v<T C::*>);
+#endif // __cpp_lib_has_unique_object_representations
+ return n;
+#endif
+ };
+ return H::combine_contiguous(std::move(hash_state),
+ reinterpret_cast<unsigned char*>(&ptr),
+ salient_ptm_size(sizeof ptr));
+}
+
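Usage illustration (not part of the patch): with this overload, both data-member and member-function pointers hash through absl::Hash.

    #include <cstddef>

    #include "absl/hash/hash.h"

    struct S {
      int a;
      void f() {}
    };

    size_t HashMembers() {
      size_t h1 = absl::Hash<int S::*>{}(&S::a);       // pointer to data member
      size_t h2 = absl::Hash<void (S::*)()>{}(&S::f);  // pointer to member function
      return h1 ^ h2;
    }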
// -----------------------------------------------------------------------------
// AbslHashValue for Composite Types
// -----------------------------------------------------------------------------
-// is_hashable()
-//
-// Trait class which returns true if T is hashable by the absl::Hash framework.
-// Used for the AbslHashValue implementations for composite types below.
-template <typename T>
-struct is_hashable;
-
// AbslHashValue() for hashing pairs
template <typename H, typename T1, typename T2>
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
@@ -502,10 +603,11 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
vector.size());
}
+// AbslHashValue special cases for hashing std::vector<bool>
+
#if defined(ABSL_IS_BIG_ENDIAN) && \
(defined(__GLIBCXX__) || defined(__GLIBCPP__))
-// AbslHashValue for hashing std::vector<bool>
-//
+
// std::hash in libstdc++ does not work correctly with vector<bool> on Big
// Endian platforms therefore we need to implement a custom AbslHashValue for
// it. More details on the bug:
@@ -521,6 +623,22 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
}
return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
}
+#else
+// When not working around the libstdc++ bug above, we still have to contend
+// with the fact that std::hash<vector<bool>> is often poor quality, hashing
+// directly on the internal words and on no other state. On these platforms,
+// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
+//
+// Mixing in the size (as we do in our other vector<> implementations) on top
+// of the library-provided hash implementation avoids this QOI issue.
+template <typename H, typename T, typename Allocator>
+typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
+ H>::type
+AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
+ return H::combine(std::move(hash_state),
+ std::hash<std::vector<T, Allocator>>{}(vector),
+ vector.size());
+}
#endif
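The collision described above, made concrete (editor's sketch): with size() mixed in, these two vectors no longer collide systematically. Hash inequality is of course never guaranteed in general; the assert below expresses the expected behavior in practice.

    #include <cassert>
    #include <vector>

    #include "absl/hash/hash.h"

    void CheckVectorBoolSizeMixing() {
      std::vector<bool> a = {true, true};
      std::vector<bool> b = {true, true, false};  // same packed word as `a`
      assert(absl::Hash<std::vector<bool>>{}(a) !=
             absl::Hash<std::vector<bool>>{}(b));
    }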
// -----------------------------------------------------------------------------
@@ -573,6 +691,55 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
}
// -----------------------------------------------------------------------------
+// AbslHashValue for Unordered Associative Containers
+// -----------------------------------------------------------------------------
+
+// AbslHashValue for hashing std::unordered_set
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multiset
+template <typename H, typename Key, typename Hash, typename KeyEqual,
+ typename Alloc>
+typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
+ H hash_state,
+ const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_map
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
+// AbslHashValue for hashing std::unordered_multimap
+template <typename H, typename Key, typename T, typename Hash,
+ typename KeyEqual, typename Alloc>
+typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
+ H>::type
+AbslHashValue(H hash_state,
+ const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
+ return H::combine(
+ H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
+ s.size());
+}
+
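Sketch of the guarantee these overloads provide (not part of the patch): duplicates and iteration order are both absorbed by combine_unordered.

    #include <cassert>
    #include <unordered_set>

    #include "absl/hash/hash.h"

    void CheckMultisetHashing() {
      std::unordered_multiset<int> a = {1, 1, 2};
      std::unordered_multiset<int> b = {2, 1, 1};  // same multiset, reordered
      assert(absl::Hash<std::unordered_multiset<int>>{}(a) ==
             absl::Hash<std::unordered_multiset<int>>{}(b));
    }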
+// -----------------------------------------------------------------------------
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------
@@ -815,6 +982,31 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// move-only ensures that there is only one non-moved-from object.
MixingHashState() : state_(Seed()) {}
+ friend class MixingHashState::HashStateBase;
+
+ template <typename CombinerT>
+ static MixingHashState RunCombineUnordered(MixingHashState state,
+ CombinerT combiner) {
+ uint64_t unordered_state = 0;
+ combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
+ // Add the hash state of the element to the running total, but mix the
+ // carry bit back into the low bit. This is intended to avoid losing
+ // entropy to overflow, especially when unordered_multisets contain
+ // multiple copies of the same value.
+ auto element_state = inner_state.state_;
+ unordered_state += element_state;
+ if (unordered_state < element_state) {
+ ++unordered_state;
+ }
+ inner_state = MixingHashState{};
+ });
+ return MixingHashState::combine(std::move(state), unordered_state);
+ }
+
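The overflow-folding step above in isolation (editor's sketch): a 64-bit add with end-around carry, so a wrapped sum re-injects its carry instead of dropping it.

    #include <cstdint>

    uint64_t AddWithEndAroundCarry(uint64_t accumulator, uint64_t element_state) {
      accumulator += element_state;
      if (accumulator < element_state) ++accumulator;  // wrapped: fold carry back
      return accumulator;
    }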
+ // Allow the HashState type-erasure implementation to invoke
+ // RunCombineUnordered() directly.
+ friend class absl::HashState;
+
// Workaround for MSVC bug.
// We make the type copyable to fix the calling convention, even though we
// never actually copy it. Keep it private to not affect the public API of the
@@ -898,15 +1090,10 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
}
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
-#if defined(__aarch64__)
- // On AArch64, calculating a 128-bit product is inefficient, because it
- // requires a sequence of two instructions to calculate the upper and lower
- // halves of the result.
- using MultType = uint64_t;
-#else
+ // Though the 128-bit product on AArch64 needs two instructions, it is
+ // still a good balance between speed and hash quality.
using MultType =
absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
-#endif
// We do the addition in 64-bit space to make sure the 128-bit
// multiplication is fast. If we were to do it as MultType the compiler has
// to assume that the high word is non-zero and needs to perform 2
@@ -1049,6 +1236,14 @@ H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
return hash_internal::hash_range_or_bytes(std::move(state), data, size);
}
+// HashStateBase::combine_unordered()
+template <typename H>
+template <typename I>
+H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
+ return H::RunCombineUnordered(std::move(state),
+ CombineUnorderedCallback<I>{begin, end});
+}
+
// HashStateBase::PiecewiseCombiner::add_buffer()
template <typename H>
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
diff --git a/absl/hash/internal/spy_hash_state.h b/absl/hash/internal/spy_hash_state.h
index c0831208..09728266 100644
--- a/absl/hash/internal/spy_hash_state.h
+++ b/absl/hash/internal/spy_hash_state.h
@@ -15,6 +15,7 @@
#ifndef ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
#define ABSL_HASH_INTERNAL_SPY_HASH_STATE_H_
+#include <algorithm>
#include <ostream>
#include <string>
#include <vector>
@@ -167,6 +168,24 @@ class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> {
using SpyHashStateImpl::HashStateBase::combine_contiguous;
+ template <typename CombinerT>
+ static SpyHashStateImpl RunCombineUnordered(SpyHashStateImpl state,
+ CombinerT combiner) {
+ UnorderedCombinerCallback cb;
+
+ combiner(SpyHashStateImpl<void>{}, std::ref(cb));
+
+ std::sort(cb.element_hash_representations.begin(),
+ cb.element_hash_representations.end());
+ state.hash_representation_.insert(state.hash_representation_.end(),
+ cb.element_hash_representations.begin(),
+ cb.element_hash_representations.end());
+ if (cb.error && cb.error->has_value()) {
+ state.error_ = std::move(cb.error);
+ }
+ return state;
+ }
+
absl::optional<std::string> error() const {
if (moved_from_) {
return "Returned a moved-from instance of the hash state object.";
@@ -178,6 +197,22 @@ class SpyHashStateImpl : public HashStateBase<SpyHashStateImpl<T>> {
template <typename U>
friend class SpyHashStateImpl;
+ struct UnorderedCombinerCallback {
+ std::vector<std::string> element_hash_representations;
+ std::shared_ptr<absl::optional<std::string>> error;
+
+ // The inner spy can have a different type.
+ template <typename U>
+ void operator()(SpyHashStateImpl<U>& inner) {
+ element_hash_representations.push_back(
+ absl::StrJoin(inner.hash_representation_, ""));
+ if (inner.error_->has_value()) {
+ error = std::move(inner.error_);
+ }
+ inner = SpyHashStateImpl<void>{};
+ }
+ };
+
// This is true if SpyHashStateImpl<T> has been passed to a call of
// AbslHashValue with the wrong type. This detects that the user called
// AbslHashValue directly (because the hash state type does not match).
diff --git a/absl/memory/BUILD.bazel b/absl/memory/BUILD.bazel
index c16bf8a9..389aedf3 100644
--- a/absl/memory/BUILD.bazel
+++ b/absl/memory/BUILD.bazel
@@ -29,6 +29,9 @@ cc_library(
name = "memory",
hdrs = ["memory.h"],
copts = ABSL_DEFAULT_COPTS,
+ defines = select({
+ "//conditions:default": [],
+ }),
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:core_headers",
diff --git a/absl/memory/memory_test.cc b/absl/memory/memory_test.cc
index 1990c7ba..5d5719b3 100644
--- a/absl/memory/memory_test.cc
+++ b/absl/memory/memory_test.cc
@@ -548,22 +548,23 @@ struct MinimalMockAllocator {
TEST(AllocatorTraits, FunctionsMinimal) {
int trace = 0;
int hint;
- TestValue x(&trace);
+ alignas(TestValue) char buffer[sizeof(TestValue)];
+ auto* x = reinterpret_cast<TestValue*>(buffer);
MinimalMockAllocator mock;
using Traits = absl::allocator_traits<MinimalMockAllocator>;
- EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(&x));
- EXPECT_CALL(mock, deallocate(&x, 7));
+ EXPECT_CALL(mock, allocate(7)).WillRepeatedly(Return(x));
+ EXPECT_CALL(mock, deallocate(x, 7));
- EXPECT_EQ(&x, Traits::allocate(mock, 7));
+ EXPECT_EQ(x, Traits::allocate(mock, 7));
static_cast<void>(Traits::allocate(mock, 7, static_cast<const void*>(&hint)));
- EXPECT_EQ(&x, Traits::allocate(mock, 7, static_cast<const void*>(&hint)));
- Traits::deallocate(mock, &x, 7);
+ EXPECT_EQ(x, Traits::allocate(mock, 7, static_cast<const void*>(&hint)));
+ Traits::deallocate(mock, x, 7);
+ EXPECT_EQ(0, trace);
+ Traits::construct(mock, x, &trace);
EXPECT_EQ(1, trace);
- Traits::construct(mock, &x, &trace);
- EXPECT_EQ(2, trace);
- Traits::destroy(mock, &x);
- EXPECT_EQ(1, trace);
+ Traits::destroy(mock, x);
+ EXPECT_EQ(0, trace);
EXPECT_EQ(std::numeric_limits<size_t>::max() / sizeof(TestValue),
Traits::max_size(mock));
diff --git a/absl/numeric/BUILD.bazel b/absl/numeric/BUILD.bazel
index 1f9e0f20..eaa27dfd 100644
--- a/absl/numeric/BUILD.bazel
+++ b/absl/numeric/BUILD.bazel
@@ -37,6 +37,20 @@ cc_library(
],
)
+cc_binary(
+ name = "bits_benchmark",
+ testonly = 1,
+ srcs = ["bits_benchmark.cc"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":bits",
+ "//absl/base:core_headers",
+ "//absl/random",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
cc_test(
name = "bits_test",
size = "small",
diff --git a/absl/numeric/bits.h b/absl/numeric/bits.h
index 52013ad4..628cdf50 100644
--- a/absl/numeric/bits.h
+++ b/absl/numeric/bits.h
@@ -133,7 +133,8 @@ template <class T>
ABSL_INTERNAL_CONSTEXPR_CLZ inline
typename std::enable_if<std::is_unsigned<T>::value, T>::type
bit_width(T x) noexcept {
- return std::numeric_limits<T>::digits - countl_zero(x);
+ return std::numeric_limits<T>::digits -
+ static_cast<unsigned int>(countl_zero(x));
}
// Returns: If x == 0, 0; otherwise the maximal value y such that
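For reference (editor's note, not part of the patch), bit_width's contract: bit_width(x) == 1 + floor(log2(x)) for x > 0, and bit_width(0) == 0. The static_asserts below compile on toolchains where countl_zero is constexpr; elsewhere, check at runtime instead.

    #include "absl/numeric/bits.h"

    static_assert(absl::bit_width(0u) == 0, "");
    static_assert(absl::bit_width(1u) == 1, "");
    static_assert(absl::bit_width(255u) == 8, "");
    static_assert(absl::bit_width(256u) == 9, "");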
diff --git a/absl/numeric/bits_benchmark.cc b/absl/numeric/bits_benchmark.cc
new file mode 100644
index 00000000..b9759583
--- /dev/null
+++ b/absl/numeric/bits_benchmark.cc
@@ -0,0 +1,73 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/base/optimization.h"
+#include "absl/numeric/bits.h"
+#include "absl/random/random.h"
+
+namespace absl {
+namespace {
+
+template <typename T>
+static void BM_bitwidth(benchmark::State& state) {
+ const int count = state.range(0);
+
+ absl::BitGen rng;
+ std::vector<T> values;
+ values.reserve(count);
+ for (int i = 0; i < count; ++i) {
+ values.push_back(absl::Uniform<T>(rng, 0, std::numeric_limits<T>::max()));
+ }
+
+ while (state.KeepRunningBatch(count)) {
+ for (int i = 0; i < count; ++i) {
+ benchmark::DoNotOptimize(absl::bit_width(values[i]));
+ }
+ }
+}
+BENCHMARK_TEMPLATE(BM_bitwidth, uint8_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth, uint16_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth, uint32_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth, uint64_t)->Range(1, 1 << 20);
+
+template <typename T>
+static void BM_bitwidth_nonzero(benchmark::State& state) {
+ const int count = state.range(0);
+
+ absl::BitGen rng;
+ std::vector<T> values;
+ values.reserve(count);
+ for (int i = 0; i < count; ++i) {
+ values.push_back(absl::Uniform<T>(rng, 1, std::numeric_limits<T>::max()));
+ }
+
+ while (state.KeepRunningBatch(count)) {
+ for (int i = 0; i < count; ++i) {
+ const T value = values[i];
+ ABSL_ASSUME(value > 0);
+ benchmark::DoNotOptimize(absl::bit_width(value));
+ }
+ }
+}
+BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint8_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint16_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint32_t)->Range(1, 1 << 20);
+BENCHMARK_TEMPLATE(BM_bitwidth_nonzero, uint64_t)->Range(1, 1 << 20);
+
+} // namespace
+} // namespace absl
diff --git a/absl/numeric/int128.cc b/absl/numeric/int128.cc
index 17d88744..8cdcbf05 100644
--- a/absl/numeric/int128.cc
+++ b/absl/numeric/int128.cc
@@ -42,11 +42,11 @@ namespace {
// Returns: 2
inline ABSL_ATTRIBUTE_ALWAYS_INLINE int Fls128(uint128 n) {
if (uint64_t hi = Uint128High64(n)) {
- ABSL_INTERNAL_ASSUME(hi != 0);
+ ABSL_ASSUME(hi != 0);
return 127 - countl_zero(hi);
}
const uint64_t low = Uint128Low64(n);
- ABSL_INTERNAL_ASSUME(low != 0);
+ ABSL_ASSUME(low != 0);
return 63 - countl_zero(low);
}
@@ -332,6 +332,7 @@ std::ostream& operator<<(std::ostream& os, int128 v) {
ABSL_NAMESPACE_END
} // namespace absl
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
namespace std {
constexpr bool numeric_limits<absl::uint128>::is_specialized;
constexpr bool numeric_limits<absl::uint128>::is_signed;
@@ -381,3 +382,4 @@ constexpr int numeric_limits<absl::int128>::max_exponent10;
constexpr bool numeric_limits<absl::int128>::traps;
constexpr bool numeric_limits<absl::int128>::tinyness_before;
} // namespace std
+#endif
diff --git a/absl/numeric/int128.h b/absl/numeric/int128.h
index c7ad96be..7a899eec 100644
--- a/absl/numeric/int128.h
+++ b/absl/numeric/int128.h
@@ -44,7 +44,7 @@
// builtin type. We need to make sure not to define operator wchar_t()
// alongside operator unsigned short() in these instances.
#define ABSL_INTERNAL_WCHAR_T __wchar_t
-#if defined(_M_X64)
+#if defined(_M_X64) && !defined(_M_ARM64EC)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif // defined(_M_X64)
@@ -980,7 +980,7 @@ inline uint128 operator*(uint128 lhs, uint128 rhs) {
// can be used for uint128 storage.
return static_cast<unsigned __int128>(lhs) *
static_cast<unsigned __int128>(rhs);
-#elif defined(_MSC_VER) && defined(_M_X64)
+#elif defined(_MSC_VER) && defined(_M_X64) && !defined(_M_ARM64EC)
uint64_t carry;
uint64_t low = _umul128(Uint128Low64(lhs), Uint128Low64(rhs), &carry);
return MakeUint128(Uint128Low64(lhs) * Uint128High64(rhs) +
diff --git a/absl/profiling/BUILD.bazel b/absl/profiling/BUILD.bazel
index 496a06b2..3392c96c 100644
--- a/absl/profiling/BUILD.bazel
+++ b/absl/profiling/BUILD.bazel
@@ -43,6 +43,9 @@ cc_test(
name = "sample_recorder_test",
srcs = ["internal/sample_recorder_test.cc"],
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":sample_recorder",
"//absl/base:core_headers",
diff --git a/absl/profiling/internal/exponential_biased_test.cc b/absl/profiling/internal/exponential_biased_test.cc
index 5675001d..6a6c317e 100644
--- a/absl/profiling/internal/exponential_biased_test.cc
+++ b/absl/profiling/internal/exponential_biased_test.cc
@@ -29,6 +29,7 @@ using ::testing::Ge;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
+namespace {
MATCHER_P2(IsBetween, a, b,
absl::StrCat(std::string(negation ? "isn't" : "is"), " between ", a,
@@ -194,6 +195,7 @@ TEST(ExponentialBiasedTest, InitializationModes) {
EXPECT_THAT(eb_stack.GetSkipCount(2), Ge(0));
}
+} // namespace
} // namespace profiling_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/profiling/internal/sample_recorder.h b/absl/profiling/internal/sample_recorder.h
index 5e04a9cd..5f65983b 100644
--- a/absl/profiling/internal/sample_recorder.h
+++ b/absl/profiling/internal/sample_recorder.h
@@ -46,6 +46,7 @@ struct Sample {
absl::Mutex init_mu;
T* next = nullptr;
T* dead ABSL_GUARDED_BY(init_mu) = nullptr;
+ int64_t weight; // How many sampling events were required to sample this one.
};
// Holds samples and their associated stack traces with a soft limit of
@@ -59,7 +60,8 @@ class SampleRecorder {
~SampleRecorder();
// Registers for sampling. Returns an opaque registration info.
- T* Register();
+ template <typename... Targs>
+ T* Register(Targs&&... args);
// Unregisters the sample.
void Unregister(T* sample);
@@ -75,12 +77,14 @@ class SampleRecorder {
// samples that have been dropped.
int64_t Iterate(const std::function<void(const T& stack)>& f);
+ int32_t GetMaxSamples() const;
void SetMaxSamples(int32_t max);
private:
void PushNew(T* sample);
void PushDead(T* sample);
- T* PopDead();
+ template <typename... Targs>
+ T* PopDead(Targs... args);
std::atomic<size_t> dropped_samples_;
std::atomic<size_t> size_estimate_;
@@ -162,7 +166,8 @@ void SampleRecorder<T>::PushDead(T* sample) {
}
template <typename T>
-T* SampleRecorder<T>::PopDead() {
+template <typename... Targs>
+T* SampleRecorder<T>::PopDead(Targs... args) {
absl::MutexLock graveyard_lock(&graveyard_.init_mu);
// The list is circular, so eventually it collapses down to
@@ -174,12 +179,13 @@ T* SampleRecorder<T>::PopDead() {
absl::MutexLock sample_lock(&sample->init_mu);
graveyard_.dead = sample->dead;
sample->dead = nullptr;
- sample->PrepareForSampling();
+ sample->PrepareForSampling(std::forward<Targs>(args)...);
return sample;
}
template <typename T>
-T* SampleRecorder<T>::Register() {
+template <typename... Targs>
+T* SampleRecorder<T>::Register(Targs&&... args) {
int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
if (size > max_samples_.load(std::memory_order_relaxed)) {
size_estimate_.fetch_sub(1, std::memory_order_relaxed);
@@ -187,10 +193,14 @@ T* SampleRecorder<T>::Register() {
return nullptr;
}
- T* sample = PopDead();
+ T* sample = PopDead(args...);
if (sample == nullptr) {
// Resurrection failed. Hire a new warlock.
sample = new T();
+ {
+ absl::MutexLock sample_lock(&sample->init_mu);
+ sample->PrepareForSampling(std::forward<Targs>(args)...);
+ }
PushNew(sample);
}
@@ -223,6 +233,11 @@ void SampleRecorder<T>::SetMaxSamples(int32_t max) {
max_samples_.store(max, std::memory_order_release);
}
+template <typename T>
+int32_t SampleRecorder<T>::GetMaxSamples() const {
+ return max_samples_.load(std::memory_order_acquire);
+}
+
} // namespace profiling_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/profiling/internal/sample_recorder_test.cc b/absl/profiling/internal/sample_recorder_test.cc
index ec6e0fa2..3373329a 100644
--- a/absl/profiling/internal/sample_recorder_test.cc
+++ b/absl/profiling/internal/sample_recorder_test.cc
@@ -36,7 +36,7 @@ using ::testing::UnorderedElementsAre;
struct Info : public Sample<Info> {
public:
- void PrepareForSampling() {}
+ void PrepareForSampling(int64_t w) { weight = w; }
std::atomic<size_t> size;
absl::Time create_time;
};
@@ -49,8 +49,14 @@ std::vector<size_t> GetSizes(SampleRecorder<Info>* s) {
return res;
}
-Info* Register(SampleRecorder<Info>* s, size_t size) {
- auto* info = s->Register();
+std::vector<int64_t> GetWeights(SampleRecorder<Info>* s) {
+ std::vector<int64_t> res;
+ s->Iterate([&](const Info& info) { res.push_back(info.weight); });
+ return res;
+}
+
+Info* Register(SampleRecorder<Info>* s, int64_t weight, size_t size) {
+ auto* info = s->Register(weight);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -58,13 +64,15 @@ Info* Register(SampleRecorder<Info>* s, size_t size) {
TEST(SampleRecorderTest, Registration) {
SampleRecorder<Info> sampler;
- auto* info1 = Register(&sampler, 1);
+ auto* info1 = Register(&sampler, 31, 1);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31));
- auto* info2 = Register(&sampler, 2);
+ auto* info2 = Register(&sampler, 32, 2);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
info1->size.store(3);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(31, 32));
sampler.Unregister(info1);
sampler.Unregister(info2);
@@ -74,18 +82,22 @@ TEST(SampleRecorderTest, Unregistration) {
SampleRecorder<Info> sampler;
std::vector<Info*> infos;
for (size_t i = 0; i < 3; ++i) {
- infos.push_back(Register(&sampler, i));
+ infos.push_back(Register(&sampler, 33 + i, i));
}
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 34, 35));
sampler.Unregister(infos[1]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35));
- infos.push_back(Register(&sampler, 3));
- infos.push_back(Register(&sampler, 4));
+ infos.push_back(Register(&sampler, 36, 3));
+ infos.push_back(Register(&sampler, 37, 4));
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 36, 37));
sampler.Unregister(infos[3]);
EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+ EXPECT_THAT(GetWeights(&sampler), UnorderedElementsAre(33, 35, 37));
sampler.Unregister(infos[0]);
sampler.Unregister(infos[2]);
@@ -99,18 +111,18 @@ TEST(SampleRecorderTest, MultiThreaded) {
ThreadPool pool(10);
for (int i = 0; i < 10; ++i) {
- pool.Schedule([&sampler, &stop]() {
+ pool.Schedule([&sampler, &stop, i]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<Info*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(i));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
- infoz.push_back(sampler.Register());
+ infoz.push_back(sampler.Register(i));
break;
}
case 1: {
@@ -119,6 +131,7 @@ TEST(SampleRecorderTest, MultiThreaded) {
Info* info = infoz[p];
infoz[p] = infoz.back();
infoz.pop_back();
+ EXPECT_EQ(info->weight, i);
sampler.Unregister(info);
break;
}
@@ -143,8 +156,8 @@ TEST(SampleRecorderTest, MultiThreaded) {
TEST(SampleRecorderTest, Callback) {
SampleRecorder<Info> sampler;
- auto* info1 = Register(&sampler, 1);
- auto* info2 = Register(&sampler, 2);
+ auto* info1 = Register(&sampler, 39, 1);
+ auto* info2 = Register(&sampler, 40, 2);
static const Info* expected;
diff --git a/absl/random/BUILD.bazel b/absl/random/BUILD.bazel
index fdde78b6..08ecd197 100644
--- a/absl/random/BUILD.bazel
+++ b/absl/random/BUILD.bazel
@@ -100,8 +100,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":seed_gen_exception",
- "//absl/container:inlined_vector",
- "//absl/random/internal:nonsecure_base",
+ "//absl/base:config",
"//absl/random/internal:pool_urbg",
"//absl/random/internal:salted_seed_seq",
"//absl/random/internal:seed_material",
@@ -128,6 +127,7 @@ cc_library(
name = "mock_distributions",
testonly = 1,
hdrs = ["mock_distributions.h"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":distributions",
":mocking_bit_gen",
@@ -183,6 +183,9 @@ cc_test(
copts = ABSL_TEST_COPTS,
flaky = 1,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":distributions",
":random",
@@ -235,6 +238,9 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm", # Does not converge on WASM.
+ ],
deps = [
":distributions",
":random",
@@ -429,6 +435,9 @@ cc_test(
srcs = ["mocking_bit_gen_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":bit_gen_ref",
":mock_distributions",
@@ -444,6 +453,9 @@ cc_test(
srcs = ["mock_distributions_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":mock_distributions",
":mocking_bit_gen",
@@ -458,6 +470,9 @@ cc_test(
srcs = ["examples_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":random",
"@com_google_googletest//:gtest_main",
diff --git a/absl/random/CMakeLists.txt b/absl/random/CMakeLists.txt
index 9d1c67fb..d04c7081 100644
--- a/absl/random/CMakeLists.txt
+++ b/absl/random/CMakeLists.txt
@@ -121,6 +121,7 @@ absl_cc_library(
absl::variant
GTest::gmock
GTest::gtest
+ PUBLIC
TESTONLY
)
@@ -222,8 +223,8 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
+ absl::config
absl::inlined_vector
- absl::random_internal_nonsecure_base
absl::random_internal_pool_urbg
absl::random_internal_salted_seed_seq
absl::random_internal_seed_material
@@ -726,7 +727,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::core_headers
- absl::optional
+ absl::inlined_vector
absl::random_internal_pool_urbg
absl::random_internal_salted_seed_seq
absl::random_internal_seed_material
@@ -1210,5 +1211,6 @@ absl_cc_test(
absl::random_internal_wide_multiply
absl::bits
absl::int128
+ GTest::gmock
GTest::gtest_main
)
diff --git a/absl/random/bernoulli_distribution.h b/absl/random/bernoulli_distribution.h
index 25bd0d5c..d81b6ae6 100644
--- a/absl/random/bernoulli_distribution.h
+++ b/absl/random/bernoulli_distribution.h
@@ -138,16 +138,16 @@ bool bernoulli_distribution::Generate(double p,
// 64 bits.
//
// Second, `c` is constructed by first casting explicitly to a signed
- // integer and then converting implicitly to an unsigned integer of the same
+ // integer and then casting explicitly to an unsigned integer of the same
// size. This is done because the hardware conversion instructions produce
// signed integers from double; if taken as a uint64_t the conversion would
// be wrong for doubles greater than 2^63 (not relevant in this use-case).
// If converted directly to an unsigned integer, the compiler would end up
// emitting code to handle such large values that are not relevant due to
// the known bounds on `c`. To avoid these extra instructions this
- // implementation converts first to the signed type and then use the
- // implicit conversion to unsigned (which is a no-op).
- const uint64_t c = static_cast<int64_t>(p * kP32);
+ // implementation converts first to the signed type and then convert to
+ // unsigned (which is a no-op).
+ const uint64_t c = static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
const uint32_t v = fast_u32(g);
// FAST PATH: this path fails with probability 1/2^32. Note that simply
// returning v <= c would approximate P very well (up to an absolute error
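The two-step conversion in isolation (editor's sketch; `kP32` stands in for the 2^32 scaling constant the surrounding function defines elsewhere):

    #include <cstdint>

    uint64_t ScaledProbability(double p) {
      const double kP32 = 4294967296.0;  // 2^32, assumed from context
      // double -> int64_t is a single hardware instruction; the int64_t ->
      // uint64_t cast is free. Since p <= 1, p * kP32 fits easily in int64_t.
      return static_cast<uint64_t>(static_cast<int64_t>(p * kP32));
    }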
diff --git a/absl/random/beta_distribution_test.cc b/absl/random/beta_distribution_test.cc
index d980c969..c16fbb4f 100644
--- a/absl/random/beta_distribution_test.cc
+++ b/absl/random/beta_distribution_test.cc
@@ -45,16 +45,26 @@ namespace {
template <typename IntType>
class BetaDistributionInterfaceTest : public ::testing::Test {};
-// double-double arithmetic is not supported well by either GCC or Clang; see
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
-// https://bugs.llvm.org/show_bug.cgi?id=49131, and
-// https://bugs.llvm.org/show_bug.cgi?id=49132. Don't bother running these tests
-// with double doubles until compiler support is better.
-using RealTypes =
- std::conditional<absl::numeric_internal::IsDoubleDouble(),
- ::testing::Types<float, double>,
- ::testing::Types<float, double, long double>>::type;
-TYPED_TEST_CASE(BetaDistributionInterfaceTest, RealTypes);
+constexpr bool ShouldExerciseLongDoubleTests() {
+ // long double arithmetic is not supported well by either GCC or Clang on
+ // most platforms, specifically not when implemented in terms of
+ // double-double; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99048,
+ // https://bugs.llvm.org/show_bug.cgi?id=49131, and
+ // https://bugs.llvm.org/show_bug.cgi?id=49132.
+ // A conservative choice is therefore to disable long-double tests pretty
+ // much everywhere except on x86, and only when long double is not
+ // implemented as double-double.
+#if defined(__i686__) || defined(__x86_64__)
+ return !absl::numeric_internal::IsDoubleDouble();
+#else
+ return false;
+#endif
+}
+
+using RealTypes = std::conditional<ShouldExerciseLongDoubleTests(),
+ ::testing::Types<float, double, long double>,
+ ::testing::Types<float, double>>::type;
+TYPED_TEST_SUITE(BetaDistributionInterfaceTest, RealTypes);
TYPED_TEST(BetaDistributionInterfaceTest, SerializeTest) {
// The threshold for whether std::exp(1/a) is finite.
@@ -431,13 +441,13 @@ std::string ParamName(
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
TestSampleStatisticsCombinations, BetaDistributionTest,
::testing::Combine(::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4),
::testing::Values(0.1, 0.2, 0.9, 1.1, 2.5, 10.0, 123.4)),
ParamName);
-INSTANTIATE_TEST_CASE_P(
+INSTANTIATE_TEST_SUITE_P(
TestSampleStatistics_SelectedPairs, BetaDistributionTest,
::testing::Values(std::make_pair(0.5, 1000), std::make_pair(1000, 0.5),
std::make_pair(900, 1000), std::make_pair(10000, 20000),
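The *_CASE -> *_SUITE renames in this hunk and below track googletest's
deprecation of the old spellings (TYPED_TEST_CASE, INSTANTIATE_TEST_CASE_P)
in favor of TYPED_TEST_SUITE and INSTANTIATE_TEST_SUITE_P. A minimal sketch
of the current spelling, with an invented suite name:

  #include <type_traits>
  #include "gtest/gtest.h"

  template <typename T>
  class ExampleTypedTest : public ::testing::Test {};
  using ExampleTypes = ::testing::Types<float, double>;
  TYPED_TEST_SUITE(ExampleTypedTest, ExampleTypes);  // was TYPED_TEST_CASE

  TYPED_TEST(ExampleTypedTest, IsFloatingPoint) {
    EXPECT_TRUE(std::is_floating_point<TypeParam>::value);
  }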
diff --git a/absl/random/bit_gen_ref.h b/absl/random/bit_gen_ref.h
index 9555460f..e475221a 100644
--- a/absl/random/bit_gen_ref.h
+++ b/absl/random/bit_gen_ref.h
@@ -24,6 +24,10 @@
#ifndef ABSL_RANDOM_BIT_GEN_REF_H_
#define ABSL_RANDOM_BIT_GEN_REF_H_
+#include <limits>
+#include <type_traits>
+#include <utility>
+
#include "absl/base/internal/fast_type_id.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
diff --git a/absl/random/distributions.h b/absl/random/distributions.h
index 31c79694..37fc3aa7 100644
--- a/absl/random/distributions.h
+++ b/absl/random/distributions.h
@@ -373,7 +373,7 @@ RealType Gaussian(URBG&& urbg, // NOLINT(runtime/references)
template <typename IntType, typename URBG>
IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references)
IntType lo, IntType hi, IntType base = 2) {
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::LogUniform<IntType, URBG>(...)");
@@ -403,7 +403,7 @@ IntType LogUniform(URBG&& urbg, // NOLINT(runtime/references)
template <typename IntType, typename URBG>
IntType Poisson(URBG&& urbg, // NOLINT(runtime/references)
double mean = 1.0) {
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Poisson<IntType, URBG>(...)");
@@ -435,7 +435,7 @@ template <typename IntType, typename URBG>
IntType Zipf(URBG&& urbg, // NOLINT(runtime/references)
IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
double v = 1.0) {
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Zipf<IntType, URBG>(...)");
diff --git a/absl/random/distributions_test.cc b/absl/random/distributions_test.cc
index d3a5dd75..5321a11c 100644
--- a/absl/random/distributions_test.cc
+++ b/absl/random/distributions_test.cc
@@ -220,6 +220,7 @@ TEST_F(RandomDistributionsTest, UniformNoBounds) {
absl::Uniform<uint16_t>(gen);
absl::Uniform<uint32_t>(gen);
absl::Uniform<uint64_t>(gen);
+ absl::Uniform<absl::uint128>(gen);
}
TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
diff --git a/absl/random/exponential_distribution_test.cc b/absl/random/exponential_distribution_test.cc
index 81a5d17b..3c44d9ec 100644
--- a/absl/random/exponential_distribution_test.cc
+++ b/absl/random/exponential_distribution_test.cc
@@ -58,7 +58,7 @@ using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
-TYPED_TEST_CASE(ExponentialDistributionTypedTest, RealTypes);
+TYPED_TEST_SUITE(ExponentialDistributionTypedTest, RealTypes);
TYPED_TEST(ExponentialDistributionTypedTest, SerializeTest) {
using param_type =
@@ -343,8 +343,8 @@ std::string ParamName(const ::testing::TestParamInfo<Param>& info) {
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
-INSTANTIATE_TEST_CASE_P(All, ExponentialDistributionTests,
- ::testing::ValuesIn(GenParams()), ParamName);
+INSTANTIATE_TEST_SUITE_P(All, ExponentialDistributionTests,
+ ::testing::ValuesIn(GenParams()), ParamName);
// NOTE: absl::exponential_distribution is not guaranteed to be stable.
TEST(ExponentialDistributionTest, StabilityTest) {
diff --git a/absl/random/gaussian_distribution_test.cc b/absl/random/gaussian_distribution_test.cc
index c0bac2b0..4584ac92 100644
--- a/absl/random/gaussian_distribution_test.cc
+++ b/absl/random/gaussian_distribution_test.cc
@@ -54,7 +54,7 @@ using RealTypes =
std::conditional<absl::numeric_internal::IsDoubleDouble(),
::testing::Types<float, double>,
::testing::Types<float, double, long double>>::type;
-TYPED_TEST_CASE(GaussianDistributionInterfaceTest, RealTypes);
+TYPED_TEST_SUITE(GaussianDistributionInterfaceTest, RealTypes);
TYPED_TEST(GaussianDistributionInterfaceTest, SerializeTest) {
using param_type =
diff --git a/absl/random/generators_test.cc b/absl/random/generators_test.cc
index 41725f13..14fd24e9 100644
--- a/absl/random/generators_test.cc
+++ b/absl/random/generators_test.cc
@@ -107,6 +107,8 @@ void TestPoisson(URBG* gen) {
absl::Poisson<int64_t>(*gen);
absl::Poisson<uint64_t>(*gen);
absl::Poisson<uint64_t>(URBG());
+ absl::Poisson<absl::int128>(*gen);
+ absl::Poisson<absl::uint128>(*gen);
}
template <typename URBG>
@@ -126,6 +128,8 @@ void TestZipf(URBG* gen) {
absl::Zipf<int64_t>(*gen, 1 << 10);
absl::Zipf<uint64_t>(*gen, 1 << 10);
absl::Zipf<uint64_t>(URBG(), 1 << 10);
+ absl::Zipf<absl::int128>(*gen, 1 << 10);
+ absl::Zipf<absl::uint128>(*gen, 1 << 10);
}
template <typename URBG>
@@ -146,6 +150,8 @@ void TestLogNormal(URBG* gen) {
absl::LogUniform<int64_t>(*gen, 0, 1 << 10);
absl::LogUniform<uint64_t>(*gen, 0, 1 << 10);
absl::LogUniform<uint64_t>(URBG(), 0, 1 << 10);
+ absl::LogUniform<absl::int128>(*gen, 0, 1 << 10);
+ absl::LogUniform<absl::uint128>(*gen, 0, 1 << 10);
}
template <typename URBG>
diff --git a/absl/random/internal/BUILD.bazel b/absl/random/internal/BUILD.bazel
index e93eebb6..fd5b6195 100644
--- a/absl/random/internal/BUILD.bazel
+++ b/absl/random/internal/BUILD.bazel
@@ -35,7 +35,11 @@ cc_library(
hdrs = ["traits.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- deps = ["//absl/base:config"],
+ deps = [
+ "//absl/base:config",
+ "//absl/numeric:bits",
+ "//absl/numeric:int128",
+ ],
)
cc_library(
@@ -58,6 +62,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":traits",
"//absl/base:config",
"//absl/meta:type_traits",
],
@@ -217,8 +222,8 @@ cc_library(
":salted_seed_seq",
":seed_material",
"//absl/base:core_headers",
+ "//absl/container:inlined_vector",
"//absl/meta:type_traits",
- "//absl/types:optional",
"//absl/types:span",
],
)
@@ -497,6 +502,7 @@ cc_test(
cc_library(
name = "mock_helpers",
hdrs = ["mock_helpers.h"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:fast_type_id",
"//absl/types:optional",
@@ -507,6 +513,7 @@ cc_library(
name = "mock_overload_set",
testonly = 1,
hdrs = ["mock_overload_set.h"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":mock_helpers",
"//absl/random:mocking_bit_gen",
@@ -674,6 +681,7 @@ cc_library(
":traits",
"//absl/base:config",
"//absl/meta:type_traits",
+ "//absl/numeric:int128",
],
)
@@ -687,6 +695,7 @@ cc_test(
"benchmark",
"no_test_ios_x86_64",
"no_test_loonix", # Crashing.
+ "no_test_wasm",
],
deps = [
":nanobenchmark",
diff --git a/absl/random/internal/chi_square.cc b/absl/random/internal/chi_square.cc
index 640d48ce..fbe01732 100644
--- a/absl/random/internal/chi_square.cc
+++ b/absl/random/internal/chi_square.cc
@@ -125,7 +125,8 @@ double ChiSquareValue(int dof, double p) {
const double variance = 2.0 / (9 * dof);
// Cannot use this method if the variance is 0.
if (variance != 0) {
- return std::pow(z * std::sqrt(variance) + mean, 3.0) * dof;
+ double term = z * std::sqrt(variance) + mean;
+ return dof * (term * term * term);
}
}
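The rewrite above replaces a generic libm std::pow(x, 3.0) call with two
multiplies, which is cheaper and avoids platform-to-platform ulp differences
in pow. The equivalence, as a standalone sketch:

  #include <cmath>

  double CubeViaPow(double x) { return std::pow(x, 3.0); }  // libm call
  double CubeViaMul(double x) { return x * x * x; }         // two multiplies

  // For the chi-square approximation above this becomes
  //   dof * (term * term * term)   instead of   std::pow(term, 3.0) * dof.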
diff --git a/absl/random/internal/distribution_caller.h b/absl/random/internal/distribution_caller.h
index fc81b787..0f162a4e 100644
--- a/absl/random/internal/distribution_caller.h
+++ b/absl/random/internal/distribution_caller.h
@@ -18,6 +18,7 @@
#define ABSL_RANDOM_INTERNAL_DISTRIBUTION_CALLER_H_
#include <utility>
+#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/internal/fast_type_id.h"
@@ -32,6 +33,8 @@ namespace random_internal {
// to intercept such calls.
template <typename URBG>
struct DistributionCaller {
+ static_assert(!std::is_pointer<URBG>::value,
+ "You must pass a reference, not a pointer.");
// SFINAE to detect whether the URBG type includes a member matching
// bool InvokeMock(base_internal::FastTypeIdType, void*, void*).
//
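The new static_assert converts a common misuse, passing a URBG by pointer
where a reference (or value) is expected, into a single readable
compile-time diagnostic instead of a wall of template errors. A hedged
sketch of what it catches; Caller and FakeBitGen are invented for
illustration:

  #include <type_traits>

  template <typename URBG>
  struct Caller {  // stand-in for random_internal::DistributionCaller
    static_assert(!std::is_pointer<URBG>::value,
                  "You must pass a reference, not a pointer.");
  };

  struct FakeBitGen {};

  Caller<FakeBitGen> ok;        // compiles
  // Caller<FakeBitGen*> bad;   // fails with the message above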
diff --git a/absl/random/internal/explicit_seed_seq_test.cc b/absl/random/internal/explicit_seed_seq_test.cc
index f867f610..e36d5fa0 100644
--- a/absl/random/internal/explicit_seed_seq_test.cc
+++ b/absl/random/internal/explicit_seed_seq_test.cc
@@ -140,10 +140,8 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
ExplicitSeedSeq seq_copy(seq_from_entropy);
EXPECT_EQ(seq_copy.size(), seq_from_entropy.size());
- std::vector<uint32_t> seeds_1;
- seeds_1.resize(1000, 0);
- std::vector<uint32_t> seeds_2;
- seeds_2.resize(1000, 1);
+ std::vector<uint32_t> seeds_1(1000, 0);
+ std::vector<uint32_t> seeds_2(1000, 1);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
seq_copy.generate(seeds_2.begin(), seeds_2.end());
@@ -157,10 +155,8 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
}
ExplicitSeedSeq another_seq(std::begin(entropy), std::end(entropy));
- std::vector<uint32_t> seeds_1;
- seeds_1.resize(1000, 0);
- std::vector<uint32_t> seeds_2;
- seeds_2.resize(1000, 0);
+ std::vector<uint32_t> seeds_1(1000, 0);
+ std::vector<uint32_t> seeds_2(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
another_seq.generate(seeds_2.begin(), seeds_2.end());
@@ -169,7 +165,15 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
EXPECT_THAT(seeds_1, Not(Pointwise(Eq(), seeds_2)));
// Apply the assignment-operator.
+ // GCC 12 has a false-positive -Wstringop-overflow warning here.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
+#endif
another_seq = seq_from_entropy;
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
// Re-generate seeds.
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
@@ -181,15 +185,13 @@ TEST(ExplicitSeedSeq, CopyAndMoveConstructors) {
// Move constructor.
{
// Get seeds from seed-sequence constructed from entropy.
- std::vector<uint32_t> seeds_1;
- seeds_1.resize(1000, 0);
+ std::vector<uint32_t> seeds_1(1000, 0);
seq_from_entropy.generate(seeds_1.begin(), seeds_1.end());
// Apply move-constructor move the sequence to another instance.
absl::random_internal::ExplicitSeedSeq moved_seq(
std::move(seq_from_entropy));
- std::vector<uint32_t> seeds_2;
- seeds_2.resize(1000, 1);
+ std::vector<uint32_t> seeds_2(1000, 1);
moved_seq.generate(seeds_2.begin(), seeds_2.end());
// Verify that seeds produced by moved-instance are the same as original.
EXPECT_THAT(seeds_1, Pointwise(Eq(), seeds_2));
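The push/ignored/pop bracketing added above is the standard way to silence a
single false positive without disabling the warning file-wide, and gating it
on the compiler version avoids unknown-pragma noise on older toolchains. The
general pattern, sketched with a plain version check in place of Abseil's
ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION helper:

  #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12
  #pragma GCC diagnostic push
  #pragma GCC diagnostic ignored "-Wstringop-overflow"
  #endif
    // ...the one statement that trips the false positive...
  #if defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12
  #pragma GCC diagnostic pop
  #endif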
diff --git a/absl/random/internal/fast_uniform_bits.h b/absl/random/internal/fast_uniform_bits.h
index 425aaf7d..f3a5c00f 100644
--- a/absl/random/internal/fast_uniform_bits.h
+++ b/absl/random/internal/fast_uniform_bits.h
@@ -22,6 +22,7 @@
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
+#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -98,7 +99,7 @@ class FastUniformBits {
result_type operator()(URBG& g); // NOLINT(runtime/references)
private:
- static_assert(std::is_unsigned<UIntType>::value,
+ static_assert(IsUnsigned<UIntType>::value,
"Class-template FastUniformBits<> must be parameterized using "
"an unsigned type.");
diff --git a/absl/random/internal/generate_real.h b/absl/random/internal/generate_real.h
index d5fbb44c..b569450c 100644
--- a/absl/random/internal/generate_real.h
+++ b/absl/random/internal/generate_real.h
@@ -50,10 +50,10 @@ struct GenerateSignedTag {};
// inputs, otherwise it never returns 0.
//
// When a value in U(0,1) is required, use:
-// Uniform64ToReal<double, PositiveValueT, true>;
+// GenerateRealFromBits<double, PositiveValueT, true>;
//
// When a value in U(-1,1) is required, use:
-// Uniform64ToReal<double, SignedValueT, false>;
+// GenerateRealFromBits<double, SignedValueT, false>;
//
// This generates more distinct values than the mathematical equivalent
// `U(0, 1) * 2.0 - 1.0`.
diff --git a/absl/random/internal/mock_helpers.h b/absl/random/internal/mock_helpers.h
index 9d6ab21e..882b0518 100644
--- a/absl/random/internal/mock_helpers.h
+++ b/absl/random/internal/mock_helpers.h
@@ -18,6 +18,7 @@
#include <tuple>
#include <type_traits>
+#include <utility>
#include "absl/base/internal/fast_type_id.h"
#include "absl/types/optional.h"
diff --git a/absl/random/internal/nonsecure_base.h b/absl/random/internal/nonsecure_base.h
index 730fa2ea..c7d7fa4b 100644
--- a/absl/random/internal/nonsecure_base.h
+++ b/absl/random/internal/nonsecure_base.h
@@ -17,28 +17,82 @@
#include <algorithm>
#include <cstdint>
-#include <iostream>
#include <iterator>
-#include <random>
-#include <string>
#include <type_traits>
+#include <utility>
#include <vector>
#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/pool_urbg.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
-#include "absl/types/optional.h"
#include "absl/types/span.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
+// RandenPoolSeedSeq is a custom seed sequence type where generate() fills the
+// provided buffer via the RandenPool entropy source.
+class RandenPoolSeedSeq {
+ private:
+ struct ContiguousTag {};
+ struct BufferTag {};
+
+ // Generate random unsigned values directly into the buffer.
+ template <typename Contiguous>
+ void generate_impl(ContiguousTag, Contiguous begin, Contiguous end) {
+ const size_t n = std::distance(begin, end);
+ auto* a = &(*begin);
+ RandenPool<uint8_t>::Fill(
+ absl::MakeSpan(reinterpret_cast<uint8_t*>(a), sizeof(*a) * n));
+ }
+
+ // Construct a buffer of size n and fill it with values, then copy
+ // those values into the seed iterators.
+ template <typename RandomAccessIterator>
+ void generate_impl(BufferTag, RandomAccessIterator begin,
+ RandomAccessIterator end) {
+ const size_t n = std::distance(begin, end);
+ absl::InlinedVector<uint32_t, 8> data(n, 0);
+ RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
+ std::copy(std::begin(data), std::end(data), begin);
+ }
+
+ public:
+ using result_type = uint32_t;
+
+ size_t size() { return 0; }
+
+ template <typename OutIterator>
+ void param(OutIterator) const {}
+
+ template <typename RandomAccessIterator>
+ void generate(RandomAccessIterator begin, RandomAccessIterator end) {
+ // RandomAccessIterator must be assignable from uint32_t
+ if (begin != end) {
+ using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
+ // ContiguousTag indicates the common case of a known contiguous buffer,
+ // which allows directly filling the buffer. In C++20,
+ // std::contiguous_iterator_tag provides a mechanism for testing this
+ // capability; however, until Abseil's support requirements allow us to
+ // assume C++20, checks are limited to a few common cases.
+ using TagType = absl::conditional_t<
+ (std::is_pointer<RandomAccessIterator>::value ||
+ std::is_same<RandomAccessIterator,
+ typename std::vector<U>::iterator>::value),
+ ContiguousTag, BufferTag>;
+
+ generate_impl(TagType{}, begin, end);
+ }
+ }
+};
+
// Each instance of NonsecureURBGBase<URBG> will be seeded by variates produced
// by a thread-unique URBG-instance.
-template <typename URBG>
+template <typename URBG, typename Seeder = RandenPoolSeedSeq>
class NonsecureURBGBase {
public:
using result_type = typename URBG::result_type;
@@ -85,49 +139,6 @@ class NonsecureURBGBase {
}
private:
- // Seeder is a custom seed sequence type where generate() fills the provided
- // buffer via the RandenPool entropy source.
- struct Seeder {
- using result_type = uint32_t;
-
- size_t size() { return 0; }
-
- template <typename OutIterator>
- void param(OutIterator) const {}
-
- template <typename RandomAccessIterator>
- void generate(RandomAccessIterator begin, RandomAccessIterator end) {
- if (begin != end) {
- // begin, end must be random access iterators assignable from uint32_t.
- generate_impl(
- std::integral_constant<bool, sizeof(*begin) == sizeof(uint32_t)>{},
- begin, end);
- }
- }
-
- // Commonly, generate is invoked with a pointer to a buffer which
- // can be cast to a uint32_t.
- template <typename RandomAccessIterator>
- void generate_impl(std::integral_constant<bool, true>,
- RandomAccessIterator begin, RandomAccessIterator end) {
- auto buffer = absl::MakeSpan(begin, end);
- auto target = absl::MakeSpan(reinterpret_cast<uint32_t*>(buffer.data()),
- buffer.size());
- RandenPool<uint32_t>::Fill(target);
- }
-
- // The non-uint32_t case should be uncommon, and involves an extra copy,
- // filling the uint32_t buffer and then mixing into the output.
- template <typename RandomAccessIterator>
- void generate_impl(std::integral_constant<bool, false>,
- RandomAccessIterator begin, RandomAccessIterator end) {
- const size_t n = std::distance(begin, end);
- absl::InlinedVector<uint32_t, 8> data(n, 0);
- RandenPool<uint32_t>::Fill(absl::MakeSpan(data.begin(), data.end()));
- std::copy(std::begin(data), std::end(data), begin);
- }
- };
-
static URBG ConstructURBG() {
Seeder seeder;
return URBG(seeder);
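With the seeder hoisted out as RandenPoolSeedSeq and exposed as a defaulted
template parameter, the entropy source can now be swapped without touching
NonsecureURBGBase itself. A hedged usage sketch; CountingSeedSeq is invented
here for illustration:

  #include <cstddef>
  #include <cstdint>

  // A deterministic stand-in seed sequence (tests, reproducibility).
  struct CountingSeedSeq {
    using result_type = uint32_t;
    size_t size() { return 0; }
    template <typename OutIterator>
    void param(OutIterator) const {}
    template <typename It>
    void generate(It begin, It end) {
      uint32_t n = 0;
      for (; begin != end; ++begin) *begin = n++;
    }
  };

  // Default behaviour, seeded from the Randen pool:
  //   NonsecureURBGBase<SomeEngine> gen1;
  // Deterministic variant via the new second parameter:
  //   NonsecureURBGBase<SomeEngine, CountingSeedSeq> gen2;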
diff --git a/absl/random/internal/nonsecure_base_test.cc b/absl/random/internal/nonsecure_base_test.cc
index 698027fc..3502243e 100644
--- a/absl/random/internal/nonsecure_base_test.cc
+++ b/absl/random/internal/nonsecure_base_test.cc
@@ -15,6 +15,7 @@
#include "absl/random/internal/nonsecure_base.h"
#include <algorithm>
+#include <cstdint>
#include <iostream>
#include <memory>
#include <random>
@@ -192,54 +193,35 @@ TEST(NonsecureURBGBase, EqualSeedSequencesYieldEqualVariates) {
}
}
-// This is a PRNG-compatible type specifically designed to test
-// that NonsecureURBGBase::Seeder can correctly handle iterators
-// to arbitrary non-uint32_t size types.
-template <typename T>
-struct SeederTestEngine {
- using result_type = T;
+TEST(RandenPoolSeedSeqTest, SeederWorksForU32) {
+ absl::random_internal::RandenPoolSeedSeq seeder;
- static constexpr result_type(min)() {
- return (std::numeric_limits<result_type>::min)();
- }
- static constexpr result_type(max)() {
- return (std::numeric_limits<result_type>::max)();
- }
-
- template <class SeedSequence,
- typename = typename absl::enable_if_t<
- !std::is_same<SeedSequence, SeederTestEngine>::value>>
- explicit SeederTestEngine(SeedSequence&& seq) {
- seed(seq);
- }
-
- SeederTestEngine(const SeederTestEngine&) = default;
- SeederTestEngine& operator=(const SeederTestEngine&) = default;
- SeederTestEngine(SeederTestEngine&&) = default;
- SeederTestEngine& operator=(SeederTestEngine&&) = default;
+ uint32_t state[2] = {0, 0};
+ seeder.generate(std::begin(state), std::end(state));
+ EXPECT_FALSE(state[0] == 0 && state[1] == 0);
+}
- result_type operator()() { return state[0]; }
+TEST(RandenPoolSeedSeqTest, SeederWorksForU64) {
+ absl::random_internal::RandenPoolSeedSeq seeder;
- template <class SeedSequence>
- void seed(SeedSequence&& seq) {
- std::fill(std::begin(state), std::end(state), T(0));
- seq.generate(std::begin(state), std::end(state));
- }
+ uint64_t state[2] = {0, 0};
+ seeder.generate(std::begin(state), std::end(state));
+ EXPECT_FALSE(state[0] == 0 && state[1] == 0);
+ EXPECT_FALSE((state[0] >> 32) == 0 && (state[1] >> 32) == 0);
+}
- T state[2];
-};
+TEST(RandenPoolSeedSeqTest, SeederWorksForS32) {
+ absl::random_internal::RandenPoolSeedSeq seeder;
-TEST(NonsecureURBGBase, SeederWorksForU32) {
- using U32 =
- absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint32_t>>;
- U32 x;
- EXPECT_NE(0, x());
+ int32_t state[2] = {0, 0};
+ seeder.generate(std::begin(state), std::end(state));
+ EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
-TEST(NonsecureURBGBase, SeederWorksForU64) {
- using U64 =
- absl::random_internal::NonsecureURBGBase<SeederTestEngine<uint64_t>>;
+TEST(RandenPoolSeedSeqTest, SeederWorksForVector) {
+ absl::random_internal::RandenPoolSeedSeq seeder;
- U64 x;
- EXPECT_NE(0, x());
+ std::vector<uint32_t> state(2);
+ seeder.generate(std::begin(state), std::end(state));
+ EXPECT_FALSE(state[0] == 0 && state[1] == 0);
}
diff --git a/absl/random/internal/pcg_engine.h b/absl/random/internal/pcg_engine.h
index 8efaf2e0..4ab44c94 100644
--- a/absl/random/internal/pcg_engine.h
+++ b/absl/random/internal/pcg_engine.h
@@ -262,7 +262,7 @@ struct pcg_xsl_rr_128_64 {
uint64_t rotate = h >> 58u;
uint64_t s = Uint128Low64(state) ^ h;
#endif
- return rotr(s, rotate);
+ return rotr(s, static_cast<int>(rotate));
}
};
diff --git a/absl/random/internal/randen.h b/absl/random/internal/randen.h
index 9a3840b8..9ff4a7a5 100644
--- a/absl/random/internal/randen.h
+++ b/absl/random/internal/randen.h
@@ -43,10 +43,8 @@ class Randen {
// Generate updates the randen sponge. The outer portion of the sponge
// (kCapacityBytes .. kStateBytes) may be consumed as PRNG state.
- template <typename T, size_t N>
- void Generate(T (&state)[N]) const {
- static_assert(N * sizeof(T) == kStateBytes,
- "Randen::Generate() requires kStateBytes of state");
+ // REQUIRES: state points to kStateBytes of state.
+ inline void Generate(void* state) const {
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
// HW AES Dispatch.
if (has_crypto_) {
@@ -65,13 +63,9 @@ class Randen {
// Absorb incorporates additional seed material into the randen sponge. After
// absorb returns, Generate must be called before the state may be consumed.
- template <typename S, size_t M, typename T, size_t N>
- void Absorb(const S (&seed)[M], T (&state)[N]) const {
- static_assert(M * sizeof(S) == RandenTraits::kSeedBytes,
- "Randen::Absorb() requires kSeedBytes of seed");
-
- static_assert(N * sizeof(T) == RandenTraits::kStateBytes,
- "Randen::Absorb() requires kStateBytes of state");
+ // REQUIRES: seed points to kSeedBytes of seed.
+ // REQUIRES: state points to kStateBytes of state.
+ inline void Absorb(const void* seed, void* state) const {
#if ABSL_RANDOM_INTERNAL_AES_DISPATCH
// HW AES Dispatch.
if (has_crypto_) {
diff --git a/absl/random/internal/randen_detect.cc b/absl/random/internal/randen_detect.cc
index bbe7b965..6dababa3 100644
--- a/absl/random/internal/randen_detect.cc
+++ b/absl/random/internal/randen_detect.cc
@@ -24,6 +24,11 @@
#include "absl/random/internal/platform.h"
+#if !defined(__UCLIBC__) && defined(__GLIBC__) && \
+ (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 16))
+#define ABSL_HAVE_GETAUXVAL
+#endif
+
#if defined(ABSL_ARCH_X86_64)
#define ABSL_INTERNAL_USE_X86_CPUID
#elif defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
@@ -31,7 +36,7 @@
#if defined(__ANDROID__)
#define ABSL_INTERNAL_USE_ANDROID_GETAUXVAL
#define ABSL_INTERNAL_USE_GETAUXVAL
-#elif defined(__linux__)
+#elif defined(__linux__) && defined(ABSL_HAVE_GETAUXVAL)
#define ABSL_INTERNAL_USE_LINUX_GETAUXVAL
#define ABSL_INTERNAL_USE_GETAUXVAL
#endif
@@ -40,7 +45,6 @@
#if defined(ABSL_INTERNAL_USE_X86_CPUID)
#if defined(_WIN32) || defined(_WIN64)
#include <intrin.h> // NOLINT(build/include_order)
-#pragma intrinsic(__cpuid)
#else
// MSVC-equivalent __cpuid intrinsic function.
static void __cpuid(int cpu_info[4], int info_type) {
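getauxval() only exists in glibc 2.16 and later, and uClibc defines
__GLIBC__ for compatibility without providing it, which is exactly what the
new ABSL_HAVE_GETAUXVAL probe encodes. A hedged sketch of the guarded call
the macro enables:

  #if defined(ABSL_HAVE_GETAUXVAL)
  #include <sys/auxv.h>
  #endif

  unsigned long ReadHwcap() {
  #if defined(ABSL_HAVE_GETAUXVAL)
    return getauxval(AT_HWCAP);  // available on glibc >= 2.16
  #else
    return 0;  // conservative fallback: report no optional CPU features
  #endif
  }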
diff --git a/absl/random/internal/randen_engine.h b/absl/random/internal/randen_engine.h
index 372c3ac2..b4708664 100644
--- a/absl/random/internal/randen_engine.h
+++ b/absl/random/internal/randen_engine.h
@@ -42,7 +42,7 @@ namespace random_internal {
// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
template <typename T>
-class alignas(16) randen_engine {
+class alignas(8) randen_engine {
public:
// C++11 URBG interface:
using result_type = T;
@@ -58,7 +58,8 @@ class alignas(16) randen_engine {
return (std::numeric_limits<result_type>::max)();
}
- explicit randen_engine(result_type seed_value = 0) { seed(seed_value); }
+ randen_engine() : randen_engine(0) {}
+ explicit randen_engine(result_type seed_value) { seed(seed_value); }
template <class SeedSequence,
typename = typename absl::enable_if_t<
@@ -67,17 +68,27 @@ class alignas(16) randen_engine {
seed(seq);
}
- randen_engine(const randen_engine&) = default;
+ // alignment requirements dictate custom copy and move constructors.
+ randen_engine(const randen_engine& other)
+ : next_(other.next_), impl_(other.impl_) {
+ std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
+ }
+ randen_engine& operator=(const randen_engine& other) {
+ next_ = other.next_;
+ impl_ = other.impl_;
+ std::memcpy(state(), other.state(), kStateSizeT * sizeof(result_type));
+ return *this;
+ }
// Returns random bits from the buffer in units of result_type.
result_type operator()() {
// Refill the buffer if needed (unlikely).
+ auto* begin = state();
if (next_ >= kStateSizeT) {
next_ = kCapacityT;
- impl_.Generate(state_);
+ impl_.Generate(begin);
}
-
- return little_endian::ToHost(state_[next_++]);
+ return little_endian::ToHost(begin[next_++]);
}
template <class SeedSequence>
@@ -92,9 +103,10 @@ class alignas(16) randen_engine {
void seed(result_type seed_value = 0) {
next_ = kStateSizeT;
// Zeroes the inner state and fills the outer state with seed_value to
- // mimics behaviour of reseed
- std::fill(std::begin(state_), std::begin(state_) + kCapacityT, 0);
- std::fill(std::begin(state_) + kCapacityT, std::end(state_), seed_value);
+ // mimic the behaviour of reseed
+ auto* begin = state();
+ std::fill(begin, begin + kCapacityT, 0);
+ std::fill(begin + kCapacityT, begin + kStateSizeT, seed_value);
}
// Inserts entropy into (part of) the state. Calling this periodically with
@@ -105,7 +117,6 @@ class alignas(16) randen_engine {
using sequence_result_type = typename SeedSequence::result_type;
static_assert(sizeof(sequence_result_type) == 4,
"SeedSequence::result_type must be 32-bit");
-
constexpr size_t kBufferSize =
Randen::kSeedBytes / sizeof(sequence_result_type);
alignas(16) sequence_result_type buffer[kBufferSize];
@@ -119,8 +130,8 @@ class alignas(16) randen_engine {
if (entropy_size < kBufferSize) {
// ... and only request that many values, or 256-bits, when unspecified.
const size_t requested_entropy = (entropy_size == 0) ? 8u : entropy_size;
- std::fill(std::begin(buffer) + requested_entropy, std::end(buffer), 0);
- seq.generate(std::begin(buffer), std::begin(buffer) + requested_entropy);
+ std::fill(buffer + requested_entropy, buffer + kBufferSize, 0);
+ seq.generate(buffer, buffer + requested_entropy);
#ifdef ABSL_IS_BIG_ENDIAN
// Randen expects the seed buffer to be in Little Endian; reverse it on
// Big Endian platforms.
@@ -146,9 +157,9 @@ class alignas(16) randen_engine {
std::swap(buffer[--dst], buffer[--src]);
}
} else {
- seq.generate(std::begin(buffer), std::end(buffer));
+ seq.generate(buffer, buffer + kBufferSize);
}
- impl_.Absorb(buffer, state_);
+ impl_.Absorb(buffer, state());
// Generate will be called when operator() is called
next_ = kStateSizeT;
@@ -159,9 +170,10 @@ class alignas(16) randen_engine {
count -= step;
constexpr uint64_t kRateT = kStateSizeT - kCapacityT;
+ auto* begin = state();
while (count > 0) {
next_ = kCapacityT;
- impl_.Generate(state_);
+ impl_.Generate(*reinterpret_cast<result_type(*)[kStateSizeT]>(begin));
step = std::min<uint64_t>(kRateT, count);
count -= step;
}
@@ -169,9 +181,9 @@ class alignas(16) randen_engine {
}
bool operator==(const randen_engine& other) const {
+ const auto* begin = state();
return next_ == other.next_ &&
- std::equal(std::begin(state_), std::end(state_),
- std::begin(other.state_));
+ std::equal(begin, begin + kStateSizeT, other.state());
}
bool operator!=(const randen_engine& other) const {
@@ -185,11 +197,12 @@ class alignas(16) randen_engine {
using numeric_type =
typename random_internal::stream_format_type<result_type>::type;
auto saver = random_internal::make_ostream_state_saver(os);
- for (const auto& elem : engine.state_) {
+ auto* it = engine.state();
+ for (auto* end = it + kStateSizeT; it < end; ++it) {
// In the case that `elem` is `uint8_t`, it must be cast to something
// larger so that it prints as an integer rather than a character. For
// simplicity, apply the cast in all circumstances.
- os << static_cast<numeric_type>(little_endian::FromHost(elem))
+ os << static_cast<numeric_type>(little_endian::FromHost(*it))
<< os.fill();
}
os << engine.next_;
@@ -215,7 +228,7 @@ class alignas(16) randen_engine {
if (is.fail()) {
return is;
}
- std::memcpy(engine.state_, state, sizeof(engine.state_));
+ std::memcpy(engine.state(), state, sizeof(state));
engine.next_ = next;
return is;
}
@@ -226,9 +239,21 @@ class alignas(16) randen_engine {
static constexpr size_t kCapacityT =
Randen::kCapacityBytes / sizeof(result_type);
- // First kCapacityT are `inner', the others are accessible random bits.
- alignas(16) result_type state_[kStateSizeT];
- size_t next_; // index within state_
+ // Returns the state array pointer, which is aligned to 16 bytes.
+ // The first kCapacityT are the `inner' sponge; the remainder are available.
+ result_type* state() {
+ return reinterpret_cast<result_type*>(
+ (reinterpret_cast<uintptr_t>(&raw_state_) & 0xf) ? (raw_state_ + 8)
+ : raw_state_);
+ }
+ const result_type* state() const {
+ return const_cast<randen_engine*>(this)->state();
+ }
+
+ // raw state array, manually aligned in state(). This overallocates
+ // by 8 bytes since C++ does not guarantee extended heap alignment.
+ alignas(8) char raw_state_[Randen::kStateBytes + 8];
+ size_t next_; // index within state()
Randen impl_;
};
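Dropping the class alignment to 8 and aligning the state array by hand keeps
randen_engine usable inside heap allocations and containers that only
guarantee 8-byte alignment (extended alignment from operator new is only
guaranteed from C++17). The over-allocate-and-round trick in state(),
reduced to a standalone sketch:

  #include <cstdint>

  struct ManuallyAligned {
    // Over-allocate by 8 bytes so a 16-byte-aligned window always fits.
    alignas(8) char raw[64 + 8];

    char* aligned16() {
      // alignas(8) guarantees the address is a multiple of 8, so modulo 16
      // it is either 0 (already aligned) or 8 (fixed by skipping 8 bytes).
      auto addr = reinterpret_cast<uintptr_t>(&raw);
      return (addr & 0xf) ? raw + 8 : raw;
    }
  };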
diff --git a/absl/random/internal/salted_seed_seq.h b/absl/random/internal/salted_seed_seq.h
index 5953a090..06291865 100644
--- a/absl/random/internal/salted_seed_seq.h
+++ b/absl/random/internal/salted_seed_seq.h
@@ -22,6 +22,7 @@
#include <memory>
#include <type_traits>
#include <utility>
+#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/meta/type_traits.h"
@@ -65,15 +66,19 @@ class SaltedSeedSeq {
template <typename RandomAccessIterator>
void generate(RandomAccessIterator begin, RandomAccessIterator end) {
+ using U = typename std::iterator_traits<RandomAccessIterator>::value_type;
+
// The common case is that generate is called with ContiguousIterators
// to uint arrays. Such contiguous memory regions may be optimized,
// which we detect here.
- using tag = absl::conditional_t<
- (std::is_pointer<RandomAccessIterator>::value &&
- std::is_same<absl::decay_t<decltype(*begin)>, uint32_t>::value),
+ using TagType = absl::conditional_t<
+ (std::is_same<U, uint32_t>::value &&
+ (std::is_pointer<RandomAccessIterator>::value ||
+ std::is_same<RandomAccessIterator,
+ typename std::vector<U>::iterator>::value)),
ContiguousAndUint32Tag, DefaultTag>;
if (begin != end) {
- generate_impl(begin, end, tag{});
+ generate_impl(TagType{}, begin, end, std::distance(begin, end));
}
}
@@ -89,8 +94,15 @@ class SaltedSeedSeq {
struct DefaultTag {};
// Generate which requires the iterators are contiguous pointers to uint32_t.
- void generate_impl(uint32_t* begin, uint32_t* end, ContiguousAndUint32Tag) {
- generate_contiguous(absl::MakeSpan(begin, end));
+ // Fills the initial seed buffer via the underlying SSeq::generate() call,
+ // then mixes in the salt material.
+ template <typename Contiguous>
+ void generate_impl(ContiguousAndUint32Tag, Contiguous begin, Contiguous end,
+ size_t n) {
+ seq_->generate(begin, end);
+ const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
+ auto span = absl::Span<uint32_t>(&*begin, n);
+ MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), span);
}
// The uncommon case for generate is that it is called with iterators over
@@ -98,27 +110,13 @@ class SaltedSeedSeq {
// case we allocate a temporary 32-bit buffer and then copy-assign back
// to the initial inputs.
template <typename RandomAccessIterator>
- void generate_impl(RandomAccessIterator begin, RandomAccessIterator end,
- DefaultTag) {
- return generate_and_copy(std::distance(begin, end), begin);
- }
-
- // Fills the initial seed buffer the underlying SSeq::generate() call,
- // mixing in the salt material.
- void generate_contiguous(absl::Span<uint32_t> buffer) {
- seq_->generate(buffer.begin(), buffer.end());
- const uint32_t salt = absl::random_internal::GetSaltMaterial().value_or(0);
- MixIntoSeedMaterial(absl::MakeConstSpan(&salt, 1), buffer);
- }
-
- // Allocates a seed buffer of `n` elements, generates the seed, then
- // copies the result into the `out` iterator.
- template <typename Iterator>
- void generate_and_copy(size_t n, Iterator out) {
- // Allocate a temporary buffer, generate, and then copy.
+ void generate_impl(DefaultTag, RandomAccessIterator begin,
+ RandomAccessIterator, size_t n) {
+ // Allocates a seed buffer of `n` elements, generates the seed, then
+ // copies the result into the `out` iterator.
absl::InlinedVector<uint32_t, 8> data(n, 0);
- generate_contiguous(absl::MakeSpan(data.data(), data.size()));
- std::copy(data.begin(), data.end(), out);
+ generate_impl(ContiguousAndUint32Tag{}, data.begin(), data.end(), n);
+ std::copy(data.begin(), data.end(), begin);
}
// Because [rand.req.seedseq] is not required to be copy-constructible,
diff --git a/absl/random/internal/traits.h b/absl/random/internal/traits.h
index 75772bd9..f874a0f7 100644
--- a/absl/random/internal/traits.h
+++ b/absl/random/internal/traits.h
@@ -20,6 +20,8 @@
#include <type_traits>
#include "absl/base/config.h"
+#include "absl/numeric/bits.h"
+#include "absl/numeric/int128.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -59,6 +61,31 @@ class is_widening_convertible {
rank<A>() <= rank<B>();
};
+template <typename T>
+struct IsIntegral : std::is_integral<T> {};
+template <>
+struct IsIntegral<absl::int128> : std::true_type {};
+template <>
+struct IsIntegral<absl::uint128> : std::true_type {};
+
+template <typename T>
+struct MakeUnsigned : std::make_unsigned<T> {};
+template <>
+struct MakeUnsigned<absl::int128> {
+ using type = absl::uint128;
+};
+template <>
+struct MakeUnsigned<absl::uint128> {
+ using type = absl::uint128;
+};
+
+template <typename T>
+struct IsUnsigned : std::is_unsigned<T> {};
+template <>
+struct IsUnsigned<absl::int128> : std::false_type {};
+template <>
+struct IsUnsigned<absl::uint128> : std::true_type {};
+
// unsigned_bits<N>::type returns the unsigned int type with the indicated
// number of bits.
template <size_t N>
@@ -81,19 +108,40 @@ struct unsigned_bits<64> {
using type = uint64_t;
};
-#ifdef ABSL_HAVE_INTRINSIC_INT128
template <>
struct unsigned_bits<128> {
- using type = __uint128_t;
+ using type = absl::uint128;
+};
+
+// 256-bit wrapper for wide multiplications.
+struct U256 {
+ uint128 hi;
+ uint128 lo;
+};
+template <>
+struct unsigned_bits<256> {
+ using type = U256;
};
-#endif
template <typename IntType>
struct make_unsigned_bits {
- using type = typename unsigned_bits<std::numeric_limits<
- typename std::make_unsigned<IntType>::type>::digits>::type;
+ using type = typename unsigned_bits<
+ std::numeric_limits<typename MakeUnsigned<IntType>::type>::digits>::type;
};
+template <typename T>
+int BitWidth(T v) {
+ // Workaround for bit_width not supporting int128.
+ // Don't hardcode `64` to make sure this code does not trigger compiler
+ // warnings in smaller types.
+ constexpr int half_bits = sizeof(T) * 8 / 2;
+ if (sizeof(T) == 16 && (v >> half_bits) != 0) {
+ return bit_width(static_cast<uint64_t>(v >> half_bits)) + half_bits;
+ } else {
+ return bit_width(static_cast<uint64_t>(v));
+ }
+}
+
} // namespace random_internal
ABSL_NAMESPACE_END
} // namespace absl
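These traits let the distributions accept absl::int128/uint128 even though
the standard type traits reject them, and BitWidth() works around
absl::bit_width not taking uint128. A short sketch of how they compose,
assuming the headers changed above:

  #include <type_traits>
  #include "absl/numeric/int128.h"
  #include "absl/random/internal/traits.h"

  namespace ri = absl::random_internal;

  static_assert(!std::is_integral<absl::uint128>::value,
                "the standard trait says no");
  static_assert(ri::IsIntegral<absl::uint128>::value,
                "the extended trait says yes");
  static_assert(
      std::is_same<ri::MakeUnsigned<absl::int128>::type, absl::uint128>::value,
      "int128 promotes to uint128");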
diff --git a/absl/random/internal/uniform_helper.h b/absl/random/internal/uniform_helper.h
index 1243bc1c..e68b82ee 100644
--- a/absl/random/internal/uniform_helper.h
+++ b/absl/random/internal/uniform_helper.h
@@ -100,7 +100,7 @@ using uniform_inferred_return_t =
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
- std::is_integral<IntType>,
+ IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalOpenClosedTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
@@ -131,7 +131,7 @@ uniform_lower_bound(Tag, NumType a, NumType) {
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
- std::is_integral<IntType>,
+ IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedOpenTag>,
std::is_same<Tag, IntervalOpenOpenTag>>>::value,
IntType>
@@ -153,7 +153,7 @@ uniform_upper_bound(Tag, FloatType, FloatType b) {
template <typename IntType, typename Tag>
typename absl::enable_if_t<
absl::conjunction<
- std::is_integral<IntType>,
+ IsIntegral<IntType>,
absl::disjunction<std::is_same<Tag, IntervalClosedClosedTag>,
std::is_same<Tag, IntervalOpenClosedTag>>>::value,
IntType>
@@ -201,7 +201,7 @@ is_uniform_range_valid(FloatType a, FloatType b) {
}
template <typename IntType>
-absl::enable_if_t<std::is_integral<IntType>::value, bool>
+absl::enable_if_t<IsIntegral<IntType>::value, bool>
is_uniform_range_valid(IntType a, IntType b) {
return a <= b;
}
@@ -210,7 +210,7 @@ is_uniform_range_valid(IntType a, IntType b) {
// or absl::uniform_real_distribution depending on the NumType parameter.
template <typename NumType>
using UniformDistribution =
- typename std::conditional<std::is_integral<NumType>::value,
+ typename std::conditional<IsIntegral<NumType>::value,
absl::uniform_int_distribution<NumType>,
absl::uniform_real_distribution<NumType>>::type;
diff --git a/absl/random/internal/wide_multiply.h b/absl/random/internal/wide_multiply.h
index b6e6c4b6..891e3630 100644
--- a/absl/random/internal/wide_multiply.h
+++ b/absl/random/internal/wide_multiply.h
@@ -34,43 +34,6 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
-// Helper object to multiply two 64-bit values to a 128-bit value.
-// MultiplyU64ToU128 multiplies two 64-bit values to a 128-bit value.
-// If an intrinsic is available, it is used, otherwise use native 32-bit
-// multiplies to construct the result.
-inline absl::uint128 MultiplyU64ToU128(uint64_t a, uint64_t b) {
-#if defined(ABSL_HAVE_INTRINSIC_INT128)
- return absl::uint128(static_cast<__uint128_t>(a) * b);
-#elif defined(ABSL_INTERNAL_USE_UMUL128)
- // uint64_t * uint64_t => uint128 multiply using imul intrinsic on MSVC.
- uint64_t high = 0;
- const uint64_t low = _umul128(a, b, &high);
- return absl::MakeUint128(high, low);
-#else
- // uint128(a) * uint128(b) in emulated mode computes a full 128-bit x 128-bit
- // multiply. However there are many cases where that is not necessary, and it
- // is only necessary to support a 64-bit x 64-bit = 128-bit multiply. This is
- // for those cases.
- const uint64_t a00 = static_cast<uint32_t>(a);
- const uint64_t a32 = a >> 32;
- const uint64_t b00 = static_cast<uint32_t>(b);
- const uint64_t b32 = b >> 32;
-
- const uint64_t c00 = a00 * b00;
- const uint64_t c32a = a00 * b32;
- const uint64_t c32b = a32 * b00;
- const uint64_t c64 = a32 * b32;
-
- const uint32_t carry =
- static_cast<uint32_t>(((c00 >> 32) + static_cast<uint32_t>(c32a) +
- static_cast<uint32_t>(c32b)) >>
- 32);
-
- return absl::MakeUint128(c64 + (c32a >> 32) + (c32b >> 32) + carry,
- c00 + (c32a << 32) + (c32b << 32));
-#endif
-}
-
// wide_multiply<T> multiplies two N-bit values to a 2N-bit result.
template <typename UIntType>
struct wide_multiply {
@@ -82,27 +45,49 @@ struct wide_multiply {
return static_cast<result_type>(a) * b;
}
- static input_type hi(result_type r) { return r >> kN; }
- static input_type lo(result_type r) { return r; }
+ static input_type hi(result_type r) {
+ return static_cast<input_type>(r >> kN);
+ }
+ static input_type lo(result_type r) { return static_cast<input_type>(r); }
static_assert(std::is_unsigned<UIntType>::value,
"Class-template wide_multiply<> argument must be unsigned.");
};
-#ifndef ABSL_HAVE_INTRINSIC_INT128
+// MultiplyU128ToU256 multiplies two 128-bit values to a 256-bit value.
+inline U256 MultiplyU128ToU256(uint128 a, uint128 b) {
+ const uint128 a00 = static_cast<uint64_t>(a);
+ const uint128 a64 = a >> 64;
+ const uint128 b00 = static_cast<uint64_t>(b);
+ const uint128 b64 = b >> 64;
+
+ const uint128 c00 = a00 * b00;
+ const uint128 c64a = a00 * b64;
+ const uint128 c64b = a64 * b00;
+ const uint128 c128 = a64 * b64;
+
+ const uint64_t carry =
+ static_cast<uint64_t>(((c00 >> 64) + static_cast<uint64_t>(c64a) +
+ static_cast<uint64_t>(c64b)) >>
+ 64);
+
+ return {c128 + (c64a >> 64) + (c64b >> 64) + carry,
+ c00 + (c64a << 64) + (c64b << 64)};
+}
+
+
template <>
-struct wide_multiply<uint64_t> {
- using input_type = uint64_t;
- using result_type = absl::uint128;
+struct wide_multiply<uint128> {
+ using input_type = uint128;
+ using result_type = U256;
- static result_type multiply(uint64_t a, uint64_t b) {
- return MultiplyU64ToU128(a, b);
+ static result_type multiply(input_type a, input_type b) {
+ return MultiplyU128ToU256(a, b);
}
- static uint64_t hi(result_type r) { return absl::Uint128High64(r); }
- static uint64_t lo(result_type r) { return absl::Uint128Low64(r); }
+ static input_type hi(result_type r) { return r.hi; }
+ static input_type lo(result_type r) { return r.lo; }
};
-#endif
} // namespace random_internal
ABSL_NAMESPACE_END
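MultiplyU128ToU256 is a schoolbook two-limb multiply in base 2^64: four
partial products plus an explicit carry out of the low 128 bits. A worked
check with values small enough to verify by hand (not taken from the tests):

  #include "absl/numeric/int128.h"
  #include "absl/random/internal/wide_multiply.h"

  void WorkedExample() {
    using absl::random_internal::MultiplyU128ToU256;
    const absl::uint128 a = (absl::uint128(1) << 64) + 2;  // 2^64 + 2
    const absl::uint128 b = (absl::uint128(1) << 64) + 3;  // 2^64 + 3
    // a * b = 2^128 + 5*2^64 + 6, so the result splits as:
    //   r.hi == 1
    //   r.lo == (absl::uint128(5) << 64) + 6
    const auto r = MultiplyU128ToU256(a, b);
    (void)r;
  }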
diff --git a/absl/random/internal/wide_multiply_test.cc b/absl/random/internal/wide_multiply_test.cc
index e276cb51..f8ee35c0 100644
--- a/absl/random/internal/wide_multiply_test.cc
+++ b/absl/random/internal/wide_multiply_test.cc
@@ -14,52 +14,106 @@
#include "absl/random/internal/wide_multiply.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
-using absl::random_internal::MultiplyU64ToU128;
+using absl::random_internal::MultiplyU128ToU256;
+using absl::random_internal::U256;
namespace {
-TEST(WideMultiplyTest, MultiplyU64ToU128Test) {
- constexpr uint64_t k1 = 1;
- constexpr uint64_t kMax = ~static_cast<uint64_t>(0);
+U256 LeftShift(U256 v, int s) {
+ if (s == 0) {
+ return v;
+ } else if (s < 128) {
+ return {(v.hi << s) | (v.lo >> (128 - s)), v.lo << s};
+ } else {
+ return {v.lo << (s - 128), 0};
+ }
+}
+
+MATCHER_P2(Eq256, hi, lo, "") { return arg.hi == hi && arg.lo == lo; }
+MATCHER_P(Eq256, v, "") { return arg.hi == v.hi && arg.lo == v.lo; }
+
+TEST(WideMultiplyTest, MultiplyU128ToU256Test) {
+ using absl::uint128;
+ constexpr uint128 k1 = 1;
+ constexpr uint128 kMax = ~static_cast<uint128>(0);
- EXPECT_EQ(absl::uint128(0), MultiplyU64ToU128(0, 0));
+ EXPECT_THAT(MultiplyU128ToU256(0, 0), Eq256(0, 0));
- // Max uint64_t
- EXPECT_EQ(MultiplyU64ToU128(kMax, kMax),
- absl::MakeUint128(0xfffffffffffffffe, 0x0000000000000001));
- EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(kMax, 1));
- EXPECT_EQ(absl::MakeUint128(0, kMax), MultiplyU64ToU128(1, kMax));
+ // Max uint128_t
+ EXPECT_THAT(MultiplyU128ToU256(kMax, kMax), Eq256(kMax << 1, 1));
+ EXPECT_THAT(MultiplyU128ToU256(kMax, 1), Eq256(0, kMax));
+ EXPECT_THAT(MultiplyU128ToU256(1, kMax), Eq256(0, kMax));
for (int i = 0; i < 64; ++i) {
- EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
- MultiplyU64ToU128(kMax, k1 << i));
- EXPECT_EQ(absl::MakeUint128(0, kMax) << i,
- MultiplyU64ToU128(k1 << i, kMax));
+ SCOPED_TRACE(i);
+ EXPECT_THAT(MultiplyU128ToU256(kMax, k1 << i),
+ Eq256(LeftShift({0, kMax}, i)));
+ EXPECT_THAT(MultiplyU128ToU256(k1 << i, kMax),
+ Eq256(LeftShift({0, kMax}, i)));
}
// 1-bit x 1-bit.
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
- EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
- MultiplyU64ToU128(k1 << i, k1 << j));
- EXPECT_EQ(absl::MakeUint128(0, 1) << (i + j),
- MultiplyU64ToU128(k1 << i, k1 << j));
+ EXPECT_THAT(MultiplyU128ToU256(k1 << i, k1 << j),
+ Eq256(LeftShift({0, 1}, i + j)));
}
}
// Verified multiplies
- EXPECT_EQ(MultiplyU64ToU128(0xffffeeeeddddcccc, 0xbbbbaaaa99998888),
- absl::MakeUint128(0xbbbb9e2692c5dddc, 0xc28f7531048d2c60));
- EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfedcba9876543210),
- absl::MakeUint128(0x0121fa00ad77d742, 0x2236d88fe5618cf0));
- EXPECT_EQ(MultiplyU64ToU128(0x0123456789abcdef, 0xfdb97531eca86420),
- absl::MakeUint128(0x0120ae99d26725fc, 0xce197f0ecac319e0));
- EXPECT_EQ(MultiplyU64ToU128(0x97a87f4f261ba3f2, 0xfedcba9876543210),
- absl::MakeUint128(0x96fbf1a8ae78d0ba, 0x5a6dd4b71f278320));
- EXPECT_EQ(MultiplyU64ToU128(0xfedcba9876543210, 0xfdb97531eca86420),
- absl::MakeUint128(0xfc98c6981a413e22, 0x342d0bbf48948200));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0xc502da0d6ea99fe8, 0xfa3c9141a1f50912),
+ absl::MakeUint128(0x96bcf1ac37f97bd6, 0x27e2cdeb5fb2299e)),
+ Eq256(absl::MakeUint128(0x740113d838f96a64, 0x22e8cfa4d71f89ea),
+ absl::MakeUint128(0x19184a345c62e993, 0x237871b630337b1c)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0x6f29e670cee07230, 0xc3d8e6c3e4d86759),
+ absl::MakeUint128(0x3227d29fa6386db1, 0x231682bb1e4b764f)),
+ Eq256(absl::MakeUint128(0x15c779d9d5d3b07c, 0xd7e6c827f0c81cbe),
+ absl::MakeUint128(0xf88e3914f7fa287a, 0x15b79975137dea77)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0xafb77107215646e1, 0x3b844cb1ac5769e7),
+ absl::MakeUint128(0x1ff7b2d888b62479, 0x92f758ae96fcba0b)),
+ Eq256(absl::MakeUint128(0x15f13b70181f6985, 0x2adb36bbabce7d02),
+ absl::MakeUint128(0x6c470d72e13aad04, 0x63fba3f5841762ed)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0xd85d5558d67ac905, 0xf88c70654dae19b1),
+ absl::MakeUint128(0x17252c6727db3738, 0x399ff658c511eedc)),
+ Eq256(absl::MakeUint128(0x138fcdaf8b0421ee, 0x1b465ddf2a0d03f6),
+ absl::MakeUint128(0x8f573ba68296860f, 0xf327d2738741a21c)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0x46f0421a37ff6bee, 0xa61df89f09d140b1),
+ absl::MakeUint128(0x3d712ec9f37ca2e1, 0x9658a2cba47ef4b1)),
+ Eq256(absl::MakeUint128(0x11069cc48ee7c95d, 0xd35fb1c7aa91c978),
+ absl::MakeUint128(0xbe2f4a6de874b015, 0xd2f7ac1b76746e61)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0x730d27c72d58fa49, 0x3ebeda7498f8827c),
+ absl::MakeUint128(0xa2c959eca9f503af, 0x189c687eb842bbd8)),
+ Eq256(absl::MakeUint128(0x4928d0ea356ba022, 0x1546d34a2963393),
+ absl::MakeUint128(0x7481531e1e0a16d1, 0xdd8025015cf6aca0)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0x6ca41020f856d2f1, 0xb9b0838c04a7f4aa),
+ absl::MakeUint128(0x9cf41d28a8396f54, 0x1d681695e377ffe6)),
+ Eq256(absl::MakeUint128(0x429b92934d9be6f1, 0xea182877157c1e7),
+ absl::MakeUint128(0x7135c23f0a4a475, 0xc1adc366f4a126bc)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0x57472833797c332, 0x6c79272fdec4687a),
+ absl::MakeUint128(0xb5f022ea3838e46b, 0x16face2f003e27a6)),
+ Eq256(absl::MakeUint128(0x3e072e0962b3400, 0x5d9fe8fdc3d0e1f4),
+ absl::MakeUint128(0x7dc0df47cedafd62, 0xbe6501f1acd2551c)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0xf0fb4198322eb1c2, 0xfe7f5f31f3885938),
+ absl::MakeUint128(0xd99012b71bb7aa31, 0xac7a6f9eb190789)),
+ Eq256(absl::MakeUint128(0xcccc998cf075ca01, 0x642d144322fb873a),
+ absl::MakeUint128(0xc79dc12b69d91ed4, 0xa83459132ce046f8)));
+ EXPECT_THAT(MultiplyU128ToU256(
+ absl::MakeUint128(0xb5c04120848cdb47, 0x8aa62a827bf52635),
+ absl::MakeUint128(0x8d07a359be2f1380, 0x467bb90d59da0dea)),
+ Eq256(absl::MakeUint128(0x64205019d139a9ce, 0x99425c5fb6e7a977),
+ absl::MakeUint128(0xd3e99628a9e5fca7, 0x9c7824cb7279d72)));
}
} // namespace
diff --git a/absl/random/log_uniform_int_distribution.h b/absl/random/log_uniform_int_distribution.h
index 43e10116..4afff8f6 100644
--- a/absl/random/log_uniform_int_distribution.h
+++ b/absl/random/log_uniform_int_distribution.h
@@ -69,10 +69,8 @@ class log_uniform_int_distribution {
if (base_ == 2) {
// Determine where the first set bit is on range(), giving a log2(range)
// value which can be used to construct bounds.
- log_range_ =
- (std::min)(bit_width(range()),
- static_cast<unsigned_type>(
- std::numeric_limits<unsigned_type>::digits));
+ log_range_ = (std::min)(random_internal::BitWidth(range()),
+ std::numeric_limits<unsigned_type>::digits);
} else {
// NOTE: Computing the logN(x) introduces error from 2 sources:
// 1. Conversion of int to double loses precision for values >=
@@ -83,7 +81,7 @@ class log_uniform_int_distribution {
//
// Thus a result which should equal K may equal K +/- epsilon,
// which can eliminate some values depending on where the bounds fall.
- const double inv_log_base = 1.0 / std::log(base_);
+ const double inv_log_base = 1.0 / std::log(static_cast<double>(base_));
const double log_range = std::log(static_cast<double>(range()) + 0.5);
log_range_ = static_cast<int>(std::ceil(inv_log_base * log_range));
}
@@ -113,7 +111,7 @@ class log_uniform_int_distribution {
unsigned_type range_; // max - min
int log_range_; // ceil(logN(range_))
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::log_uniform_int_distribution<> must be "
"parameterized using an integral type.");
};
@@ -139,7 +137,7 @@ class log_uniform_int_distribution {
template <typename URBG>
result_type operator()(URBG& g, // NOLINT(runtime/references)
const param_type& p) {
- return (p.min)() + Generate(g, p);
+ return static_cast<result_type>((p.min)() + Generate(g, p));
}
result_type(min)() const { return (param_.min)(); }
@@ -193,8 +191,8 @@ log_uniform_int_distribution<IntType>::Generate(
? (std::numeric_limits<unsigned_type>::max)()
: (static_cast<unsigned_type>(1) << e) - 1;
} else {
- const double r = std::pow(p.base(), d);
- const double s = (r * p.base()) - 1.0;
+ const double r = std::pow(static_cast<double>(p.base()), d);
+ const double s = (r * static_cast<double>(p.base())) - 1.0;
base_e =
(r > static_cast<double>((std::numeric_limits<unsigned_type>::max)()))
@@ -211,7 +209,8 @@ log_uniform_int_distribution<IntType>::Generate(
const unsigned_type hi = (top_e >= p.range()) ? p.range() : top_e;
// choose uniformly over [lo, hi]
- return absl::uniform_int_distribution<result_type>(lo, hi)(g);
+ return absl::uniform_int_distribution<result_type>(
+ static_cast<result_type>(lo), static_cast<result_type>(hi))(g);
}
template <typename CharT, typename Traits, typename IntType>
diff --git a/absl/random/log_uniform_int_distribution_test.cc b/absl/random/log_uniform_int_distribution_test.cc
index 5e780d96..0d0fcb95 100644
--- a/absl/random/log_uniform_int_distribution_test.cc
+++ b/absl/random/log_uniform_int_distribution_test.cc
@@ -42,7 +42,7 @@ class LogUniformIntDistributionTypeTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, //
uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(LogUniformIntDistributionTypeTest, IntTypes);
+TYPED_TEST_SUITE(LogUniformIntDistributionTypeTest, IntTypes);
TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) {
using param_type =
diff --git a/absl/random/mocking_bit_gen.h b/absl/random/mocking_bit_gen.h
index 7b2b80eb..89fa5a47 100644
--- a/absl/random/mocking_bit_gen.h
+++ b/absl/random/mocking_bit_gen.h
@@ -87,7 +87,7 @@ class BitGenRef;
//
// ON_CALL(absl::MockUniform<int>(), Call(bitgen, testing::_, testing::_))
// .WillByDefault([] (int low, int high) {
-// return (low + high) / 2;
+// return low + (high - low) / 2;
// });
//
// EXPECT_EQ(absl::Uniform<int>(gen, 0, 10), 5);
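The updated sample callback computes the midpoint as low + (high - low) / 2
rather than (low + high) / 2, the classic fix for the binary-search overflow
bug: the sum form overflows int as soon as low + high exceeds INT_MAX, while
the difference form is safe whenever high - low fits in int (always true for
non-negative bounds). Isolated:

  #include <climits>

  int Midpoint(int low, int high) {
    return low + (high - low) / 2;  // no overflow for 0 <= low <= high
  }

  // Midpoint(0, 10) == 5, matching the EXPECT_EQ above.
  // Midpoint(INT_MAX - 1, INT_MAX) == INT_MAX - 1, where (low + high) / 2
  // would overflow.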
diff --git a/absl/random/poisson_distribution.h b/absl/random/poisson_distribution.h
index cb5f5d5d..f4573082 100644
--- a/absl/random/poisson_distribution.h
+++ b/absl/random/poisson_distribution.h
@@ -26,6 +26,7 @@
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/generate_real.h"
#include "absl/random/internal/iostream_state_saver.h"
+#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -80,7 +81,7 @@ class poisson_distribution {
double log_k_;
int split_;
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::poisson_distribution<> must be "
"parameterized using an integral type.");
};
@@ -133,7 +134,8 @@ template <typename IntType>
poisson_distribution<IntType>::param_type::param_type(double mean)
: mean_(mean), split_(0) {
assert(mean >= 0);
- assert(mean <= (std::numeric_limits<result_type>::max)());
+ assert(mean <=
+ static_cast<double>((std::numeric_limits<result_type>::max)()));
// As a defensive measure, avoid large values of the mean. The rejection
// algorithm used does not support very large values well. It may be worth
// changing algorithms to better deal with these cases.
@@ -222,8 +224,9 @@ poisson_distribution<IntType>::operator()(
// clang-format on
const double lhs = 2.0 * std::log(u) + p.log_k_ + s;
if (lhs < rhs) {
- return x > (max)() ? (max)()
- : static_cast<result_type>(x); // f(x)/k >= u^2
+ return x > static_cast<double>((max)())
+ ? (max)()
+ : static_cast<result_type>(x); // f(x)/k >= u^2
}
}
}
diff --git a/absl/random/poisson_distribution_test.cc b/absl/random/poisson_distribution_test.cc
index 8baabd11..4f585b9b 100644
--- a/absl/random/poisson_distribution_test.cc
+++ b/absl/random/poisson_distribution_test.cc
@@ -73,7 +73,7 @@ class PoissonDistributionInterfaceTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(PoissonDistributionInterfaceTest, IntTypes);
+TYPED_TEST_SUITE(PoissonDistributionInterfaceTest, IntTypes);
TYPED_TEST(PoissonDistributionInterfaceTest, SerializeTest) {
using param_type = typename absl::poisson_distribution<TypeParam>::param_type;
diff --git a/absl/random/seed_sequences.h b/absl/random/seed_sequences.h
index ff1340cc..c3af4b00 100644
--- a/absl/random/seed_sequences.h
+++ b/absl/random/seed_sequences.h
@@ -28,6 +28,7 @@
#include <iterator>
#include <random>
+#include "absl/base/config.h"
#include "absl/random/internal/salted_seed_seq.h"
#include "absl/random/internal/seed_material.h"
#include "absl/random/seed_gen_exception.h"
diff --git a/absl/random/uniform_int_distribution.h b/absl/random/uniform_int_distribution.h
index c1f54cce..fae80252 100644
--- a/absl/random/uniform_int_distribution.h
+++ b/absl/random/uniform_int_distribution.h
@@ -97,7 +97,7 @@ class uniform_int_distribution {
result_type lo_;
unsigned_type range_;
- static_assert(std::is_integral<result_type>::value,
+ static_assert(random_internal::IsIntegral<result_type>::value,
"Class-template absl::uniform_int_distribution<> must be "
"parameterized using an integral type.");
}; // param_type
@@ -125,7 +125,7 @@ class uniform_int_distribution {
template <typename URBG>
result_type operator()(
URBG& gen, const param_type& param) { // NOLINT(runtime/references)
- return param.a() + Generate(gen, param.range());
+ return static_cast<result_type>(param.a() + Generate(gen, param.range()));
}
result_type a() const { return param_.a(); }
diff --git a/absl/random/uniform_real_distribution.h b/absl/random/uniform_real_distribution.h
index 5ba17b23..19683341 100644
--- a/absl/random/uniform_real_distribution.h
+++ b/absl/random/uniform_real_distribution.h
@@ -73,12 +73,12 @@ class uniform_real_distribution {
: lo_(lo), hi_(hi), range_(hi - lo) {
// [rand.dist.uni.real] preconditions 2 & 3
assert(lo <= hi);
+
// NOTE: For integral types, we can promote the range to an unsigned type,
// which gives full width of the range. However for real (fp) types, this
// is not possible, so value generation cannot use the full range of the
// real type.
assert(range_ <= (std::numeric_limits<result_type>::max)());
- assert(std::isfinite(range_));
}
result_type a() const { return lo_; }
diff --git a/absl/random/uniform_real_distribution_test.cc b/absl/random/uniform_real_distribution_test.cc
index 035bd284..07f199d3 100644
--- a/absl/random/uniform_real_distribution_test.cc
+++ b/absl/random/uniform_real_distribution_test.cc
@@ -78,62 +78,74 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
GTEST_SKIP()
<< "Skipping the test because we detected x87 floating-point semantics";
#endif
+ using DistributionType = absl::uniform_real_distribution<TypeParam>;
+ using real_type = TypeParam;
+ using param_type = typename DistributionType::param_type;
- using param_type =
- typename absl::uniform_real_distribution<TypeParam>::param_type;
+ constexpr const real_type kMax = std::numeric_limits<real_type>::max();
+ constexpr const real_type kMin = std::numeric_limits<real_type>::min();
+ constexpr const real_type kEpsilon =
+ std::numeric_limits<real_type>::epsilon();
+ constexpr const real_type kLowest =
+ std::numeric_limits<real_type>::lowest(); // -max
- constexpr const TypeParam a{1152921504606846976};
+ const real_type kDenormMax = std::nextafter(kMin, real_type{0});
+ const real_type kOneMinusE =
+ std::nextafter(real_type{1}, real_type{0}); // 1 - epsilon
+
+ constexpr const real_type kTwo60{1152921504606846976}; // 2^60
constexpr int kCount = 1000;
absl::InsecureBitGen gen;
for (const auto& param : {
param_type(),
- param_type(TypeParam(2.0), TypeParam(2.0)), // Same
- param_type(TypeParam(-0.1), TypeParam(0.1)),
- param_type(TypeParam(0.05), TypeParam(0.12)),
- param_type(TypeParam(-0.05), TypeParam(0.13)),
- param_type(TypeParam(-0.05), TypeParam(-0.02)),
+ param_type(real_type{0}, real_type{1}),
+ param_type(real_type(-0.1), real_type(0.1)),
+ param_type(real_type(0.05), real_type(0.12)),
+ param_type(real_type(-0.05), real_type(0.13)),
+ param_type(real_type(-0.05), real_type(-0.02)),
+ // range = 0
+ param_type(real_type(2.0), real_type(2.0)), // Same
// double range = 0
// 2^60 , 2^60 + 2^6
- param_type(a, TypeParam(1152921504606847040)),
+ param_type(kTwo60, real_type(1152921504606847040)),
// 2^60 , 2^60 + 2^7
- param_type(a, TypeParam(1152921504606847104)),
+ param_type(kTwo60, real_type(1152921504606847104)),
// double range = 2^8
// 2^60 , 2^60 + 2^8
- param_type(a, TypeParam(1152921504606847232)),
+ param_type(kTwo60, real_type(1152921504606847232)),
// float range = 0
// 2^60 , 2^60 + 2^36
- param_type(a, TypeParam(1152921573326323712)),
+ param_type(kTwo60, real_type(1152921573326323712)),
// 2^60 , 2^60 + 2^37
- param_type(a, TypeParam(1152921642045800448)),
+ param_type(kTwo60, real_type(1152921642045800448)),
// float range = 2^38
// 2^60 , 2^60 + 2^38
- param_type(a, TypeParam(1152921779484753920)),
+ param_type(kTwo60, real_type(1152921779484753920)),
// Limits
- param_type(0, std::numeric_limits<TypeParam>::max()),
- param_type(std::numeric_limits<TypeParam>::lowest(), 0),
- param_type(0, std::numeric_limits<TypeParam>::epsilon()),
- param_type(-std::numeric_limits<TypeParam>::epsilon(),
- std::numeric_limits<TypeParam>::epsilon()),
- param_type(std::numeric_limits<TypeParam>::epsilon(),
- 2 * std::numeric_limits<TypeParam>::epsilon()),
+ param_type(0, kMax),
+ param_type(kLowest, 0),
+ param_type(0, kMin),
+ param_type(0, kEpsilon),
+ param_type(-kEpsilon, kEpsilon),
+ param_type(0, kOneMinusE),
+ param_type(0, kDenormMax),
}) {
// Validate parameters.
const auto a = param.a();
const auto b = param.b();
- absl::uniform_real_distribution<TypeParam> before(a, b);
+ DistributionType before(a, b);
EXPECT_EQ(before.a(), param.a());
EXPECT_EQ(before.b(), param.b());
{
- absl::uniform_real_distribution<TypeParam> via_param(param);
+ DistributionType via_param(param);
EXPECT_EQ(via_param, before);
}
std::stringstream ss;
ss << before;
- absl::uniform_real_distribution<TypeParam> after(TypeParam(1.0),
- TypeParam(3.1));
+ DistributionType after(real_type(1.0), real_type(3.1));
EXPECT_NE(before.a(), after.a());
EXPECT_NE(before.b(), after.b());
@@ -168,7 +180,7 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
}
}
- if (!std::is_same<TypeParam, long double>::value) {
+ if (!std::is_same<real_type, long double>::value) {
// static_cast<double>(long double) can overflow.
std::string msg = absl::StrCat("Range: ", static_cast<double>(sample_min),
", ", static_cast<double>(sample_max));
@@ -182,33 +194,52 @@ TYPED_TEST(UniformRealDistributionTest, ParamSerializeTest) {
#pragma warning(disable:4756) // Constant arithmetic overflow.
#endif
TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) {
+ using DistributionType = absl::uniform_real_distribution<TypeParam>;
+ using real_type = TypeParam;
+
#if GTEST_HAS_DEATH_TEST
// Hi < Lo
- EXPECT_DEBUG_DEATH(
- { absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0); }, "");
+ EXPECT_DEBUG_DEATH({ DistributionType dist(10.0, 1.0); }, "");
// Hi - Lo > numeric_limits<>::max()
EXPECT_DEBUG_DEATH(
{
- absl::uniform_real_distribution<TypeParam> dist(
- std::numeric_limits<TypeParam>::lowest(),
- std::numeric_limits<TypeParam>::max());
+ DistributionType dist(std::numeric_limits<real_type>::lowest(),
+ std::numeric_limits<real_type>::max());
+ },
+ "");
+
+ // kEpsilon guarantees that max + kEpsilon = inf.
+ const auto kEpsilon = std::nexttoward(
+ (std::numeric_limits<real_type>::max() -
+ std::nexttoward(std::numeric_limits<real_type>::max(), 0.0)) /
+ 2,
+ std::numeric_limits<real_type>::max());
+ EXPECT_DEBUG_DEATH(
+ {
+ DistributionType dist(-kEpsilon, std::numeric_limits<real_type>::max());
},
"");
+ EXPECT_DEBUG_DEATH(
+ {
+ DistributionType dist(std::numeric_limits<real_type>::lowest(),
+ kEpsilon);
+ },
+ "");
+
#endif // GTEST_HAS_DEATH_TEST
#if defined(NDEBUG)
// opt-mode, for invalid parameters, will generate a garbage value,
// but should not enter an infinite loop.
absl::InsecureBitGen gen;
{
- absl::uniform_real_distribution<TypeParam> dist(10.0, 1.0);
+ DistributionType dist(10.0, 1.0);
auto x = dist(gen);
EXPECT_FALSE(std::isnan(x)) << x;
}
{
- absl::uniform_real_distribution<TypeParam> dist(
- std::numeric_limits<TypeParam>::lowest(),
- std::numeric_limits<TypeParam>::max());
+ DistributionType dist(std::numeric_limits<real_type>::lowest(),
+ std::numeric_limits<real_type>::max());
auto x = dist(gen);
// Infinite result.
EXPECT_FALSE(std::isfinite(x)) << x;
@@ -220,6 +251,8 @@ TYPED_TEST(UniformRealDistributionTest, ViolatesPreconditionsDeathTest) {
#endif
TYPED_TEST(UniformRealDistributionTest, TestMoments) {
+ using DistributionType = absl::uniform_real_distribution<TypeParam>;
+
constexpr int kSize = 1000000;
std::vector<double> values(kSize);
@@ -228,7 +261,7 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) {
// implementation.
absl::random_internal::pcg64_2018_engine rng{0x2B7E151628AED2A6};
- absl::uniform_real_distribution<TypeParam> dist;
+ DistributionType dist;
for (int i = 0; i < kSize; i++) {
values[i] = dist(rng);
}
@@ -242,9 +275,10 @@ TYPED_TEST(UniformRealDistributionTest, TestMoments) {
}
TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
+ using DistributionType = absl::uniform_real_distribution<TypeParam>;
+ using param_type = typename DistributionType::param_type;
+
using absl::random_internal::kChiSquared;
- using param_type =
- typename absl::uniform_real_distribution<TypeParam>::param_type;
constexpr size_t kTrials = 100000;
constexpr int kBuckets = 50;
@@ -269,7 +303,7 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
const double factor = kBuckets / (max_val - min_val);
std::vector<int32_t> counts(kBuckets, 0);
- absl::uniform_real_distribution<TypeParam> dist(param);
+ DistributionType dist(param);
for (size_t i = 0; i < kTrials; i++) {
auto x = dist(rng);
auto bucket = static_cast<size_t>((x - min_val) * factor);
@@ -297,8 +331,11 @@ TYPED_TEST(UniformRealDistributionTest, ChiSquaredTest50) {
}
TYPED_TEST(UniformRealDistributionTest, StabilityTest) {
+ using DistributionType = absl::uniform_real_distribution<TypeParam>;
+ using real_type = TypeParam;
+
// absl::uniform_real_distribution stability relies only on
- // random_internal::RandU64ToDouble and random_internal::RandU64ToFloat.
+ // random_internal::GenerateRealFromBits.
absl::random_internal::sequence_urbg urbg(
{0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
@@ -307,9 +344,9 @@ TYPED_TEST(UniformRealDistributionTest, StabilityTest) {
std::vector<int> output(12);
- absl::uniform_real_distribution<TypeParam> dist;
+ DistributionType dist;
std::generate(std::begin(output), std::end(output), [&] {
- return static_cast<int>(TypeParam(1000000) * dist(urbg));
+ return static_cast<int>(real_type(1000000) * dist(urbg));
});
EXPECT_THAT(
diff --git a/absl/random/zipf_distribution.h b/absl/random/zipf_distribution.h
index 22ebc756..03497b1b 100644
--- a/absl/random/zipf_distribution.h
+++ b/absl/random/zipf_distribution.h
@@ -23,13 +23,14 @@
#include <type_traits>
#include "absl/random/internal/iostream_state_saver.h"
+#include "absl/random/internal/traits.h"
#include "absl/random/uniform_real_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// absl::zipf_distribution produces random integer values in the range [0, k],
-// distributed according to the discrete probability function:
+// distributed according to the unnormalized discrete probability function:
//
// P(x) = (v + x) ^ -q
//
@@ -94,7 +95,7 @@ class zipf_distribution {
double hxm_; // h(k + 0.5)
double hx0_minus_hxm_; // h(x0) - h(k + 0.5)
- static_assert(std::is_integral<IntType>::value,
+ static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::zipf_distribution<> must be "
"parameterized using an integral type.");
};
@@ -221,7 +222,7 @@ zipf_distribution<IntType>::operator()(
const double u = p.hxm_ + v * p.hx0_minus_hxm_;
const double x = p.hinv(u);
k = rint(x); // std::floor(x + 0.5);
- if (k > p.k()) continue; // reject k > max_k
+ if (k > static_cast<double>(p.k())) continue; // reject k > max_k
if (k - x <= p.s_) break;
const double h = p.h(k + 0.5);
const double r = p.pow_negative_q(p.v_ + k);
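
For reference, the normalized form of the probability function described in the comment above divides the unnormalized weight by the finite generalized harmonic sum over the support:

  P(X = x) = \frac{(v + x)^{-q}}{\sum_{i=0}^{k} (v + i)^{-q}}, \qquad x \in \{0, 1, \dots, k\}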
diff --git a/absl/random/zipf_distribution_test.cc b/absl/random/zipf_distribution_test.cc
index f8cf70e0..c8bb89db 100644
--- a/absl/random/zipf_distribution_test.cc
+++ b/absl/random/zipf_distribution_test.cc
@@ -44,7 +44,7 @@ class ZipfDistributionTypedTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
-TYPED_TEST_CASE(ZipfDistributionTypedTest, IntTypes);
+TYPED_TEST_SUITE(ZipfDistributionTypedTest, IntTypes);
TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) {
using param_type = typename absl::zipf_distribution<TypeParam>::param_type;
diff --git a/absl/status/BUILD.bazel b/absl/status/BUILD.bazel
index bae5156f..ce0ea70c 100644
--- a/absl/status/BUILD.bazel
+++ b/absl/status/BUILD.bazel
@@ -20,6 +20,7 @@
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
+ "ABSL_DEFAULT_LINKOPTS",
"ABSL_TEST_COPTS",
)
@@ -39,11 +40,12 @@ cc_library(
"status_payload_printer.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:atomic_hook",
- "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
+ "//absl/base:strerror",
"//absl/container:inlined_vector",
"//absl/debugging:stacktrace",
"//absl/debugging:symbolize",
@@ -76,6 +78,7 @@ cc_library(
"statusor.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":status",
"//absl/base",
diff --git a/absl/status/CMakeLists.txt b/absl/status/CMakeLists.txt
index f107c85b..15db36af 100644
--- a/absl/status/CMakeLists.txt
+++ b/absl/status/CMakeLists.txt
@@ -28,16 +28,17 @@ absl_cc_library(
DEPS
absl::atomic_hook
absl::config
+ absl::cord
absl::core_headers
absl::function_ref
- absl::raw_logging_internal
absl::inlined_vector
+ absl::optional
+ absl::raw_logging_internal
absl::stacktrace
- absl::symbolize
- absl::strings
- absl::cord
absl::str_format
- absl::optional
+ absl::strerror
+ absl::strings
+ absl::symbolize
PUBLIC
)
diff --git a/absl/status/internal/status_internal.h b/absl/status/internal/status_internal.h
index ac12940a..19a4a7aa 100644
--- a/absl/status/internal/status_internal.h
+++ b/absl/status/internal/status_internal.h
@@ -15,7 +15,9 @@
#define ABSL_STATUS_INTERNAL_STATUS_INTERNAL_H_
#include <string>
+#include <utility>
+#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/cord.h"
@@ -25,7 +27,14 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
// Returned Status objects may not be ignored. Codesearch doesn't handle ifdefs
// as part of class definitions (b/6995610), so we use a forward declaration.
+//
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] Status;
+#else
class ABSL_MUST_USE_RESULT Status;
+#endif
ABSL_NAMESPACE_END
} // namespace absl
#endif // !SWIG
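
Marking the class itself [[nodiscard]] means every function that returns absl::Status by value triggers a diagnostic when its result is silently dropped, without annotating each declaration individually. A hedged sketch (DoWork is a hypothetical function):

#include "absl/status/status.h"

absl::Status DoWork();

void Caller() {
  DoWork();                   // warning: ignoring [[nodiscard]] return value
  absl::Status s = DoWork();  // OK: the result is consumed
  if (!s.ok()) {
    // handle the error
  }
}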
@@ -61,6 +70,14 @@ struct StatusRep {
};
absl::StatusCode MapToLocalCode(int value);
+
+// Returns a pointer to a newly-allocated string with the given `prefix`,
+// suitable for output as an error message in assertion/`CHECK()` failures.
+//
+// This is an internal implementation detail for Abseil logging.
+std::string* MakeCheckFailString(const absl::Status* status,
+ const char* prefix);
+
} // namespace status_internal
ABSL_NAMESPACE_END
diff --git a/absl/status/status.cc b/absl/status/status.cc
index bcf3413e..88e8eda9 100644
--- a/absl/status/status.cc
+++ b/absl/status/status.cc
@@ -13,9 +13,12 @@
// limitations under the License.
#include "absl/status/status.h"
+#include <errno.h>
+
#include <cassert>
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/strerror.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
#include "absl/status/status_payload_printer.h"
@@ -185,11 +188,16 @@ void Status::ForEachPayload(
}
const std::string* Status::EmptyString() {
- static std::string* empty_string = new std::string();
- return empty_string;
+ static union EmptyString {
+ std::string str;
+ ~EmptyString() {}
+ } empty = {{}};
+ return &empty.str;
}
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr const char Status::kMovedFromString[];
+#endif
const std::string* Status::MovedFromString() {
static std::string* moved_from_string = new std::string(kMovedFromString);
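
The union replacing the heap-allocated empty string above is the usual "immortal static" idiom: the member is constructed once, but the union's empty destructor never invokes ~basic_string(), so the object avoids a heap allocation and remains valid even while other static objects are being destroyed at exit. A minimal standalone sketch of the same idiom:

#include <string>

// The user-provided destructor intentionally does not destroy `str`, so the
// returned reference stays valid for the lifetime of the process.
const std::string& LeakyEmptyString() {
  static union Immortal {
    std::string str;
    Immortal() : str() {}
    ~Immortal() {}  // never runs ~basic_string()
  } holder;
  return holder.str;
}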
@@ -440,5 +448,169 @@ bool IsUnknown(const Status& status) {
return status.code() == absl::StatusCode::kUnknown;
}
+StatusCode ErrnoToStatusCode(int error_number) {
+ switch (error_number) {
+ case 0:
+ return StatusCode::kOk;
+ case EINVAL: // Invalid argument
+ case ENAMETOOLONG: // Filename too long
+ case E2BIG: // Argument list too long
+ case EDESTADDRREQ: // Destination address required
+ case EDOM: // Mathematics argument out of domain of function
+ case EFAULT: // Bad address
+ case EILSEQ: // Illegal byte sequence
+ case ENOPROTOOPT: // Protocol not available
+ case ENOSTR: // Not a STREAM
+ case ENOTSOCK: // Not a socket
+ case ENOTTY: // Inappropriate I/O control operation
+ case EPROTOTYPE: // Protocol wrong type for socket
+ case ESPIPE: // Invalid seek
+ return StatusCode::kInvalidArgument;
+ case ETIMEDOUT: // Connection timed out
+ case ETIME: // Timer expired
+ return StatusCode::kDeadlineExceeded;
+ case ENODEV: // No such device
+ case ENOENT: // No such file or directory
+#ifdef ENOMEDIUM
+ case ENOMEDIUM: // No medium found
+#endif
+ case ENXIO: // No such device or address
+ case ESRCH: // No such process
+ return StatusCode::kNotFound;
+ case EEXIST: // File exists
+ case EADDRNOTAVAIL: // Address not available
+ case EALREADY: // Connection already in progress
+#ifdef ENOTUNIQ
+ case ENOTUNIQ: // Name not unique on network
+#endif
+ return StatusCode::kAlreadyExists;
+ case EPERM: // Operation not permitted
+ case EACCES: // Permission denied
+#ifdef ENOKEY
+ case ENOKEY: // Required key not available
+#endif
+ case EROFS: // Read only file system
+ return StatusCode::kPermissionDenied;
+ case ENOTEMPTY: // Directory not empty
+ case EISDIR: // Is a directory
+ case ENOTDIR: // Not a directory
+ case EADDRINUSE: // Address already in use
+ case EBADF: // Invalid file descriptor
+#ifdef EBADFD
+ case EBADFD: // File descriptor in bad state
+#endif
+ case EBUSY: // Device or resource busy
+ case ECHILD: // No child processes
+ case EISCONN: // Socket is connected
+#ifdef EISNAM
+ case EISNAM: // Is a named type file
+#endif
+#ifdef ENOTBLK
+ case ENOTBLK: // Block device required
+#endif
+ case ENOTCONN: // The socket is not connected
+ case EPIPE: // Broken pipe
+#ifdef ESHUTDOWN
+ case ESHUTDOWN: // Cannot send after transport endpoint shutdown
+#endif
+ case ETXTBSY: // Text file busy
+#ifdef EUNATCH
+ case EUNATCH: // Protocol driver not attached
+#endif
+ return StatusCode::kFailedPrecondition;
+ case ENOSPC: // No space left on device
+#ifdef EDQUOT
+ case EDQUOT: // Disk quota exceeded
+#endif
+ case EMFILE: // Too many open files
+ case EMLINK: // Too many links
+ case ENFILE: // Too many open files in system
+ case ENOBUFS: // No buffer space available
+ case ENODATA: // No message is available on the STREAM read queue
+ case ENOMEM: // Not enough space
+ case ENOSR: // No STREAM resources
+#ifdef EUSERS
+ case EUSERS: // Too many users
+#endif
+ return StatusCode::kResourceExhausted;
+#ifdef ECHRNG
+ case ECHRNG: // Channel number out of range
+#endif
+ case EFBIG: // File too large
+ case EOVERFLOW: // Value too large to be stored in data type
+ case ERANGE: // Result too large
+ return StatusCode::kOutOfRange;
+#ifdef ENOPKG
+ case ENOPKG: // Package not installed
+#endif
+ case ENOSYS: // Function not implemented
+ case ENOTSUP: // Operation not supported
+ case EAFNOSUPPORT: // Address family not supported
+#ifdef EPFNOSUPPORT
+ case EPFNOSUPPORT: // Protocol family not supported
+#endif
+ case EPROTONOSUPPORT: // Protocol not supported
+#ifdef ESOCKTNOSUPPORT
+ case ESOCKTNOSUPPORT: // Socket type not supported
+#endif
+ case EXDEV: // Improper link
+ return StatusCode::kUnimplemented;
+ case EAGAIN: // Resource temporarily unavailable
+#ifdef ECOMM
+ case ECOMM: // Communication error on send
+#endif
+ case ECONNREFUSED: // Connection refused
+ case ECONNABORTED: // Connection aborted
+ case ECONNRESET: // Connection reset
+ case EINTR: // Interrupted function call
+#ifdef EHOSTDOWN
+ case EHOSTDOWN: // Host is down
+#endif
+ case EHOSTUNREACH: // Host is unreachable
+ case ENETDOWN: // Network is down
+ case ENETRESET: // Connection aborted by network
+ case ENETUNREACH: // Network unreachable
+ case ENOLCK: // No locks available
+ case ENOLINK: // Link has been severed
+#ifdef ENONET
+ case ENONET: // Machine is not on the network
+#endif
+ return StatusCode::kUnavailable;
+ case EDEADLK: // Resource deadlock avoided
+#ifdef ESTALE
+ case ESTALE: // Stale file handle
+#endif
+ return StatusCode::kAborted;
+ case ECANCELED: // Operation cancelled
+ return StatusCode::kCancelled;
+ default:
+ return StatusCode::kUnknown;
+ }
+}
+
+namespace {
+std::string MessageForErrnoToStatus(int error_number,
+ absl::string_view message) {
+ return absl::StrCat(message, ": ",
+ absl::base_internal::StrError(error_number));
+}
+} // namespace
+
+Status ErrnoToStatus(int error_number, absl::string_view message) {
+ return Status(ErrnoToStatusCode(error_number),
+ MessageForErrnoToStatus(error_number, message));
+}
+
+namespace status_internal {
+
+std::string* MakeCheckFailString(const absl::Status* status,
+ const char* prefix) {
+ return new std::string(
+ absl::StrCat(prefix, " (",
+ status->ToString(StatusToStringMode::kWithEverything), ")"));
+}
+
+} // namespace status_internal
+
ABSL_NAMESPACE_END
} // namespace absl
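
A short usage sketch of the two entry points added above (the wrapper function is hypothetical):

#include <cerrno>
#include <cstdio>

#include "absl/status/status.h"

absl::Status ReadConfig(const char* path) {
  std::FILE* f = std::fopen(path, "r");
  if (f == nullptr) {
    // errno was set by fopen; the message becomes, e.g.,
    // "opening config: No such file or directory".
    return absl::ErrnoToStatus(errno, "opening config");
  }
  std::fclose(f);
  return absl::OkStatus();
}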
diff --git a/absl/status/status.h b/absl/status/status.h
index 39071e5f..9292b83a 100644
--- a/absl/status/status.h
+++ b/absl/status/status.h
@@ -24,11 +24,11 @@
// * A set of helper functions for creating status codes and checking their
// values
//
-// Within Google, `absl::Status` is the primary mechanism for gracefully
-// handling errors across API boundaries (and in particular across RPC
-// boundaries). Some of these errors may be recoverable, but others may not.
-// Most functions that can produce a recoverable error should be designed to
-// return an `absl::Status` (or `absl::StatusOr`).
+// Within Google, `absl::Status` is the primary mechanism for communicating
+// errors in C++, and is used to represent error state in both in-process
+// library calls as well as RPC calls. Some of these errors may be recoverable,
+// but others may not. Most functions that can produce a recoverable error
+// should be designed to return an `absl::Status` (or `absl::StatusOr`).
//
// Example:
//
@@ -469,8 +469,9 @@ class Status final {
// Status::ok()
//
- // Returns `true` if `this->ok()`. Prefer checking for an OK status using this
- // member function.
+ // Returns `true` if `this->code()` == `absl::StatusCode::kOk`,
+ // indicating the absence of an error.
+ // Prefer checking for an OK status using this member function.
ABSL_MUST_USE_RESULT bool ok() const;
// Status::code()
@@ -532,7 +533,7 @@ class Status final {
//----------------------------------------------------------------------------
// A payload may be attached to a status to provide additional context to an
- // error that may not be satisifed by an existing `absl::StatusCode`.
+ // error that may not be satisfied by an existing `absl::StatusCode`.
// Typically, this payload serves one of several purposes:
//
// * It may provide more fine-grained semantic information about the error
@@ -612,10 +613,6 @@ class Status final {
const status_internal::Payloads* GetPayloads() const;
status_internal::Payloads* GetPayloads();
- // Takes ownership of payload.
- static uintptr_t NewRep(
- absl::StatusCode code, absl::string_view msg,
- std::unique_ptr<status_internal::Payloads> payload);
static bool EqualsSlow(const absl::Status& a, const absl::Status& b);
// MSVC 14.0 limitation requires the const.
@@ -741,6 +738,19 @@ Status UnavailableError(absl::string_view message);
Status UnimplementedError(absl::string_view message);
Status UnknownError(absl::string_view message);
+// ErrnoToStatusCode()
+//
+// Returns the StatusCode for `error_number`, which should be an `errno` value.
+// See https://en.cppreference.com/w/cpp/error/errno_macros and similar
+// references.
+absl::StatusCode ErrnoToStatusCode(int error_number);
+
+// ErrnoToStatus()
+//
+// Convenience function that creates an `absl::Status` using an `error_number`,
+// which should be an `errno` value.
+Status ErrnoToStatus(int error_number, absl::string_view message);
+
//------------------------------------------------------------------------------
// Implementation details follow
//------------------------------------------------------------------------------
diff --git a/absl/status/status_test.cc b/absl/status/status_test.cc
index 1b038f6d..89cce7df 100644
--- a/absl/status/status_test.cc
+++ b/absl/status/status_test.cc
@@ -14,6 +14,8 @@
#include "absl/status/status.h"
+#include <errno.h>
+
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
@@ -485,4 +487,22 @@ TEST(Status, Swap) {
test_swap(no_payload, with_payload);
test_swap(with_payload, no_payload);
}
+
+TEST(StatusErrno, ErrnoToStatusCode) {
+ EXPECT_EQ(absl::ErrnoToStatusCode(0), absl::StatusCode::kOk);
+
+ // Spot-check a few errno values.
+ EXPECT_EQ(absl::ErrnoToStatusCode(EINVAL),
+ absl::StatusCode::kInvalidArgument);
+ EXPECT_EQ(absl::ErrnoToStatusCode(ENOENT), absl::StatusCode::kNotFound);
+
+  // We'll pick a very large number so it hopefully doesn't collide with any errno value.
+ EXPECT_EQ(absl::ErrnoToStatusCode(19980927), absl::StatusCode::kUnknown);
+}
+
+TEST(StatusErrno, ErrnoToStatus) {
+ absl::Status status = absl::ErrnoToStatus(ENOENT, "Cannot open 'path'");
+ EXPECT_EQ(status.code(), absl::StatusCode::kNotFound);
+ EXPECT_EQ(status.message(), "Cannot open 'path': No such file or directory");
+}
} // namespace
diff --git a/absl/status/statusor.h b/absl/status/statusor.h
index c051fbb3..a76e7201 100644
--- a/absl/status/statusor.h
+++ b/absl/status/statusor.h
@@ -106,7 +106,13 @@ class BadStatusOrAccess : public std::exception {
// Returned StatusOr objects may not be ignored.
template <typename T>
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+// TODO(b/176172494): ABSL_MUST_USE_RESULT should expand to the more strict
+// [[nodiscard]]. For now, just use [[nodiscard]] directly when it is available.
+class [[nodiscard]] StatusOr;
+#else
class ABSL_MUST_USE_RESULT StatusOr;
+#endif // ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
// absl::StatusOr<T>
//
@@ -156,8 +162,8 @@ class ABSL_MUST_USE_RESULT StatusOr;
// An `absl::StatusOr<T*>` can be constructed from a null pointer like any other
// pointer value, and the result will be that `ok()` returns `true` and
// `value()` returns `nullptr`. Checking the value of a pointer in an
-// `absl::StatusOr<T>` generally requires a bit more care, to ensure both that a
-// value is present and that value is not null:
+// `absl::StatusOr<T*>` generally requires a bit more care, to ensure both that
+// a value is present and that value is not null:
//
// StatusOr<std::unique_ptr<Foo>> result = FooFactory::MakeNewFoo(arg);
// if (!result.ok()) {
@@ -471,7 +477,7 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
// StatusOr<T>::ok()
//
// Returns whether or not this `absl::StatusOr<T>` holds a `T` value. This
- // member function is analagous to `absl::Status::ok()` and should be used
+ // member function is analogous to `absl::Status::ok()` and should be used
// similarly to check the status of return values.
//
// Example:
diff --git a/absl/strings/BUILD.bazel b/absl/strings/BUILD.bazel
index 090fc58a..3f51252f 100644
--- a/absl/strings/BUILD.bazel
+++ b/absl/strings/BUILD.bazel
@@ -16,6 +16,7 @@
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
+ "ABSL_DEFAULT_LINKOPTS",
"ABSL_TEST_COPTS",
)
@@ -65,6 +66,7 @@ cc_library(
"substitute.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":internal",
"//absl/base",
@@ -95,6 +97,7 @@ cc_library(
"internal/utf8.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
"//absl/base:core_headers",
@@ -272,19 +275,23 @@ cc_library(
"internal/cord_rep_btree_navigator.cc",
"internal/cord_rep_btree_reader.cc",
"internal/cord_rep_consume.cc",
+ "internal/cord_rep_crc.cc",
"internal/cord_rep_ring.cc",
],
hdrs = [
+ "internal/cord_data_edge.h",
"internal/cord_internal.h",
"internal/cord_rep_btree.h",
"internal/cord_rep_btree_navigator.h",
"internal/cord_rep_btree_reader.h",
"internal/cord_rep_consume.h",
+ "internal/cord_rep_crc.h",
"internal/cord_rep_flat.h",
"internal/cord_rep_ring.h",
"internal/cord_rep_ring_reader.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//visibility:private",
],
@@ -306,12 +313,16 @@ cc_library(
)
cc_test(
- name = "cord_internal_test",
- srcs = ["internal/cord_internal_test.cc"],
+ name = "cord_data_edge_test",
+ size = "small",
+ srcs = ["internal/cord_data_edge_test.cc"],
copts = ABSL_TEST_COPTS,
visibility = ["//visibility:private"],
deps = [
":cord_internal",
+ ":cord_rep_test_util",
+ ":strings",
+ "//absl/base:config",
"@com_google_googletest//:gtest_main",
],
)
@@ -366,10 +377,25 @@ cc_test(
],
)
+cc_test(
+ name = "cord_rep_crc_test",
+ size = "small",
+ srcs = ["internal/cord_rep_crc_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":cord_internal",
+ ":cord_rep_test_util",
+ "//absl/base:config",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "cordz_update_tracker",
hdrs = ["internal/cordz_update_tracker.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -392,11 +418,16 @@ cc_library(
name = "cord",
srcs = [
"cord.cc",
+ "cord_analysis.cc",
+ "cord_analysis.h",
+ "cord_buffer.cc",
],
hdrs = [
"cord.h",
+ "cord_buffer.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":cord_internal",
":cordz_functions",
@@ -416,7 +447,9 @@ cc_library(
"//absl/container:inlined_vector",
"//absl/functional:function_ref",
"//absl/meta:type_traits",
+ "//absl/numeric:bits",
"//absl/types:optional",
+ "//absl/types:span",
],
)
@@ -425,6 +458,7 @@ cc_library(
srcs = ["internal/cordz_handle.cc"],
hdrs = ["internal/cordz_handle.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -441,6 +475,7 @@ cc_library(
srcs = ["internal/cordz_info.cc"],
hdrs = ["internal/cordz_info.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -465,6 +500,7 @@ cc_library(
name = "cordz_update_scope",
hdrs = ["internal/cordz_update_scope.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -497,6 +533,7 @@ cc_library(
srcs = ["internal/cordz_sample_token.cc"],
hdrs = ["internal/cordz_sample_token.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -512,6 +549,7 @@ cc_library(
srcs = ["internal/cordz_functions.cc"],
hdrs = ["internal/cordz_functions.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -527,6 +565,7 @@ cc_library(
name = "cordz_statistics",
hdrs = ["internal/cordz_statistics.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
],
@@ -636,6 +675,7 @@ cc_library(
"cord_test_helpers.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":cord",
":cord_internal",
@@ -649,6 +689,7 @@ cc_library(
testonly = 1,
hdrs = ["internal/cord_rep_test_util.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":cord_internal",
":strings",
@@ -662,6 +703,7 @@ cc_library(
testonly = 1,
hdrs = ["cordz_test_helpers.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":cord",
":cord_internal",
@@ -677,6 +719,22 @@ cc_library(
)
cc_test(
+ name = "cord_buffer_test",
+ size = "small",
+ srcs = ["cord_buffer_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":cord",
+ ":cord_internal",
+ ":cord_rep_test_util",
+ "//absl/base:config",
+ "//absl/types:span",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
name = "cord_test",
size = "medium",
srcs = ["cord_test.cc"],
@@ -695,6 +753,7 @@ cc_test(
"//absl/base:endian",
"//absl/base:raw_logging_internal",
"//absl/container:fixed_array",
+ "//absl/hash",
"//absl/random",
"@com_google_googletest//:gtest_main",
],
@@ -710,7 +769,6 @@ cc_test(
"no_test_android_arm",
"no_test_android_arm64",
"no_test_android_x86",
- "no_test_darwin_x86_64",
"no_test_ios_x86_64",
"no_test_loonix",
"no_test_msvc_x64",
@@ -734,22 +792,6 @@ cc_test(
)
cc_test(
- name = "cord_rep_consume_test",
- size = "medium",
- srcs = ["internal/cord_rep_consume_test.cc"],
- copts = ABSL_TEST_COPTS,
- visibility = ["//visibility:private"],
- deps = [
- ":cord_internal",
- ":strings",
- "//absl/base:config",
- "//absl/base:core_headers",
- "//absl/debugging:leak_check",
- "@com_google_googletest//:gtest_main",
- ],
-)
-
-cc_test(
name = "cord_ring_test",
size = "medium",
srcs = ["cord_ring_test.cc"],
@@ -1070,6 +1112,7 @@ cc_library(
"str_format.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":str_format_internal",
],
@@ -1095,6 +1138,7 @@ cc_library(
"internal/str_format/parser.h",
],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
deps = [
":strings",
@@ -1107,6 +1151,7 @@ cc_library(
"//absl/numeric:representation",
"//absl/types:optional",
"//absl/types:span",
+ "//absl/utility",
],
)
@@ -1182,6 +1227,7 @@ cc_test(
deps = [
":str_format_internal",
":strings",
+ "//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/types:optional",
"@com_google_googletest//:gtest_main",
@@ -1217,6 +1263,7 @@ cc_library(
testonly = True,
srcs = ["internal/pow10_helper.cc"],
hdrs = ["internal/pow10_helper.h"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
deps = ["//absl/base:config"],
)
diff --git a/absl/strings/CMakeLists.txt b/absl/strings/CMakeLists.txt
index d6801fe6..bb073301 100644
--- a/absl/strings/CMakeLists.txt
+++ b/absl/strings/CMakeLists.txt
@@ -68,6 +68,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
strings_internal
@@ -385,6 +386,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
str_format_internal
@@ -412,6 +414,7 @@ absl_cc_library(
absl::core_headers
absl::numeric_representation
absl::type_traits
+ absl::utility
absl::int128
absl::span
)
@@ -492,6 +495,7 @@ absl_cc_test(
DEPS
absl::strings
absl::str_format_internal
+ absl::core_headers
absl::raw_logging_internal
absl::int128
GTest::gmock_main
@@ -523,6 +527,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
pow10_helper
@@ -550,14 +555,17 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cord_internal
HDRS
+ "internal/cord_data_edge.h"
"internal/cord_internal.h"
"internal/cord_rep_btree.h"
"internal/cord_rep_btree_navigator.h"
"internal/cord_rep_btree_reader.h"
+ "internal/cord_rep_crc.h"
"internal/cord_rep_consume.h"
"internal/cord_rep_flat.h"
"internal/cord_rep_ring.h"
@@ -567,6 +575,7 @@ absl_cc_library(
"internal/cord_rep_btree.cc"
"internal/cord_rep_btree_navigator.cc"
"internal/cord_rep_btree_reader.cc"
+ "internal/cord_rep_crc.cc"
"internal/cord_rep_consume.cc"
"internal/cord_rep_ring.cc"
COPTS
@@ -585,6 +594,7 @@ absl_cc_library(
absl::type_traits
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_update_tracker
@@ -611,6 +621,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_functions
@@ -639,6 +650,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_statistics
@@ -653,6 +665,7 @@ absl_cc_library(
absl::synchronization
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_handle
@@ -686,6 +699,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_info
@@ -753,6 +767,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_sample_token
@@ -791,6 +806,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_update_scope
@@ -829,8 +845,12 @@ absl_cc_library(
cord
HDRS
"cord.h"
+ "cord_buffer.h"
SRCS
"cord.cc"
+ "cord_analysis.cc"
+ "cord_analysis.h"
+ "cord_buffer.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
@@ -848,11 +868,13 @@ absl_cc_library(
absl::inlined_vector
absl::optional
absl::raw_logging_internal
+ absl::span
absl::strings
absl::type_traits
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cord_rep_test_util
@@ -883,6 +905,7 @@ absl_cc_library(
TESTONLY
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
cordz_test_helpers
@@ -920,6 +943,7 @@ absl_cc_test(
absl::cordz_test_helpers
absl::core_headers
absl::endian
+ absl::hash
absl::random_random
absl::raw_logging_internal
absl::fixed_array
@@ -928,36 +952,23 @@ absl_cc_test(
absl_cc_test(
NAME
- cord_rep_consume_test
+ cord_data_edge_test
SRCS
- "internal/cord_rep_consume_test.cc"
+ "internal/cord_data_edge_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::config
absl::cord_internal
+ absl::cord_rep_test_util
absl::core_headers
- absl::function_ref
- absl::raw_logging_internal
absl::strings
GTest::gmock_main
)
absl_cc_test(
NAME
- cord_internal_test
- SRCS
- "internal/cord_internal_test.cc"
- COPTS
- ${ABSL_TEST_COPTS}
- DEPS
- absl::cord_internal
- GTest::gmock_main
-)
-
-absl_cc_test(
- NAME
cord_rep_btree_test
SRCS
"internal/cord_rep_btree_test.cc"
@@ -1013,6 +1024,20 @@ absl_cc_test(
absl_cc_test(
NAME
+ cord_rep_crc_test
+ SRCS
+ "internal/cord_rep_crc_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::config
+ absl::cord_internal
+ absl::cord_rep_test_util
+ GTest::gmock_main
+)
+
+absl_cc_test(
+ NAME
cord_ring_test
SRCS
"cord_ring_test.cc"
diff --git a/absl/strings/ascii.h b/absl/strings/ascii.h
index b46bc71f..42eadaea 100644
--- a/absl/strings/ascii.h
+++ b/absl/strings/ascii.h
@@ -133,7 +133,7 @@ inline bool ascii_isdigit(unsigned char c) { return c >= '0' && c <= '9'; }
// ascii_isprint()
//
-// Determines whether the given character is printable, including whitespace.
+// Determines whether the given character is printable, including spaces.
inline bool ascii_isprint(unsigned char c) { return c >= 32 && c < 127; }
// ascii_isgraph()
@@ -197,7 +197,7 @@ ABSL_MUST_USE_RESULT inline std::string AsciiStrToUpper(absl::string_view s) {
ABSL_MUST_USE_RESULT inline absl::string_view StripLeadingAsciiWhitespace(
absl::string_view str) {
auto it = std::find_if_not(str.begin(), str.end(), absl::ascii_isspace);
- return str.substr(it - str.begin());
+ return str.substr(static_cast<size_t>(it - str.begin()));
}
// Strips in place whitespace from the beginning of the given string.
@@ -211,13 +211,13 @@ inline void StripLeadingAsciiWhitespace(std::string* str) {
ABSL_MUST_USE_RESULT inline absl::string_view StripTrailingAsciiWhitespace(
absl::string_view str) {
auto it = std::find_if_not(str.rbegin(), str.rend(), absl::ascii_isspace);
- return str.substr(0, str.rend() - it);
+ return str.substr(0, static_cast<size_t>(str.rend() - it));
}
// Strips in place whitespace from the end of the given string
inline void StripTrailingAsciiWhitespace(std::string* str) {
auto it = std::find_if_not(str->rbegin(), str->rend(), absl::ascii_isspace);
- str->erase(str->rend() - it);
+ str->erase(static_cast<size_t>(str->rend() - it));
}
// Returns absl::string_view with whitespace stripped from both ends of the
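
The casts added here are sign-conversion fixes rather than behavior changes: subtracting iterators yields a signed std::ptrdiff_t, while substr() and erase() take an unsigned size_t; because find_if_not never returns an iterator before the range's start, the difference is non-negative and the conversion is value-preserving. The same pattern in isolation (the helper is hypothetical and assumes `it` points into `s`):

#include <cstddef>
#include <string_view>

std::string_view DropBefore(std::string_view s,
                            std::string_view::iterator it) {
  // it - s.begin() is a non-negative std::ptrdiff_t here, so the cast to
  // size_t does not change the value.
  return s.substr(static_cast<std::size_t>(it - s.begin()));
}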
diff --git a/absl/strings/cord.cc b/absl/strings/cord.cc
index 854047ca..85a67a08 100644
--- a/absl/strings/cord.cc
+++ b/absl/strings/cord.cc
@@ -34,9 +34,12 @@
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
+#include "absl/strings/cord_buffer.h"
#include "absl/strings/escaping.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cordz_statistics.h"
#include "absl/strings/internal/cordz_update_scope.h"
@@ -52,7 +55,7 @@ ABSL_NAMESPACE_BEGIN
using ::absl::cord_internal::CordRep;
using ::absl::cord_internal::CordRepBtree;
-using ::absl::cord_internal::CordRepConcat;
+using ::absl::cord_internal::CordRepCrc;
using ::absl::cord_internal::CordRepExternal;
using ::absl::cord_internal::CordRepFlat;
using ::absl::cord_internal::CordRepSubstring;
@@ -64,56 +67,6 @@ using ::absl::cord_internal::kMinFlatLength;
using ::absl::cord_internal::kInlinedVectorSize;
using ::absl::cord_internal::kMaxBytesToCopy;
-constexpr uint64_t Fibonacci(unsigned char n, uint64_t a = 0, uint64_t b = 1) {
- return n == 0 ? a : Fibonacci(n - 1, b, a + b);
-}
-
-static_assert(Fibonacci(63) == 6557470319842,
- "Fibonacci values computed incorrectly");
-
-// Minimum length required for a given depth tree -- a tree is considered
-// balanced if
-// length(t) >= min_length[depth(t)]
-// The root node depth is allowed to become twice as large to reduce rebalancing
-// for larger strings (see IsRootBalanced).
-static constexpr uint64_t min_length[] = {
- Fibonacci(2), Fibonacci(3), Fibonacci(4), Fibonacci(5),
- Fibonacci(6), Fibonacci(7), Fibonacci(8), Fibonacci(9),
- Fibonacci(10), Fibonacci(11), Fibonacci(12), Fibonacci(13),
- Fibonacci(14), Fibonacci(15), Fibonacci(16), Fibonacci(17),
- Fibonacci(18), Fibonacci(19), Fibonacci(20), Fibonacci(21),
- Fibonacci(22), Fibonacci(23), Fibonacci(24), Fibonacci(25),
- Fibonacci(26), Fibonacci(27), Fibonacci(28), Fibonacci(29),
- Fibonacci(30), Fibonacci(31), Fibonacci(32), Fibonacci(33),
- Fibonacci(34), Fibonacci(35), Fibonacci(36), Fibonacci(37),
- Fibonacci(38), Fibonacci(39), Fibonacci(40), Fibonacci(41),
- Fibonacci(42), Fibonacci(43), Fibonacci(44), Fibonacci(45),
- Fibonacci(46), Fibonacci(47),
- 0xffffffffffffffffull, // Avoid overflow
-};
-
-static const int kMinLengthSize = ABSL_ARRAYSIZE(min_length);
-
-static inline bool btree_enabled() {
- return cord_internal::cord_btree_enabled.load(
- std::memory_order_relaxed);
-}
-
-static inline bool IsRootBalanced(CordRep* node) {
- if (!node->IsConcat()) {
- return true;
- } else if (node->concat()->depth() <= 15) {
- return true;
- } else if (node->concat()->depth() > kMinLengthSize) {
- return false;
- } else {
- // Allow depth to become twice as large as implied by fibonacci rule to
- // reduce rebalancing for larger strings.
- return (node->length >= min_length[node->concat()->depth() / 2]);
- }
-}
-
-static CordRep* Rebalance(CordRep* node);
static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
int indent = 0);
static bool VerifyNode(CordRep* root, CordRep* start_node,
@@ -135,75 +88,6 @@ static inline CordRep* VerifyTree(CordRep* node) {
return node;
}
-// Return the depth of a node
-static int Depth(const CordRep* rep) {
- if (rep->IsConcat()) {
- return rep->concat()->depth();
- } else {
- return 0;
- }
-}
-
-static void SetConcatChildren(CordRepConcat* concat, CordRep* left,
- CordRep* right) {
- concat->left = left;
- concat->right = right;
-
- concat->length = left->length + right->length;
- concat->set_depth(1 + std::max(Depth(left), Depth(right)));
-}
-
-// Create a concatenation of the specified nodes.
-// Does not change the refcounts of "left" and "right".
-// The returned node has a refcount of 1.
-static CordRep* RawConcat(CordRep* left, CordRep* right) {
- // Avoid making degenerate concat nodes (one child is empty)
- if (left == nullptr) return right;
- if (right == nullptr) return left;
- if (left->length == 0) {
- CordRep::Unref(left);
- return right;
- }
- if (right->length == 0) {
- CordRep::Unref(right);
- return left;
- }
-
- CordRepConcat* rep = new CordRepConcat();
- rep->tag = cord_internal::CONCAT;
- SetConcatChildren(rep, left, right);
-
- return rep;
-}
-
-static CordRep* Concat(CordRep* left, CordRep* right) {
- CordRep* rep = RawConcat(left, right);
- if (rep != nullptr && !IsRootBalanced(rep)) {
- rep = Rebalance(rep);
- }
- return VerifyTree(rep);
-}
-
-// Make a balanced tree out of an array of leaf nodes.
-static CordRep* MakeBalancedTree(CordRep** reps, size_t n) {
- // Make repeated passes over the array, merging adjacent pairs
- // until we are left with just a single node.
- while (n > 1) {
- size_t dst = 0;
- for (size_t src = 0; src < n; src += 2) {
- if (src + 1 < n) {
- reps[dst] = Concat(reps[src], reps[src + 1]);
- } else {
- reps[dst] = reps[src];
- }
- dst++;
- }
- n = dst;
- }
-
- return reps[0];
-}
-
static CordRepFlat* CreateFlat(const char* data, size_t length,
size_t alloc_hint) {
CordRepFlat* flat = CordRepFlat::New(length + alloc_hint);
@@ -229,21 +113,7 @@ static CordRep* NewBtree(const char* data, size_t length, size_t alloc_hint) {
// The returned node has a refcount of 1.
static CordRep* NewTree(const char* data, size_t length, size_t alloc_hint) {
if (length == 0) return nullptr;
- if (btree_enabled()) {
- return NewBtree(data, length, alloc_hint);
- }
- absl::FixedArray<CordRep*> reps((length - 1) / kMaxFlatLength + 1);
- size_t n = 0;
- do {
- const size_t len = std::min(length, kMaxFlatLength);
- CordRepFlat* rep = CordRepFlat::New(len + alloc_hint);
- rep->length = len;
- memcpy(rep->Data(), data, len);
- reps[n++] = VerifyTree(rep);
- data += len;
- length -= len;
- } while (length != 0);
- return MakeBalancedTree(reps.data(), n);
+ return NewBtree(data, length, alloc_hint);
}
namespace cord_internal {
@@ -258,22 +128,6 @@ void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep) {
} // namespace cord_internal
-static CordRep* NewSubstring(CordRep* child, size_t offset, size_t length) {
- // Never create empty substring nodes
- if (length == 0) {
- CordRep::Unref(child);
- return nullptr;
- } else {
- CordRepSubstring* rep = new CordRepSubstring();
- assert((offset + length) <= child->length);
- rep->length = length;
- rep->tag = cord_internal::SUBSTRING;
- rep->start = offset;
- rep->child = child;
- return VerifyTree(rep);
- }
-}
-
// Creates a CordRep from the provided string. If the string is large enough,
// and not wasteful, we move the string into an external cord rep, preserving
// the already allocated string contents.
@@ -306,13 +160,14 @@ static CordRep* CordRepFromString(std::string&& src) {
// --------------------------------------------------------------------
// Cord::InlineRep functions
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr unsigned char Cord::InlineRep::kMaxInline;
+#endif
-inline void Cord::InlineRep::set_data(const char* data, size_t n,
- bool nullify_tail) {
+inline void Cord::InlineRep::set_data(const char* data, size_t n) {
static_assert(kMaxInline == 15, "set_data is hard-coded for a length of 15");
- cord_internal::SmallMemmove(data_.as_chars(), data, n, nullify_tail);
+ cord_internal::SmallMemmove<true>(data_.as_chars(), data, n);
set_inline_size(n);
}
@@ -341,7 +196,9 @@ inline void Cord::InlineRep::remove_prefix(size_t n) {
// Returns `rep` converted into a CordRepBtree.
// Directly returns `rep` if `rep` is already a CordRepBtree.
static CordRepBtree* ForceBtree(CordRep* rep) {
- return rep->IsBtree() ? rep->btree() : CordRepBtree::Create(rep);
+ return rep->IsBtree()
+ ? rep->btree()
+ : CordRepBtree::Create(cord_internal::RemoveCrcNode(rep));
}
void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
@@ -349,11 +206,7 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
- if (btree_enabled()) {
- tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
- } else {
- tree = Concat(flat, tree);
- }
+ tree = CordRepBtree::Append(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
@@ -361,16 +214,14 @@ void Cord::InlineRep::AppendTreeToInlined(CordRep* tree,
void Cord::InlineRep::AppendTreeToTree(CordRep* tree, MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
- if (btree_enabled()) {
- tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
- } else {
- tree = Concat(data_.as_tree(), tree);
- }
+ tree = CordRepBtree::Append(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::AppendTree(CordRep* tree, MethodIdentifier method) {
- if (tree == nullptr) return;
+ assert(tree != nullptr);
+ assert(tree->length != 0);
+ assert(!tree->IsCrc());
if (data_.is_tree()) {
AppendTreeToTree(tree, method);
} else {
@@ -383,11 +234,7 @@ void Cord::InlineRep::PrependTreeToInlined(CordRep* tree,
assert(!is_tree());
if (!data_.is_empty()) {
CordRepFlat* flat = MakeFlatWithExtraCapacity(0);
- if (btree_enabled()) {
- tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
- } else {
- tree = Concat(tree, flat);
- }
+ tree = CordRepBtree::Prepend(CordRepBtree::Create(flat), tree);
}
EmplaceTree(tree, method);
}
@@ -396,16 +243,14 @@ void Cord::InlineRep::PrependTreeToTree(CordRep* tree,
MethodIdentifier method) {
assert(is_tree());
const CordzUpdateScope scope(data_.cordz_info(), method);
- if (btree_enabled()) {
- tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
- } else {
- tree = Concat(tree, data_.as_tree());
- }
+ tree = CordRepBtree::Prepend(ForceBtree(data_.as_tree()), tree);
SetTree(tree, scope);
}
void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
assert(tree != nullptr);
+ assert(tree->length != 0);
+ assert(!tree->IsCrc());
if (data_.is_tree()) {
PrependTreeToTree(tree, method);
} else {
@@ -419,7 +264,7 @@ void Cord::InlineRep::PrependTree(CordRep* tree, MethodIdentifier method) {
// written to region and the actual size increase will be written to size.
static inline bool PrepareAppendRegion(CordRep* root, char** region,
size_t* size, size_t max_length) {
- if (root->IsBtree() && root->refcount.IsMutable()) {
+ if (root->IsBtree() && root->refcount.IsOne()) {
Span<char> span = root->btree()->GetAppendBuffer(max_length);
if (!span.empty()) {
*region = span.data();
@@ -428,13 +273,8 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
}
}
- // Search down the right-hand path for a non-full FLAT node.
CordRep* dst = root;
- while (dst->IsConcat() && dst->refcount.IsMutable()) {
- dst = dst->concat()->right;
- }
-
- if (!dst->IsFlat() || !dst->refcount.IsMutable()) {
+ if (!dst->IsFlat() || !dst->refcount.IsOne()) {
*region = nullptr;
*size = 0;
return false;
@@ -448,12 +288,7 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
return false;
}
- size_t size_increase = std::min(capacity - in_use, max_length);
-
- // We need to update the length fields for all nodes, including the leaf node.
- for (CordRep* rep = root; rep != dst; rep = rep->concat()->right) {
- rep->length += size_increase;
- }
+ const size_t size_increase = std::min(capacity - in_use, max_length);
dst->length += size_increase;
*region = dst->flat()->Data() + in_use;
@@ -461,90 +296,6 @@ static inline bool PrepareAppendRegion(CordRep* root, char** region,
return true;
}
-template <bool has_length>
-void Cord::InlineRep::GetAppendRegion(char** region, size_t* size,
- size_t length) {
- auto constexpr method = CordzUpdateTracker::kGetAppendRegion;
-
- CordRep* root = tree();
- size_t sz = root ? root->length : inline_size();
- if (root == nullptr) {
- size_t available = kMaxInline - sz;
- if (available >= (has_length ? length : 1)) {
- *region = data_.as_chars() + sz;
- *size = has_length ? length : available;
- set_inline_size(has_length ? sz + length : kMaxInline);
- return;
- }
- }
-
- size_t extra = has_length ? length : (std::max)(sz, kMinFlatLength);
- CordRep* rep = root ? root : MakeFlatWithExtraCapacity(extra);
- CordzUpdateScope scope(root ? data_.cordz_info() : nullptr, method);
- if (PrepareAppendRegion(rep, region, size, length)) {
- CommitTree(root, rep, scope, method);
- return;
- }
-
- // Allocate new node.
- CordRepFlat* new_node = CordRepFlat::New(extra);
- new_node->length = std::min(new_node->Capacity(), length);
- *region = new_node->Data();
- *size = new_node->length;
-
- if (btree_enabled()) {
- rep = CordRepBtree::Append(ForceBtree(rep), new_node);
- } else {
- rep = Concat(rep, new_node);
- }
- CommitTree(root, rep, scope, method);
-}
-
-// Computes the memory size of the provided edge which must be a valid data edge
-// for a btree, i.e., a FLAT, EXTERNAL or SUBSTRING of a FLAT or EXTERNAL node.
-static bool RepMemoryUsageDataEdge(const CordRep* rep,
- size_t* total_mem_usage) {
- size_t maybe_sub_size = 0;
- if (ABSL_PREDICT_FALSE(rep->IsSubstring())) {
- maybe_sub_size = sizeof(cord_internal::CordRepSubstring);
- rep = rep->substring()->child;
- }
- if (rep->IsFlat()) {
- *total_mem_usage += maybe_sub_size + rep->flat()->AllocatedSize();
- return true;
- }
- if (rep->IsExternal()) {
- // We don't know anything about the embedded / bound data, but we can safely
- // assume it is 'at least' a word / pointer to data. In the future we may
- // choose to use the 'data' byte as a tag to identify the types of some
- // well-known externals, such as a std::string instance.
- *total_mem_usage += maybe_sub_size +
- sizeof(cord_internal::CordRepExternalImpl<intptr_t>) +
- rep->length;
- return true;
- }
- return false;
-}
-
-// If the rep is a leaf, this will increment the value at total_mem_usage and
-// will return true.
-static bool RepMemoryUsageLeaf(const CordRep* rep, size_t* total_mem_usage) {
- if (rep->IsFlat()) {
- *total_mem_usage += rep->flat()->AllocatedSize();
- return true;
- }
- if (rep->IsExternal()) {
- // We don't know anything about the embedded / bound data, but we can safely
- // assume it is 'at least' a word / pointer to data. In the future we may
- // choose to use the 'data' byte as a tag to identify the types of some
- // well-known externals, such as a std::string instance.
- *total_mem_usage +=
- sizeof(cord_internal::CordRepExternalImpl<intptr_t>) + rep->length;
- return true;
- }
- return false;
-}
-
void Cord::InlineRep::AssignSlow(const Cord::InlineRep& src) {
assert(&src != this);
assert(is_tree() || src.is_tree());
@@ -581,7 +332,7 @@ Cord::Cord(absl::string_view src, MethodIdentifier method)
: contents_(InlineData::kDefaultInit) {
const size_t n = src.size();
if (n <= InlineRep::kMaxInline) {
- contents_.set_data(src.data(), n, true);
+ contents_.set_data(src.data(), n);
} else {
CordRep* rep = NewTree(src.data(), n, 0);
contents_.EmplaceTree(rep, method);
@@ -591,7 +342,7 @@ Cord::Cord(absl::string_view src, MethodIdentifier method)
template <typename T, Cord::EnableIfString<T>>
Cord::Cord(T&& src) : contents_(InlineData::kDefaultInit) {
if (src.size() <= InlineRep::kMaxInline) {
- contents_.set_data(src.data(), src.size(), true);
+ contents_.set_data(src.data(), src.size());
} else {
CordRep* rep = CordRepFromString(std::forward<T>(src));
contents_.EmplaceTree(rep, CordzUpdateTracker::kConstructorString);
@@ -642,14 +393,14 @@ Cord& Cord::operator=(absl::string_view src) {
// - MaybeUntrackCord must be called before set_data() clobbers cordz_info.
// - set_data() must be called before Unref(tree) as it may reference tree.
if (tree != nullptr) CordzInfo::MaybeUntrackCord(contents_.cordz_info());
- contents_.set_data(data, length, true);
+ contents_.set_data(data, length);
if (tree != nullptr) CordRep::Unref(tree);
return *this;
}
if (tree != nullptr) {
CordzUpdateScope scope(contents_.cordz_info(), method);
if (tree->IsFlat() && tree->flat()->Capacity() >= length &&
- tree->refcount.IsMutable()) {
+ tree->refcount.IsOne()) {
// Copy in place if the existing FLAT node is reusable.
memmove(tree->flat()->Data(), data, length);
tree->length = length;
@@ -675,6 +426,7 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
const CordRep* const root = rep;
CordzUpdateScope scope(root ? cordz_info() : nullptr, method);
if (root != nullptr) {
+ rep = cord_internal::RemoveCrcNode(rep);
char* region;
if (PrepareAppendRegion(rep, &region, &appended, src.size())) {
memcpy(region, src.data(), appended);
@@ -705,27 +457,11 @@ void Cord::InlineRep::AppendArray(absl::string_view src,
return;
}
- if (btree_enabled()) {
- // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
- rep = ForceBtree(rep);
- const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
- rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
- } else {
- // Use new block(s) for any remaining bytes that were not handled above.
- // Alloc extra memory only if the right child of the root of the new tree
- // is going to be a FLAT node, which will permit further inplace appends.
- size_t length = src.size();
- if (src.size() < kMaxFlatLength) {
- // The new length is either
- // - old size + 10%
- // - old_size + src.size()
- // This will cause a reasonable conservative step-up in size that is
- // still large enough to avoid excessive amounts of small fragments
- // being added.
- length = std::max<size_t>(rep->length / 10, src.size());
- }
- rep = Concat(rep, NewTree(src.data(), src.size(), length - src.size()));
- }
+ // TODO(b/192061034): keep legacy 10% growth rate: consider other rates.
+ rep = ForceBtree(rep);
+ const size_t min_growth = std::max<size_t>(rep->length / 10, src.size());
+ rep = CordRepBtree::Append(rep->btree(), src, min_growth - src.size());
+
CommitTree(root, rep, scope, method);
}
@@ -746,7 +482,8 @@ inline void Cord::AppendImpl(C&& src) {
// Since destination is empty, we can avoid allocating a node,
if (src.contents_.is_tree()) {
// by taking the tree directly
- CordRep* rep = std::forward<C>(src).TakeRep();
+ CordRep* rep =
+ cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.EmplaceTree(rep, method);
} else {
// or copying over inline data
@@ -782,10 +519,50 @@ inline void Cord::AppendImpl(C&& src) {
}
// Guaranteed to be a tree (kMaxBytesToCopy > kInlinedSize)
- CordRep* rep = std::forward<C>(src).TakeRep();
+ CordRep* rep = cord_internal::RemoveCrcNode(std::forward<C>(src).TakeRep());
contents_.AppendTree(rep, CordzUpdateTracker::kAppendCord);
}
+static CordRep::ExtractResult ExtractAppendBuffer(CordRep* rep,
+ size_t min_capacity) {
+ switch (rep->tag) {
+ case cord_internal::BTREE:
+ return CordRepBtree::ExtractAppendBuffer(rep->btree(), min_capacity);
+ default:
+ if (rep->IsFlat() && rep->refcount.IsOne() &&
+ rep->flat()->Capacity() - rep->length >= min_capacity) {
+ return {nullptr, rep};
+ }
+ return {rep, nullptr};
+ }
+}
+
+static CordBuffer CreateAppendBuffer(InlineData& data, size_t capacity) {
+ // Watch out for overflow: callers can ask for size_t::max().
+ const size_t size = data.inline_size();
+ capacity = (std::min)(std::numeric_limits<size_t>::max() - size, capacity);
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(size + capacity);
+ cord_internal::SmallMemmove(buffer.data(), data.as_chars(), size);
+ buffer.SetLength(size);
+ data = {};
+ return buffer;
+}
+
+CordBuffer Cord::GetAppendBufferSlowPath(size_t capacity, size_t min_capacity) {
+ auto constexpr method = CordzUpdateTracker::kGetAppendBuffer;
+ CordRep* tree = contents_.tree();
+ if (tree != nullptr) {
+ CordzUpdateScope scope(contents_.cordz_info(), method);
+ CordRep::ExtractResult result = ExtractAppendBuffer(tree, min_capacity);
+ if (result.extracted != nullptr) {
+ contents_.SetTreeOrEmpty(result.tree, scope);
+ return CordBuffer(result.extracted->flat());
+ }
+ return CordBuffer::CreateWithDefaultLimit(capacity);
+ }
+ return CreateAppendBuffer(contents_.data_, capacity);
+}
+
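A minimal usage sketch of the public API these helpers back, `Cord::GetAppendBuffer()`
(values and sizes are illustrative only). When the cord's last node is a privately
owned flat with enough spare capacity, the returned buffer already contains the
cord's trailing data, and appending the buffer back restores it:

  #include <cstring>
  #include <utility>
  #include "absl/strings/cord.h"

  absl::Cord cord("header: ");
  // Ask for up to 64 writable bytes, reusing spare capacity when possible.
  absl::CordBuffer buffer = cord.GetAppendBuffer(/*capacity=*/64);
  absl::Span<char> span = buffer.available_up_to(5);
  memcpy(span.data(), "value", span.size());
  buffer.IncreaseLengthBy(span.size());
  cord.Append(std::move(buffer));  // cord now holds "header: value"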
void Cord::Append(const Cord& src) {
AppendImpl(src);
}
@@ -810,7 +587,8 @@ void Cord::Prepend(const Cord& src) {
CordRep* src_tree = src.contents_.tree();
if (src_tree != nullptr) {
CordRep::Ref(src_tree);
- contents_.PrependTree(src_tree, CordzUpdateTracker::kPrependCord);
+ contents_.PrependTree(cord_internal::RemoveCrcNode(src_tree),
+ CordzUpdateTracker::kPrependCord);
return;
}
@@ -837,103 +615,45 @@ void Cord::PrependArray(absl::string_view src, MethodIdentifier method) {
contents_.PrependTree(rep, method);
}
-template <typename T, Cord::EnableIfString<T>>
-inline void Cord::Prepend(T&& src) {
- if (src.size() <= kMaxBytesToCopy) {
- Prepend(absl::string_view(src));
+void Cord::AppendPrecise(absl::string_view src, MethodIdentifier method) {
+ assert(!src.empty());
+ assert(src.size() <= cord_internal::kMaxFlatLength);
+ if (contents_.remaining_inline_capacity() >= src.size()) {
+ const size_t inline_length = contents_.inline_size();
+ memcpy(contents_.data_.as_chars() + inline_length, src.data(), src.size());
+ contents_.set_inline_size(inline_length + src.size());
} else {
- CordRep* rep = CordRepFromString(std::forward<T>(src));
- contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
+ contents_.AppendTree(CordRepFlat::Create(src), method);
}
}
-template void Cord::Prepend(std::string&& src);
-
-static CordRep* RemovePrefixFrom(CordRep* node, size_t n) {
- if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
- absl::InlinedVector<CordRep*, kInlinedVectorSize> rhs_stack;
-
- while (node->IsConcat()) {
- assert(n <= node->length);
- if (n < node->concat()->left->length) {
- // Push right to stack, descend left.
- rhs_stack.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Drop left, descend right.
- n -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
- assert(n <= node->length);
-
- if (n == 0) {
- CordRep::Ref(node);
+void Cord::PrependPrecise(absl::string_view src, MethodIdentifier method) {
+ assert(!src.empty());
+ assert(src.size() <= cord_internal::kMaxFlatLength);
+ if (contents_.remaining_inline_capacity() >= src.size()) {
+ const size_t inline_length = contents_.inline_size();
+ char data[InlineRep::kMaxInline + 1] = {0};
+ memcpy(data, src.data(), src.size());
+ memcpy(data + src.size(), contents_.data(), inline_length);
+ memcpy(contents_.data_.as_chars(), data, InlineRep::kMaxInline + 1);
+ contents_.set_inline_size(inline_length + src.size());
} else {
- size_t start = n;
- size_t len = node->length - n;
- if (node->IsSubstring()) {
- // Consider in-place update of node, similar to in RemoveSuffixFrom().
- start += node->substring()->start;
- node = node->substring()->child;
- }
- node = NewSubstring(CordRep::Ref(node), start, len);
- }
- while (!rhs_stack.empty()) {
- node = Concat(node, CordRep::Ref(rhs_stack.back()));
- rhs_stack.pop_back();
+ contents_.PrependTree(CordRepFlat::Create(src), method);
}
- return node;
}
-// RemoveSuffixFrom() is very similar to RemovePrefixFrom(), with the
-// exception that removing a suffix has an optimization where a node may be
-// edited in place iff that node and all its ancestors have a refcount of 1.
-static CordRep* RemoveSuffixFrom(CordRep* node, size_t n) {
- if (n >= node->length) return nullptr;
- if (n == 0) return CordRep::Ref(node);
- absl::InlinedVector<CordRep*, kInlinedVectorSize> lhs_stack;
- bool inplace_ok = node->refcount.IsMutable();
-
- while (node->IsConcat()) {
- assert(n <= node->length);
- if (n < node->concat()->right->length) {
- // Push left to stack, descend right.
- lhs_stack.push_back(node->concat()->left);
- node = node->concat()->right;
- } else {
- // Drop right, descend left.
- n -= node->concat()->right->length;
- node = node->concat()->left;
- }
- inplace_ok = inplace_ok && node->refcount.IsMutable();
- }
- assert(n <= node->length);
-
- if (n == 0) {
- CordRep::Ref(node);
- } else if (inplace_ok && !node->IsExternal()) {
- // Consider making a new buffer if the current node capacity is much
- // larger than the new length.
- CordRep::Ref(node);
- node->length -= n;
+template <typename T, Cord::EnableIfString<T>>
+inline void Cord::Prepend(T&& src) {
+ if (src.size() <= kMaxBytesToCopy) {
+ Prepend(absl::string_view(src));
} else {
- size_t start = 0;
- size_t len = node->length - n;
- if (node->IsSubstring()) {
- start = node->substring()->start;
- node = node->substring()->child;
- }
- node = NewSubstring(CordRep::Ref(node), start, len);
- }
- while (!lhs_stack.empty()) {
- node = Concat(CordRep::Ref(lhs_stack.back()), node);
- lhs_stack.pop_back();
+ CordRep* rep = CordRepFromString(std::forward<T>(src));
+ contents_.PrependTree(rep, CordzUpdateTracker::kPrependString);
}
- return node;
}
+template void Cord::Prepend(std::string&& src);
+
void Cord::RemovePrefix(size_t n) {
ABSL_INTERNAL_CHECK(n <= size(),
absl::StrCat("Requested prefix size ", n,
@@ -944,14 +664,21 @@ void Cord::RemovePrefix(size_t n) {
} else {
auto constexpr method = CordzUpdateTracker::kRemovePrefix;
CordzUpdateScope scope(contents_.cordz_info(), method);
- if (tree->IsBtree()) {
+ tree = cord_internal::RemoveCrcNode(tree);
+ if (n >= tree->length) {
+ CordRep::Unref(tree);
+ tree = nullptr;
+ } else if (tree->IsBtree()) {
CordRep* old = tree;
tree = tree->btree()->SubTree(n, tree->length - n);
CordRep::Unref(old);
+ } else if (tree->IsSubstring() && tree->refcount.IsOne()) {
+ tree->substring()->start += n;
+ tree->length -= n;
} else {
- CordRep* newrep = RemovePrefixFrom(tree, n);
+ CordRep* rep = CordRepSubstring::Substring(tree, n, tree->length - n);
CordRep::Unref(tree);
- tree = VerifyTree(newrep);
+ tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
@@ -967,68 +694,24 @@ void Cord::RemoveSuffix(size_t n) {
} else {
auto constexpr method = CordzUpdateTracker::kRemoveSuffix;
CordzUpdateScope scope(contents_.cordz_info(), method);
- if (tree->IsBtree()) {
+ tree = cord_internal::RemoveCrcNode(tree);
+ if (n >= tree->length) {
+ CordRep::Unref(tree);
+ tree = nullptr;
+ } else if (tree->IsBtree()) {
tree = CordRepBtree::RemoveSuffix(tree->btree(), n);
+ } else if (!tree->IsExternal() && tree->refcount.IsOne()) {
+ assert(tree->IsFlat() || tree->IsSubstring());
+ tree->length -= n;
} else {
- CordRep* newrep = RemoveSuffixFrom(tree, n);
+ CordRep* rep = CordRepSubstring::Substring(tree, 0, tree->length - n);
CordRep::Unref(tree);
- tree = VerifyTree(newrep);
+ tree = rep;
}
contents_.SetTreeOrEmpty(tree, scope);
}
}
-// Work item for NewSubRange().
-struct SubRange {
- SubRange(CordRep* a_node, size_t a_pos, size_t a_n)
- : node(a_node), pos(a_pos), n(a_n) {}
- CordRep* node; // nullptr means concat last 2 results.
- size_t pos;
- size_t n;
-};
-
-static CordRep* NewSubRange(CordRep* node, size_t pos, size_t n) {
- absl::InlinedVector<CordRep*, kInlinedVectorSize> results;
- absl::InlinedVector<SubRange, kInlinedVectorSize> todo;
- todo.push_back(SubRange(node, pos, n));
- do {
- const SubRange& sr = todo.back();
- node = sr.node;
- pos = sr.pos;
- n = sr.n;
- todo.pop_back();
-
- if (node == nullptr) {
- assert(results.size() >= 2);
- CordRep* right = results.back();
- results.pop_back();
- CordRep* left = results.back();
- results.pop_back();
- results.push_back(Concat(left, right));
- } else if (pos == 0 && n == node->length) {
- results.push_back(CordRep::Ref(node));
- } else if (!node->IsConcat()) {
- if (node->IsSubstring()) {
- pos += node->substring()->start;
- node = node->substring()->child;
- }
- results.push_back(NewSubstring(CordRep::Ref(node), pos, n));
- } else if (pos + n <= node->concat()->left->length) {
- todo.push_back(SubRange(node->concat()->left, pos, n));
- } else if (pos >= node->concat()->left->length) {
- pos -= node->concat()->left->length;
- todo.push_back(SubRange(node->concat()->right, pos, n));
- } else {
- size_t left_n = node->concat()->left->length - pos;
- todo.push_back(SubRange(nullptr, 0, 0)); // Concat()
- todo.push_back(SubRange(node->concat()->right, 0, n - left_n));
- todo.push_back(SubRange(node->concat()->left, pos, left_n));
- }
- } while (!todo.empty());
- assert(results.size() == 1);
- return results[0];
-}
-
Cord Cord::Subcord(size_t pos, size_t new_size) const {
Cord sub_cord;
size_t length = size();
@@ -1038,9 +721,7 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
CordRep* tree = contents_.tree();
if (tree == nullptr) {
- // sub_cord is newly constructed, no need to re-zero-out the tail of
- // contents_ memory.
- sub_cord.contents_.set_data(contents_.data() + pos, new_size, false);
+ sub_cord.contents_.set_data(contents_.data() + pos, new_size);
return sub_cord;
}
@@ -1060,10 +741,11 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
return sub_cord;
}
+ tree = cord_internal::SkipCrcNode(tree);
if (tree->IsBtree()) {
tree = tree->btree()->SubTree(pos, new_size);
} else {
- tree = NewSubRange(tree, pos, new_size);
+ tree = CordRepSubstring::Substring(tree, pos, new_size);
}
sub_cord.contents_.EmplaceTree(tree, contents_.data_,
CordzUpdateTracker::kSubCord);
@@ -1071,146 +753,6 @@ Cord Cord::Subcord(size_t pos, size_t new_size) const {
}
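As a quick illustration of the rewritten Subcord() (values hypothetical):

  #include "absl/strings/cord.h"

  absl::Cord cord("The quick brown fox");
  absl::Cord sub = cord.Subcord(4, 5);  // sub == "quick"
  // Out-of-range arguments are clamped: `pos` is capped at size() and
  // `new_size` at size() - pos, so callers need no explicit bounds checks.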
// --------------------------------------------------------------------
-// Balancing
-
-class CordForest {
- public:
- explicit CordForest(size_t length)
- : root_length_(length), trees_(kMinLengthSize, nullptr) {}
-
- void Build(CordRep* cord_root) {
- std::vector<CordRep*> pending = {cord_root};
-
- while (!pending.empty()) {
- CordRep* node = pending.back();
- pending.pop_back();
- CheckNode(node);
- if (ABSL_PREDICT_FALSE(!node->IsConcat())) {
- AddNode(node);
- continue;
- }
-
- CordRepConcat* concat_node = node->concat();
- if (concat_node->depth() >= kMinLengthSize ||
- concat_node->length < min_length[concat_node->depth()]) {
- pending.push_back(concat_node->right);
- pending.push_back(concat_node->left);
-
- if (concat_node->refcount.IsOne()) {
- concat_node->left = concat_freelist_;
- concat_freelist_ = concat_node;
- } else {
- CordRep::Ref(concat_node->right);
- CordRep::Ref(concat_node->left);
- CordRep::Unref(concat_node);
- }
- } else {
- AddNode(node);
- }
- }
- }
-
- CordRep* ConcatNodes() {
- CordRep* sum = nullptr;
- for (auto* node : trees_) {
- if (node == nullptr) continue;
-
- sum = PrependNode(node, sum);
- root_length_ -= node->length;
- if (root_length_ == 0) break;
- }
- ABSL_INTERNAL_CHECK(sum != nullptr, "Failed to locate sum node");
- return VerifyTree(sum);
- }
-
- private:
- CordRep* AppendNode(CordRep* node, CordRep* sum) {
- return (sum == nullptr) ? node : MakeConcat(sum, node);
- }
-
- CordRep* PrependNode(CordRep* node, CordRep* sum) {
- return (sum == nullptr) ? node : MakeConcat(node, sum);
- }
-
- void AddNode(CordRep* node) {
- CordRep* sum = nullptr;
-
- // Collect together everything with which we will merge with node
- int i = 0;
- for (; node->length > min_length[i + 1]; ++i) {
- auto& tree_at_i = trees_[i];
-
- if (tree_at_i == nullptr) continue;
- sum = PrependNode(tree_at_i, sum);
- tree_at_i = nullptr;
- }
-
- sum = AppendNode(node, sum);
-
- // Insert sum into appropriate place in the forest
- for (; sum->length >= min_length[i]; ++i) {
- auto& tree_at_i = trees_[i];
- if (tree_at_i == nullptr) continue;
-
- sum = MakeConcat(tree_at_i, sum);
- tree_at_i = nullptr;
- }
-
- // min_length[0] == 1, which means sum->length >= min_length[0]
- assert(i > 0);
- trees_[i - 1] = sum;
- }
-
- // Make concat node trying to resue existing CordRepConcat nodes we
- // already collected in the concat_freelist_.
- CordRep* MakeConcat(CordRep* left, CordRep* right) {
- if (concat_freelist_ == nullptr) return RawConcat(left, right);
-
- CordRepConcat* rep = concat_freelist_;
- if (concat_freelist_->left == nullptr) {
- concat_freelist_ = nullptr;
- } else {
- concat_freelist_ = concat_freelist_->left->concat();
- }
- SetConcatChildren(rep, left, right);
-
- return rep;
- }
-
- static void CheckNode(CordRep* node) {
- ABSL_INTERNAL_CHECK(node->length != 0u, "");
- if (node->IsConcat()) {
- ABSL_INTERNAL_CHECK(node->concat()->left != nullptr, "");
- ABSL_INTERNAL_CHECK(node->concat()->right != nullptr, "");
- ABSL_INTERNAL_CHECK(node->length == (node->concat()->left->length +
- node->concat()->right->length),
- "");
- }
- }
-
- size_t root_length_;
-
- // use an inlined vector instead of a flat array to get bounds checking
- absl::InlinedVector<CordRep*, kInlinedVectorSize> trees_;
-
- // List of concat nodes we can re-use for Cord balancing.
- CordRepConcat* concat_freelist_ = nullptr;
-};
-
-static CordRep* Rebalance(CordRep* node) {
- VerifyTree(node);
- assert(node->IsConcat());
-
- if (node->length == 0) {
- return nullptr;
- }
-
- CordForest forest(node->length);
- forest.Build(node);
- return forest.ConcatNodes();
-}
-
-// --------------------------------------------------------------------
// Comparators
namespace {
@@ -1256,7 +798,7 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return absl::string_view(data_.as_chars(), data_.inline_size());
}
- CordRep* node = tree();
+ CordRep* node = cord_internal::SkipCrcNode(tree());
if (node->IsFlat()) {
return absl::string_view(node->flat()->Data(), node->length);
}
@@ -1274,11 +816,6 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return tree->Data(tree->begin());
}
- // Walk down the left branches until we hit a non-CONCAT node.
- while (node->IsConcat()) {
- node = node->concat()->left;
- }
-
// Get the child node if we encounter a SUBSTRING.
size_t offset = 0;
size_t length = node->length;
@@ -1298,6 +835,28 @@ inline absl::string_view Cord::InlineRep::FindFlatStartPiece() const {
return absl::string_view(node->external()->base + offset, length);
}
+void Cord::SetExpectedChecksum(uint32_t crc) {
+ auto constexpr method = CordzUpdateTracker::kSetExpectedChecksum;
+ if (empty()) return;
+
+ if (!contents_.is_tree()) {
+ CordRep* rep = contents_.MakeFlatWithExtraCapacity(0);
+ rep = CordRepCrc::New(rep, crc);
+ contents_.EmplaceTree(rep, method);
+ } else {
+ const CordzUpdateScope scope(contents_.data_.cordz_info(), method);
+ CordRep* rep = CordRepCrc::New(contents_.data_.as_tree(), crc);
+ contents_.SetTree(rep, scope);
+ }
+}
+
+absl::optional<uint32_t> Cord::ExpectedChecksum() const {
+ if (!contents_.is_tree() || !contents_.tree()->IsCrc()) {
+ return absl::nullopt;
+ }
+ return contents_.tree()->crc()->crc;
+}
+
inline int Cord::CompareSlowPath(absl::string_view rhs, size_t compared_size,
size_t size_to_compare) const {
auto advance = [](Cord::ChunkIterator* it, absl::string_view* chunk) {
@@ -1473,42 +1032,6 @@ void Cord::CopyToArraySlowPath(char* dst) const {
}
}
-Cord::ChunkIterator& Cord::ChunkIterator::AdvanceStack() {
- auto& stack_of_right_children = stack_of_right_children_;
- if (stack_of_right_children.empty()) {
- assert(!current_chunk_.empty()); // Called on invalid iterator.
- // We have reached the end of the Cord.
- return *this;
- }
-
- // Process the next node on the stack.
- CordRep* node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
-
- // Walk down the left branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
-
- assert(node->IsExternal() || node->IsFlat());
- assert(length != 0);
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = absl::string_view(data + offset, length);
- current_leaf_ = node;
- return *this;
-}
-
Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
ABSL_HARDENING_ASSERT(bytes_remaining_ >= n &&
"Attempted to iterate past `end()`");
@@ -1551,166 +1074,33 @@ Cord Cord::ChunkIterator::AdvanceAndReadBytes(size_t n) {
return subcord;
}
- auto& stack_of_right_children = stack_of_right_children_;
- if (n < current_chunk_.size()) {
- // Range to read is a proper subrange of the current chunk.
- assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
- const char* data = subnode->IsExternal() ? subnode->external()->base
- : subnode->flat()->Data();
- subnode = NewSubstring(subnode, current_chunk_.data() - data, n);
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
- RemoveChunkPrefix(n);
- return subcord;
- }
-
- // Range to read begins with a proper subrange of the current chunk.
- assert(!current_chunk_.empty());
+ // Short circuit if reading the entire data edge.
assert(current_leaf_ != nullptr);
- CordRep* subnode = CordRep::Ref(current_leaf_);
- if (current_chunk_.size() < subnode->length) {
- const char* data = subnode->IsExternal() ? subnode->external()->base
- : subnode->flat()->Data();
- subnode = NewSubstring(subnode, current_chunk_.data() - data,
- current_chunk_.size());
- }
- n -= current_chunk_.size();
- bytes_remaining_ -= current_chunk_.size();
-
- // Process the next node(s) on the stack, reading whole subtrees depending on
- // their length and how many bytes we are advancing.
- CordRep* node = nullptr;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
- if (node->length > n) break;
- // TODO(qrczak): This might unnecessarily recreate existing concat nodes.
- // Avoiding that would need pretty complicated logic (instead of
- // current_leaf, keep current_subtree_ which points to the highest node
- // such that the current leaf can be found on the path of left children
- // starting from current_subtree_; delay creating subnode while node is
- // below current_subtree_; find the proper node along the path of left
- // children starting from current_subtree_ if this loop exits while staying
- // below current_subtree_; etc.; alternatively, push parents instead of
- // right children on the stack).
- subnode = Concat(subnode, CordRep::Ref(node));
- n -= node->length;
- bytes_remaining_ -= node->length;
- node = nullptr;
- }
-
- if (node == nullptr) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ if (n == current_leaf_->length) {
+ bytes_remaining_ = 0;
+ current_chunk_ = {};
+ CordRep* tree = CordRep::Ref(current_leaf_);
+ subcord.contents_.EmplaceTree(VerifyTree(tree), method);
return subcord;
}
- // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- if (node->concat()->left->length > n) {
- // Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Read left, descend right.
- subnode = Concat(subnode, CordRep::Ref(node->concat()->left));
- n -= node->concat()->left->length;
- bytes_remaining_ -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
+ // From this point on, we need a partial substring node.
+ // Get the underlying flat or external data payload, and compute the
+ // data pointer and offset into the current flat or external node.
+ CordRep* payload = current_leaf_->IsSubstring()
+ ? current_leaf_->substring()->child
+ : current_leaf_;
+ const char* data = payload->IsExternal() ? payload->external()->base
+ : payload->flat()->Data();
+ const size_t offset = current_chunk_.data() - data;
- // Range to read ends with a proper (possibly empty) subrange of the current
- // chunk.
- assert(node->IsExternal() || node->IsFlat());
- assert(length > n);
- if (n > 0) {
- subnode = Concat(subnode, NewSubstring(CordRep::Ref(node), offset, n));
- }
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = absl::string_view(data + offset + n, length - n);
- current_leaf_ = node;
+ auto* tree = CordRepSubstring::Substring(payload, offset, n);
+ subcord.contents_.EmplaceTree(VerifyTree(tree), method);
bytes_remaining_ -= n;
- subcord.contents_.EmplaceTree(VerifyTree(subnode), method);
+ current_chunk_.remove_prefix(n);
return subcord;
}
-void Cord::ChunkIterator::AdvanceBytesSlowPath(size_t n) {
- assert(bytes_remaining_ >= n && "Attempted to iterate past `end()`");
- assert(n >= current_chunk_.size()); // This should only be called when
- // iterating to a new node.
-
- n -= current_chunk_.size();
- bytes_remaining_ -= current_chunk_.size();
-
- if (stack_of_right_children_.empty()) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- return;
- }
-
- // Process the next node(s) on the stack, skipping whole subtrees depending on
- // their length and how many bytes we are advancing.
- CordRep* node = nullptr;
- auto& stack_of_right_children = stack_of_right_children_;
- while (!stack_of_right_children.empty()) {
- node = stack_of_right_children.back();
- stack_of_right_children.pop_back();
- if (node->length > n) break;
- n -= node->length;
- bytes_remaining_ -= node->length;
- node = nullptr;
- }
-
- if (node == nullptr) {
- // We have reached the end of the Cord.
- assert(bytes_remaining_ == 0);
- return;
- }
-
- // Walk down the appropriate branches until we hit a non-CONCAT node. Save the
- // right children to the stack for subsequent traversal.
- while (node->IsConcat()) {
- if (node->concat()->left->length > n) {
- // Push right, descend left.
- stack_of_right_children.push_back(node->concat()->right);
- node = node->concat()->left;
- } else {
- // Skip left, descend right.
- n -= node->concat()->left->length;
- bytes_remaining_ -= node->concat()->left->length;
- node = node->concat()->right;
- }
- }
-
- // Get the child node if we encounter a SUBSTRING.
- size_t offset = 0;
- size_t length = node->length;
- if (node->IsSubstring()) {
- offset = node->substring()->start;
- node = node->substring()->child;
- }
-
- assert(node->IsExternal() || node->IsFlat());
- assert(length > n);
- const char* data =
- node->IsExternal() ? node->external()->base : node->flat()->Data();
- current_chunk_ = absl::string_view(data + offset + n, length - n);
- current_leaf_ = node;
- bytes_remaining_ -= n;
-}
-
char Cord::operator[](size_t i) const {
ABSL_HARDENING_ASSERT(i < size());
size_t offset = i;
@@ -1718,6 +1108,7 @@ char Cord::operator[](size_t i) const {
if (rep == nullptr) {
return contents_.data()[i];
}
+ rep = cord_internal::SkipCrcNode(rep);
while (true) {
assert(rep != nullptr);
assert(offset < rep->length);
@@ -1729,16 +1120,6 @@ char Cord::operator[](size_t i) const {
} else if (rep->IsExternal()) {
// Get the "i"th character from the external array.
return rep->external()->base[offset];
- } else if (rep->IsConcat()) {
- // Recursively branch to the side of the concatenation that the "i"th
- // character is on.
- size_t left_length = rep->concat()->left->length;
- if (offset < left_length) {
- rep = rep->concat()->left;
- } else {
- offset -= left_length;
- rep = rep->concat()->right;
- }
} else {
// This must be a substring node, so bypass it to get to the child.
assert(rep->IsSubstring());
@@ -1778,6 +1159,7 @@ absl::string_view Cord::FlattenSlowPath() {
/* static */ bool Cord::GetFlatAux(CordRep* rep, absl::string_view* fragment) {
assert(rep != nullptr);
+ rep = cord_internal::SkipCrcNode(rep);
if (rep->IsFlat()) {
*fragment = absl::string_view(rep->flat()->Data(), rep->length);
return true;
@@ -1807,6 +1189,9 @@ absl::string_view Cord::FlattenSlowPath() {
/* static */ void Cord::ForEachChunkAux(
absl::cord_internal::CordRep* rep,
absl::FunctionRef<void(absl::string_view)> callback) {
+ assert(rep != nullptr);
+ rep = cord_internal::SkipCrcNode(rep);
+
if (rep->IsBtree()) {
ChunkIterator it(rep), end;
while (it != end) {
@@ -1816,44 +1201,13 @@ absl::string_view Cord::FlattenSlowPath() {
return;
}
- assert(rep != nullptr);
- int stack_pos = 0;
- constexpr int stack_max = 128;
- // Stack of right branches for tree traversal
- absl::cord_internal::CordRep* stack[stack_max];
- absl::cord_internal::CordRep* current_node = rep;
- while (true) {
- if (current_node->IsConcat()) {
- if (stack_pos == stack_max) {
- // There's no more room on our stack array to add another right branch,
- // and the idea is to avoid allocations, so call this function
- // recursively to navigate this subtree further. (This is not something
- // we expect to happen in practice).
- ForEachChunkAux(current_node, callback);
-
- // Pop the next right branch and iterate.
- current_node = stack[--stack_pos];
- continue;
- } else {
- // Save the right branch for later traversal and continue down the left
- // branch.
- stack[stack_pos++] = current_node->concat()->right;
- current_node = current_node->concat()->left;
- continue;
- }
- }
- // This is a leaf node, so invoke our callback.
- absl::string_view chunk;
- bool success = GetFlatAux(current_node, &chunk);
- assert(success);
- if (success) {
- callback(chunk);
- }
- if (stack_pos == 0) {
- // end of traversal
- return;
- }
- current_node = stack[--stack_pos];
+ // This is a leaf node, so invoke our callback.
+ absl::cord_internal::CordRep* current_node = cord_internal::SkipCrcNode(rep);
+ absl::string_view chunk;
+ bool success = GetFlatAux(current_node, &chunk);
+ assert(success);
+ if (success) {
+ callback(chunk);
}
}
@@ -1868,14 +1222,11 @@ static void DumpNode(CordRep* rep, bool include_data, std::ostream* os,
*os << " [";
if (include_data) *os << static_cast<void*>(rep);
*os << "]";
- *os << " " << (IsRootBalanced(rep) ? 'b' : 'u');
*os << " " << std::setw(indent) << "";
- if (rep->IsConcat()) {
- *os << "CONCAT depth=" << Depth(rep) << "\n";
+ if (rep->IsCrc()) {
+ *os << "CRC crc=" << rep->crc()->crc << "\n";
indent += kIndentStep;
- indents.push_back(indent);
- stack.push_back(rep->concat()->right);
- rep = rep->concat()->left;
+ rep = rep->crc()->child;
} else if (rep->IsSubstring()) {
*os << "SUBSTRING @ " << rep->substring()->start << "\n";
indent += kIndentStep;
@@ -1912,7 +1263,7 @@ static std::string ReportError(CordRep* root, CordRep* node) {
}
static bool VerifyNode(CordRep* root, CordRep* start_node,
- bool full_validation) {
+ bool /* full_validation */) {
absl::InlinedVector<CordRep*, 2> worklist;
worklist.push_back(start_node);
do {
@@ -1922,21 +1273,10 @@ static bool VerifyNode(CordRep* root, CordRep* start_node,
ABSL_INTERNAL_CHECK(node != nullptr, ReportError(root, node));
if (node != root) {
ABSL_INTERNAL_CHECK(node->length != 0, ReportError(root, node));
+ ABSL_INTERNAL_CHECK(!node->IsCrc(), ReportError(root, node));
}
- if (node->IsConcat()) {
- ABSL_INTERNAL_CHECK(node->concat()->left != nullptr,
- ReportError(root, node));
- ABSL_INTERNAL_CHECK(node->concat()->right != nullptr,
- ReportError(root, node));
- ABSL_INTERNAL_CHECK((node->length == node->concat()->left->length +
- node->concat()->right->length),
- ReportError(root, node));
- if (full_validation) {
- worklist.push_back(node->concat()->right);
- worklist.push_back(node->concat()->left);
- }
- } else if (node->IsFlat()) {
+ if (node->IsFlat()) {
ABSL_INTERNAL_CHECK(node->length <= node->flat()->Capacity(),
ReportError(root, node));
} else if (node->IsExternal()) {
@@ -1949,75 +1289,17 @@ static bool VerifyNode(CordRep* root, CordRep* start_node,
ABSL_INTERNAL_CHECK(node->substring()->start + node->length <=
node->substring()->child->length,
ReportError(root, node));
+ } else if (node->IsCrc()) {
+ ABSL_INTERNAL_CHECK(node->crc()->child != nullptr,
+ ReportError(root, node));
+ ABSL_INTERNAL_CHECK(node->crc()->length == node->crc()->child->length,
+ ReportError(root, node));
+ worklist.push_back(node->crc()->child);
}
} while (!worklist.empty());
return true;
}
-// Traverses the tree and computes the total memory allocated.
-/* static */ size_t Cord::MemoryUsageAux(const CordRep* rep) {
- size_t total_mem_usage = 0;
-
- // Allow a quick exit for the common case that the root is a leaf.
- if (RepMemoryUsageLeaf(rep, &total_mem_usage)) {
- return total_mem_usage;
- }
-
- // Iterate over the tree. cur_node is never a leaf node and leaf nodes will
- // never be appended to tree_stack. This reduces overhead from manipulating
- // tree_stack.
- absl::InlinedVector<const CordRep*, kInlinedVectorSize> tree_stack;
- const CordRep* cur_node = rep;
- while (true) {
- const CordRep* next_node = nullptr;
-
- if (cur_node->IsConcat()) {
- total_mem_usage += sizeof(CordRepConcat);
- const CordRep* left = cur_node->concat()->left;
- if (!RepMemoryUsageLeaf(left, &total_mem_usage)) {
- next_node = left;
- }
-
- const CordRep* right = cur_node->concat()->right;
- if (!RepMemoryUsageLeaf(right, &total_mem_usage)) {
- if (next_node) {
- tree_stack.push_back(next_node);
- }
- next_node = right;
- }
- } else if (cur_node->IsBtree()) {
- total_mem_usage += sizeof(CordRepBtree);
- const CordRepBtree* node = cur_node->btree();
- if (node->height() == 0) {
- for (const CordRep* edge : node->Edges()) {
- RepMemoryUsageDataEdge(edge, &total_mem_usage);
- }
- } else {
- for (const CordRep* edge : node->Edges()) {
- tree_stack.push_back(edge);
- }
- }
- } else {
- // Since cur_node is not a leaf or a concat node it must be a substring.
- assert(cur_node->IsSubstring());
- total_mem_usage += sizeof(CordRepSubstring);
- next_node = cur_node->substring()->child;
- if (RepMemoryUsageLeaf(next_node, &total_mem_usage)) {
- next_node = nullptr;
- }
- }
-
- if (!next_node) {
- if (tree_stack.empty()) {
- return total_mem_usage;
- }
- next_node = tree_stack.back();
- tree_stack.pop_back();
- }
- cur_node = next_node;
- }
-}
-
std::ostream& operator<<(std::ostream& out, const Cord& cord) {
for (absl::string_view chunk : cord.Chunks()) {
out.write(chunk.data(), chunk.size());
@@ -2035,7 +1317,6 @@ uint8_t CordTestAccess::LengthToTag(size_t s) {
ABSL_INTERNAL_CHECK(s <= kMaxFlatLength, absl::StrCat("Invalid length ", s));
return cord_internal::AllocatedSizeToTag(s + cord_internal::kFlatOverhead);
}
-size_t CordTestAccess::SizeofCordRepConcat() { return sizeof(CordRepConcat); }
size_t CordTestAccess::SizeofCordRepExternal() {
return sizeof(CordRepExternal);
}
diff --git a/absl/strings/cord.h b/absl/strings/cord.h
index f0a19914..18d6ab85 100644
--- a/absl/strings/cord.h
+++ b/absl/strings/cord.h
@@ -70,6 +70,7 @@
#include <string>
#include <type_traits>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/per_thread_tls.h"
@@ -78,9 +79,13 @@
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/meta/type_traits.h"
+#include "absl/strings/cord_analysis.h"
+#include "absl/strings/cord_buffer.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_reader.h"
+#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/cordz_functions.h"
#include "absl/strings/internal/cordz_info.h"
@@ -100,6 +105,20 @@ template <typename Releaser>
Cord MakeCordFromExternal(absl::string_view, Releaser&&);
void CopyCordToString(const Cord& src, std::string* dst);
+// Cord memory accounting modes
+enum class CordMemoryAccounting {
+ // Counts the *approximate* number of bytes held in full or in part by this
+ // Cord (which may not remain the same between invocations). Cords that share
+ // memory could each be "charged" independently for the same shared memory.
+ kTotal,
+
+ // Counts the *approximate* number of bytes held in full or in part by this
+ // Cord, weighted by the sharing ratio of that data. For example, if some
+ // data edge is shared by 4 different Cords, then each cord is attributed
+ // 1/4th of that edge's memory usage as its 'fair share'.
+ kFairShare,
+};
+
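A short sketch contrasting the two accounting modes (the long value forces a
tree representation; exact byte counts are illustrative, not guaranteed):

  #include <cassert>
  #include <string>
  #include "absl/strings/cord.h"

  absl::Cord a(std::string(1000, 'x'));
  absl::Cord b = a;  // a and b now share the same underlying tree.

  // kTotal charges each cord independently for the shared bytes.
  size_t total = a.EstimatedMemoryUsage();  // defaults to kTotal
  // kFairShare weights shared bytes by 1/refcount, so each of the two
  // cords is charged roughly half of the shared storage.
  size_t fair =
      a.EstimatedMemoryUsage(absl::CordMemoryAccounting::kFairShare);
  assert(fair <= total);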
// Cord
//
// A Cord is a sequence of characters, designed to be more efficient than a
@@ -214,7 +233,7 @@ class Cord {
//
// Releases the Cord data. Any nodes that share data with other Cords, if
// applicable, will have their reference counts reduced by 1.
- void Clear();
+ ABSL_ATTRIBUTE_REINITIALIZES void Clear();
// Cord::Append()
//
@@ -226,6 +245,45 @@ class Cord {
template <typename T, EnableIfString<T> = 0>
void Append(T&& src);
+ // Appends `buffer` to this cord, unless `buffer` has a zero length in which
+ // case this method has no effect on this cord instance.
+ // This method is guaranteed to consume `buffer`.
+ void Append(CordBuffer buffer);
+
+ // Returns a CordBuffer, reusing any existing spare capacity in this cord.
+ //
+ // Cord instances may have additional unused capacity in the last (or first)
+ // nodes of the underlying tree to facilitate amortized growth. This method
+ // allows applications to explicitly use this spare capacity if available,
+ // or create a new CordBuffer instance otherwise.
+ // If this cord has a final non-shared node with at least `min_capacity`
+ // available, then this method will return that buffer including its data
+ // contents. That is, the returned buffer will have a non-zero length, and
+ // a capacity of at least `buffer.length + min_capacity`. Otherwise, this
+ // method will return `CordBuffer::CreateWithDefaultLimit(capacity)`.
+ //
+ // Below is an example of using GetAppendBuffer. Notice that in this example
+ // we use `GetAppendBuffer()` only on the first iteration. As we know nothing
+ // about any initial extra capacity in `cord`, we may be able to use that
+ // extra capacity. But as we add new buffers with fully utilized contents
+ // after that, we avoid calling `GetAppendBuffer()` on subsequent iterations:
+ // while doing so would work fine, it would result in an unnecessary
+ // inspection of the cord's contents:
+ //
+ // void AppendRandomDataToCord(absl::Cord &cord, size_t n) {
+ // bool first = true;
+ // while (n > 0) {
+ // CordBuffer buffer = first ? cord.GetAppendBuffer(n)
+ // : CordBuffer::CreateWithDefaultLimit(n);
+ // absl::Span<char> data = buffer.available_up_to(n);
+ // FillRandomValues(data.data(), data.size());
+ // buffer.IncreaseLengthBy(data.size());
+ // cord.Append(std::move(buffer));
+ // n -= data.size();
+ // first = false;
+ // }
+ // }
+ CordBuffer GetAppendBuffer(size_t capacity, size_t min_capacity = 16);
+
// Cord::Prepend()
//
// Prepends data to the Cord, which may come from another Cord or other string
@@ -235,6 +293,11 @@ class Cord {
template <typename T, EnableIfString<T> = 0>
void Prepend(T&& src);
+ // Prepends `buffer` to this cord, unless `buffer` has a zero length in which
+ // case this method has no effect on this cord instance.
+ // This method is guaranteed to consume `buffer`.
+ void Prepend(CordBuffer buffer);
+
// Cord::RemovePrefix()
//
// Removes the first `n` bytes of a Cord.
@@ -270,11 +333,10 @@ class Cord {
// Cord::EstimatedMemoryUsage()
//
- // Returns the *approximate* number of bytes held in full or in part by this
- // Cord (which may not remain the same between invocations). Note that Cords
- // that share memory could each be "charged" independently for the same shared
- // memory.
- size_t EstimatedMemoryUsage() const;
+ // Returns the *approximate* number of bytes held by this cord.
+ // See CordMemoryAccounting for more information on the accounting method.
+ size_t EstimatedMemoryUsage(CordMemoryAccounting accounting_method =
+ CordMemoryAccounting::kTotal) const;
// Cord::Compare()
//
@@ -324,7 +386,7 @@ class Cord {
//----------------------------------------------------------------------------
//
// A `Cord::ChunkIterator` allows iteration over the constituent chunks of its
- // Cord. Such iteration allows you to perform non-const operatons on the data
+ // Cord. Such iteration allows you to perform non-const operations on the data
// of a Cord without modifying it.
//
// Generally, you do not instantiate a `Cord::ChunkIterator` directly;
@@ -372,12 +434,6 @@ class Cord {
using CordRepBtree = absl::cord_internal::CordRepBtree;
using CordRepBtreeReader = absl::cord_internal::CordRepBtreeReader;
- // Stack of right children of concat nodes that we have to visit.
- // Keep this at the end of the structure to avoid cache-thrashing.
- // TODO(jgm): Benchmark to see if there's a more optimal value than 47 for
- // the inlined vector size (47 exists for backward compatibility).
- using Stack = absl::InlinedVector<absl::cord_internal::CordRep*, 47>;
-
// Constructs a `begin()` iterator from `tree`. `tree` must not be null.
explicit ChunkIterator(cord_internal::CordRep* tree);
@@ -393,17 +449,10 @@ class Cord {
Cord AdvanceAndReadBytes(size_t n);
void AdvanceBytes(size_t n);
- // Stack specific operator++
- ChunkIterator& AdvanceStack();
-
// Btree specific operator++
ChunkIterator& AdvanceBtree();
void AdvanceBytesBtree(size_t n);
- // Iterates `n` bytes, where `n` is expected to be greater than or equal to
- // `current_chunk_.size()`.
- void AdvanceBytesSlowPath(size_t n);
-
// A view into bytes of the current `CordRep`. It may only be a view to a
// suffix of bytes if this is being used by `CharIterator`.
absl::string_view current_chunk_;
@@ -416,12 +465,9 @@ class Cord {
// Cord reader for cord btrees. Empty if not traversing a btree.
CordRepBtreeReader btree_reader_;
-
- // See 'Stack' alias definition.
- Stack stack_of_right_children_;
};
- // Cord::ChunkIterator::chunk_begin()
+ // Cord::chunk_begin()
//
// Returns an iterator to the first chunk of the `Cord`.
//
@@ -437,7 +483,7 @@ class Cord {
// }
ChunkIterator chunk_begin() const;
- // Cord::ChunkItertator::chunk_end()
+ // Cord::chunk_end()
//
// Returns an iterator one increment past the last chunk of the `Cord`.
//
@@ -447,7 +493,7 @@ class Cord {
ChunkIterator chunk_end() const;
//----------------------------------------------------------------------------
- // Cord::ChunkIterator::ChunkRange
+ // Cord::ChunkRange
//----------------------------------------------------------------------------
//
// `ChunkRange` is a helper class for iterating over the chunks of the `Cord`,
@@ -461,7 +507,7 @@ class Cord {
class ChunkRange {
public:
// Fulfill minimum c++ container requirements [container.requirements]
- // Theses (partial) container type definitions allow ChunkRange to be used
+ // These (partial) container type definitions allow ChunkRange to be used
// in various utilities expecting a subset of [container.requirements].
// For example, the below enables using `::testing::ElementsAre(...)`
using value_type = absl::string_view;
@@ -481,9 +527,9 @@ class Cord {
// Cord::Chunks()
//
- // Returns a `Cord::ChunkIterator::ChunkRange` for iterating over the chunks
- // of a `Cord` with a range-based for-loop. For most iteration tasks on a
- // Cord, use `Cord::Chunks()` to retrieve this iterator.
+ // Returns a `Cord::ChunkRange` for iterating over the chunks of a `Cord` with
+ // a range-based for-loop. For most iteration tasks on a Cord, use
+ // `Cord::Chunks()` to retrieve this iterator.
//
// Example:
//
@@ -549,7 +595,7 @@ class Cord {
ChunkIterator chunk_iterator_;
};
- // Cord::CharIterator::AdvanceAndRead()
+ // Cord::AdvanceAndRead()
//
// Advances the `Cord::CharIterator` by `n_bytes` and returns the bytes
// advanced as a separate `Cord`. `n_bytes` must be less than or equal to the
@@ -557,21 +603,21 @@ class Cord {
// valid to pass `char_end()` and `0`.
static Cord AdvanceAndRead(CharIterator* it, size_t n_bytes);
- // Cord::CharIterator::Advance()
+ // Cord::Advance()
//
// Advances the `Cord::CharIterator` by `n_bytes`. `n_bytes` must be less than
// or equal to the number of bytes remaining within the Cord; otherwise,
// behavior is undefined. It is valid to pass `char_end()` and `0`.
static void Advance(CharIterator* it, size_t n_bytes);
- // Cord::CharIterator::ChunkRemaining()
+ // Cord::ChunkRemaining()
//
// Returns the longest contiguous view starting at the iterator's position.
//
// `it` must be dereferenceable.
static absl::string_view ChunkRemaining(const CharIterator& it);
- // Cord::CharIterator::char_begin()
+ // Cord::char_begin()
//
// Returns an iterator to the first character of the `Cord`.
//
@@ -580,7 +626,7 @@ class Cord {
// a `CharIterator` where range-based for-loops may not be available.
CharIterator char_begin() const;
- // Cord::CharIterator::char_end()
+ // Cord::char_end()
//
// Returns an iterator to one past the last character of the `Cord`.
//
@@ -589,13 +635,13 @@ class Cord {
// a `CharIterator` where range-based for-loops are not useful.
CharIterator char_end() const;
- // Cord::CharIterator::CharRange
+ // Cord::CharRange
//
// `CharRange` is a helper class for iterating over the characters of a
// `Cord`, producing an iterator which can be used within a range-based for
// loop.
// Construction of a `CharRange` will return an iterator pointing to the first
// character of the Cord. Generally, do not construct a `CharRange` directly;
- // instead, prefer to use the `Cord::Chars()` method show below.
+ // instead, prefer to use the `Cord::Chars()` method shown below.
//
// Implementation note: `CharRange` is simply a convenience wrapper over
// `Cord::char_begin()` and `Cord::char_end()`.
@@ -620,11 +666,11 @@ class Cord {
const Cord* cord_;
};
- // Cord::CharIterator::Chars()
+ // Cord::Chars()
//
- // Returns a `Cord::CharIterator` for iterating over the characters of a
- // `Cord` with a range-based for-loop. For most character-based iteration
- // tasks on a Cord, use `Cord::Chars()` to retrieve this iterator.
+ // Returns a `Cord::CharRange` for iterating over the characters of a `Cord`
+ // with a range-based for-loop. For most character-based iteration tasks on a
+ // Cord, use `Cord::Chars()` to retrieve this iterator.
//
// Example:
//
@@ -671,6 +717,29 @@ class Cord {
cord->Append(part);
}
+ // Cord::SetExpectedChecksum()
+ //
+ // Stores a checksum value with this non-empty cord instance, for later
+ // retrieval.
+ //
+ // The expected checksum is a number stored out-of-band, alongside the data.
+ // It is preserved across copies and assignments, but any mutations to a cord
+ // will cause it to lose its expected checksum.
+ //
+ // The expected checksum is not part of a Cord's value, and does not affect
+ // operations such as equality or hashing.
+ //
+ // This field is intended to store a CRC32C checksum for later validation, to
+ // help support end-to-end checksum workflows. However, the Cord API itself
+ // does no CRC validation, and assigns no meaning to this number.
+ //
+ // This call has no effect if this cord is empty.
+ void SetExpectedChecksum(uint32_t crc);
+
+ // Returns this cord's expected checksum, if it has one. Otherwise, returns
+ // nullopt.
+ absl::optional<uint32_t> ExpectedChecksum() const;
+
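A small usage sketch of the checksum API described above (the CRC value is an
arbitrary constant, not a computed CRC32C):

  #include <cassert>
  #include "absl/strings/cord.h"

  absl::Cord cord("payload");
  assert(cord.ExpectedChecksum() == absl::nullopt);
  cord.SetExpectedChecksum(0xDEADBEEF);
  assert(cord.ExpectedChecksum() == 0xDEADBEEF);
  cord.Append("-mutated");  // Any mutation drops the expected checksum.
  assert(cord.ExpectedChecksum() == absl::nullopt);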
template <typename H>
friend H AbslHashValue(H hash_state, const absl::Cord& c) {
absl::optional<absl::string_view> maybe_flat = c.TryFlat();
@@ -686,7 +755,8 @@ class Cord {
// be used by spelling absl::strings_internal::MakeStringConstant, which is
// also an internal API.
template <typename T>
- explicit constexpr Cord(strings_internal::StringConstant<T>);
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ constexpr Cord(strings_internal::StringConstant<T>);
private:
using CordRep = absl::cord_internal::CordRep;
@@ -738,12 +808,12 @@ class Cord {
bool empty() const;
size_t size() const;
const char* data() const; // Returns nullptr if holding pointer
- void set_data(const char* data, size_t n,
- bool nullify_tail); // Discards pointer, if any
- char* set_data(size_t n); // Write data to the result
+ void set_data(const char* data, size_t n); // Discards pointer, if any
+ char* set_data(size_t n); // Write data to the result
// Returns nullptr if holding bytes
absl::cord_internal::CordRep* tree() const;
absl::cord_internal::CordRep* as_tree() const;
+ const char* as_chars() const;
// Returns non-null iff was holding a pointer
absl::cord_internal::CordRep* clear();
// Converts to pointer if necessary.
@@ -831,6 +901,11 @@ class Cord {
// Returns true if the Cord is being profiled by cordz.
bool is_profiled() const { return data_.is_tree() && data_.is_profiled(); }
+ // Returns the available inlined capacity, or 0 if is_tree() == true.
+ size_t remaining_inline_capacity() const {
+ return data_.is_tree() ? 0 : kMaxInline - data_.inline_size();
+ }
+
// Returns the profiled CordzInfo, or nullptr if not sampled.
absl::cord_internal::CordzInfo* cordz_info() const {
return data_.cordz_info();
@@ -861,9 +936,6 @@ class Cord {
};
InlineRep contents_;
- // Helper for MemoryUsage().
- static size_t MemoryUsageAux(const absl::cord_internal::CordRep* rep);
-
// Helper for GetFlat() and TryFlat().
static bool GetFlatAux(absl::cord_internal::CordRep* rep,
absl::string_view* fragment);
@@ -901,6 +973,15 @@ class Cord {
template <typename C>
void AppendImpl(C&& src);
+ // Appends / Prepends `src` to this instance, using precise sizing.
+ // These methods explicitly do not attempt to use any spare capacity
+ // in any pending (last appended) privately owned flat node.
+ // Requires `src` to be <= kMaxFlatLength.
+ void AppendPrecise(absl::string_view src, MethodIdentifier method);
+ void PrependPrecise(absl::string_view src, MethodIdentifier method);
+
+ CordBuffer GetAppendBufferSlowPath(size_t capacity, size_t min_capacity);
+
// Prepends the provided data to this instance. `method` contains the public
// API method for this action which is tracked for Cordz sampling purposes.
void PrependArray(absl::string_view src, MethodIdentifier method);
@@ -938,8 +1019,8 @@ namespace cord_internal {
// Fast implementation of memmove for up to 15 bytes. This implementation is
// safe for overlapping regions. If nullify_tail is true, the destination is
// padded with '\0' up to 16 bytes.
-inline void SmallMemmove(char* dst, const char* src, size_t n,
- bool nullify_tail = false) {
+template <bool nullify_tail = false>
+inline void SmallMemmove(char* dst, const char* src, size_t n) {
if (n >= 8) {
assert(n <= 16);
uint64_t buf1;
@@ -976,22 +1057,16 @@ inline void SmallMemmove(char* dst, const char* src, size_t n,
}
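With `nullify_tail` now a compile-time template parameter, call sites select
the padding behavior statically; a minimal sketch (buffers hypothetical):

  char dst[16];
  const char* src = "hello";
  // Copy 5 bytes and zero-pad `dst` out to 16 bytes.
  absl::cord_internal::SmallMemmove<true>(dst, src, 5);
  // Plain copy, no tail padding (the default).
  absl::cord_internal::SmallMemmove(dst, src, 5);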
// Does non-template-specific `CordRepExternal` initialization.
-// Expects `data` to be non-empty.
+// Requires `data` to be non-empty.
void InitializeCordRepExternal(absl::string_view data, CordRepExternal* rep);
// Creates a new `CordRep` that owns `data` and `releaser` and returns a pointer
-// to it, or `nullptr` if `data` was empty.
+// to it. Requires `data` to be non-empty.
template <typename Releaser>
// NOLINTNEXTLINE - suppress clang-tidy raw pointer return.
CordRep* NewExternalRep(absl::string_view data, Releaser&& releaser) {
+ assert(!data.empty());
using ReleaserType = absl::decay_t<Releaser>;
- if (data.empty()) {
- // Never create empty external nodes.
- InvokeReleaser(Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
- data);
- return nullptr;
- }
-
CordRepExternal* rep = new CordRepExternalImpl<ReleaserType>(
std::forward<Releaser>(releaser), 0);
InitializeCordRepExternal(data, rep);
@@ -1011,10 +1086,15 @@ inline CordRep* NewExternalRep(absl::string_view data,
template <typename Releaser>
Cord MakeCordFromExternal(absl::string_view data, Releaser&& releaser) {
Cord cord;
- if (auto* rep = ::absl::cord_internal::NewExternalRep(
- data, std::forward<Releaser>(releaser))) {
- cord.contents_.EmplaceTree(rep,
+ if (ABSL_PREDICT_TRUE(!data.empty())) {
+ cord.contents_.EmplaceTree(::absl::cord_internal::NewExternalRep(
+ data, std::forward<Releaser>(releaser)),
Cord::MethodIdentifier::kMakeCordFromExternal);
+ } else {
+ using ReleaserType = absl::decay_t<Releaser>;
+ cord_internal::InvokeReleaser(
+ cord_internal::Rank0{}, ReleaserType(std::forward<Releaser>(releaser)),
+ data);
}
return cord;
}
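For illustration, a typical call under the new empty-input handling (the blob
and releaser here are hypothetical):

  #include <string>
  #include "absl/strings/cord.h"

  auto* blob = new std::string(1 << 20, 'x');
  absl::Cord cord = absl::MakeCordFromExternal(
      *blob, [blob](absl::string_view) { delete blob; });
  // With the change above, an empty `data` no longer creates an external
  // node: the releaser is invoked immediately and an empty cord is returned.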
@@ -1069,6 +1149,11 @@ inline const char* Cord::InlineRep::data() const {
return is_tree() ? nullptr : data_.as_chars();
}
+inline const char* Cord::InlineRep::as_chars() const {
+ assert(!data_.is_tree());
+ return data_.as_chars();
+}
+
inline absl::cord_internal::CordRep* Cord::InlineRep::as_tree() const {
assert(data_.is_tree());
return data_.as_tree();
@@ -1207,10 +1292,15 @@ inline size_t Cord::size() const {
inline bool Cord::empty() const { return contents_.empty(); }
-inline size_t Cord::EstimatedMemoryUsage() const {
+inline size_t Cord::EstimatedMemoryUsage(
+ CordMemoryAccounting accounting_method) const {
size_t result = sizeof(Cord);
if (const absl::cord_internal::CordRep* rep = contents_.tree()) {
- result += MemoryUsageAux(rep);
+ if (accounting_method == CordMemoryAccounting::kFairShare) {
+ result += cord_internal::GetEstimatedFairShareMemoryUsage(rep);
+ } else {
+ result += cord_internal::GetEstimatedMemoryUsage(rep);
+ }
}
return result;
}
@@ -1248,6 +1338,31 @@ inline void Cord::Prepend(absl::string_view src) {
PrependArray(src, CordzUpdateTracker::kPrependString);
}
+inline void Cord::Append(CordBuffer buffer) {
+ if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ absl::string_view short_value;
+ if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+ contents_.AppendTree(rep, CordzUpdateTracker::kAppendCordBuffer);
+ } else {
+ AppendPrecise(short_value, CordzUpdateTracker::kAppendCordBuffer);
+ }
+}
+
+inline void Cord::Prepend(CordBuffer buffer) {
+ if (ABSL_PREDICT_FALSE(buffer.length() == 0)) return;
+ absl::string_view short_value;
+ if (CordRep* rep = buffer.ConsumeValue(short_value)) {
+ contents_.PrependTree(rep, CordzUpdateTracker::kPrependCordBuffer);
+ } else {
+ PrependPrecise(short_value, CordzUpdateTracker::kPrependCordBuffer);
+ }
+}
+
+inline CordBuffer Cord::GetAppendBuffer(size_t capacity, size_t min_capacity) {
+ if (empty()) return CordBuffer::CreateWithDefaultLimit(capacity);
+ return GetAppendBufferSlowPath(capacity, min_capacity);
+}
+
extern template void Cord::Append(std::string&& src);
extern template void Cord::Prepend(std::string&& src);
@@ -1274,27 +1389,27 @@ inline bool Cord::StartsWith(absl::string_view rhs) const {
}
inline void Cord::ChunkIterator::InitTree(cord_internal::CordRep* tree) {
+ tree = cord_internal::SkipCrcNode(tree);
if (tree->tag == cord_internal::BTREE) {
current_chunk_ = btree_reader_.Init(tree->btree());
- return;
+ } else {
+ current_leaf_ = tree;
+ current_chunk_ = cord_internal::EdgeData(tree);
}
-
- stack_of_right_children_.push_back(tree);
- operator++();
}
-inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree)
- : bytes_remaining_(tree->length) {
+inline Cord::ChunkIterator::ChunkIterator(cord_internal::CordRep* tree) {
+ bytes_remaining_ = tree->length;
InitTree(tree);
}
-inline Cord::ChunkIterator::ChunkIterator(const Cord* cord)
- : bytes_remaining_(cord->size()) {
- if (cord->contents_.is_tree()) {
- InitTree(cord->contents_.as_tree());
+inline Cord::ChunkIterator::ChunkIterator(const Cord* cord) {
+ if (CordRep* tree = cord->contents_.tree()) {
+ bytes_remaining_ = tree->length;
+ InitTree(tree);
} else {
- current_chunk_ =
- absl::string_view(cord->contents_.data(), bytes_remaining_);
+ bytes_remaining_ = cord->contents_.inline_size();
+ current_chunk_ = {cord->contents_.data(), bytes_remaining_};
}
}
@@ -1324,8 +1439,11 @@ inline Cord::ChunkIterator& Cord::ChunkIterator::operator++() {
assert(bytes_remaining_ >= current_chunk_.size());
bytes_remaining_ -= current_chunk_.size();
if (bytes_remaining_ > 0) {
- return btree_reader_ ? AdvanceBtree() : AdvanceStack();
- } else {
+ if (btree_reader_) {
+ return AdvanceBtree();
+ } else {
+ assert(!current_chunk_.empty()); // Called on invalid iterator.
+ }
current_chunk_ = {};
}
return *this;
@@ -1366,7 +1484,11 @@ inline void Cord::ChunkIterator::AdvanceBytes(size_t n) {
if (ABSL_PREDICT_TRUE(n < current_chunk_.size())) {
RemoveChunkPrefix(n);
} else if (n != 0) {
- btree_reader_ ? AdvanceBytesBtree(n) : AdvanceBytesSlowPath(n);
+ if (btree_reader_) {
+ AdvanceBytesBtree(n);
+ } else {
+ bytes_remaining_ = 0;
+ }
}
}
@@ -1457,7 +1579,7 @@ inline void Cord::ForEachChunk(
}
}
-// Nonmember Cord-to-Cord relational operarators.
+// Nonmember Cord-to-Cord relational operators.
inline bool operator==(const Cord& lhs, const Cord& rhs) {
if (lhs.contents_.IsSame(rhs.contents_)) return true;
size_t rhs_size = rhs.size();
@@ -1508,7 +1630,6 @@ class CordTestAccess {
public:
static size_t FlatOverhead();
static size_t MaxFlatLength();
- static size_t SizeofCordRepConcat();
static size_t SizeofCordRepExternal();
static size_t SizeofCordRepSubstring();
static size_t FlatTagToLength(uint8_t tag);
diff --git a/absl/strings/cord_analysis.cc b/absl/strings/cord_analysis.cc
new file mode 100644
index 00000000..73d3c4e6
--- /dev/null
+++ b/absl/strings/cord_analysis.cc
@@ -0,0 +1,188 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/cord_analysis.h"
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/internal/cord_data_edge.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_ring.h"
+//
+#include "absl/base/macros.h"
+#include "absl/base/port.h"
+#include "absl/functional/function_ref.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+// Accounting mode for analyzing memory usage.
+enum class Mode { kTotal, kFairShare };
+
+// CordRepRef holds a `const CordRep*` reference in rep, and depending on mode,
+// holds a 'fraction' representing a cumulative inverse refcount weight.
+template <Mode mode>
+struct CordRepRef {
+ // Instantiates a CordRepRef instance.
+ explicit CordRepRef(const CordRep* r) : rep(r) {}
+
+ // Creates a child reference holding the provided child.
+ // Overloaded to add cumulative reference count for kFairShare.
+ CordRepRef Child(const CordRep* child) const { return CordRepRef(child); }
+
+ const CordRep* rep;
+};
+
+// RawUsage holds the computed total number of bytes.
+template <Mode mode>
+struct RawUsage {
+ size_t total = 0;
+
+ // Add 'size' to total, ignoring the CordRepRef argument.
+ void Add(size_t size, CordRepRef<mode>) { total += size; }
+};
+
+// Returns d / refcount, avoiding a division for the common refcount == 1.
+template <typename refcount_t>
+double MaybeDiv(double d, refcount_t refcount) {
+ return refcount == 1 ? d : d / refcount;
+}
+
+// Overloaded 'kFairShare' specialization for CordRepRef. This class holds a
+// `fraction` value which represents a cumulative inverse refcount weight.
+// For example, a top node with a reference count of 2 will have a fraction
+// value of 1/2 = 0.5, representing the 'fair share' of memory it references.
+// A node below such a node with a reference count of 5 then has a fraction of
+// 0.5 / 5 = 0.1 representing the fair share of memory below that node, etc.
+template <>
+struct CordRepRef<Mode::kFairShare> {
+ // Creates a CordRepRef with the provided rep and top (parent) fraction.
+ explicit CordRepRef(const CordRep* r, double frac = 1.0)
+ : rep(r), fraction(MaybeDiv(frac, r->refcount.Get())) {}
+
+ // Returns a CordRepRef with a fraction of `this->fraction / child.refcount`
+ CordRepRef Child(const CordRep* child) const {
+ return CordRepRef(child, fraction);
+ }
+
+ const CordRep* rep;
+ double fraction;
+};
+
+// Overloaded 'kFairShare' specialization for RawUsage
+template <>
+struct RawUsage<Mode::kFairShare> {
+ double total = 0;
+
+ // Adds `size` multiplied by `rep.fraction` to the total size.
+ void Add(size_t size, CordRepRef<Mode::kFairShare> rep) {
+ total += static_cast<double>(size) * rep.fraction;
+ }
+};
+
+// Computes the estimated memory size of the provided data edge.
+// External reps are assumed 'heap allocated at their exact size'.
+template <Mode mode>
+void AnalyzeDataEdge(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ assert(IsDataEdge(rep.rep));
+
+ // Consume all substrings
+ if (rep.rep->tag == SUBSTRING) {
+ raw_usage.Add(sizeof(CordRepSubstring), rep);
+ rep = rep.Child(rep.rep->substring()->child);
+ }
+
+ // Consume FLAT / EXTERNAL
+ const size_t size =
+ rep.rep->tag >= FLAT
+ ? rep.rep->flat()->AllocatedSize()
+ : rep.rep->length + sizeof(CordRepExternalImpl<intptr_t>);
+ raw_usage.Add(size, rep);
+}
+
+// Computes the memory size of the provided Ring tree.
+template <Mode mode>
+void AnalyzeRing(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ const CordRepRing* ring = rep.rep->ring();
+ raw_usage.Add(CordRepRing::AllocSize(ring->capacity()), rep);
+ ring->ForEach([&](CordRepRing::index_type pos) {
+ AnalyzeDataEdge(rep.Child(ring->entry_child(pos)), raw_usage);
+ });
+}
+
+// Computes the memory size of the provided Btree tree.
+template <Mode mode>
+void AnalyzeBtree(CordRepRef<mode> rep, RawUsage<mode>& raw_usage) {
+ raw_usage.Add(sizeof(CordRepBtree), rep);
+ const CordRepBtree* tree = rep.rep->btree();
+ if (tree->height() > 0) {
+ for (CordRep* edge : tree->Edges()) {
+ AnalyzeBtree(rep.Child(edge), raw_usage);
+ }
+ } else {
+ for (CordRep* edge : tree->Edges()) {
+ AnalyzeDataEdge(rep.Child(edge), raw_usage);
+ }
+ }
+}
+
+template <Mode mode>
+size_t GetEstimatedUsage(const CordRep* rep) {
+ // Zero initialized memory usage totals.
+ RawUsage<mode> raw_usage;
+
+ // Capture top level node and refcount into a CordRepRef.
+ CordRepRef<mode> repref(rep);
+
+ // Consume the top level CRC node if present.
+ if (repref.rep->tag == CRC) {
+ raw_usage.Add(sizeof(CordRepCrc), repref);
+ repref = repref.Child(repref.rep->crc()->child);
+ }
+
+ if (IsDataEdge(repref.rep)) {
+ AnalyzeDataEdge(repref, raw_usage);
+ } else if (repref.rep->tag == BTREE) {
+ AnalyzeBtree(repref, raw_usage);
+ } else if (repref.rep->tag == RING) {
+ AnalyzeRing(repref, raw_usage);
+ } else {
+ assert(false);
+ }
+
+ return static_cast<size_t>(raw_usage.total);
+}
+
+} // namespace
+
+size_t GetEstimatedMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kTotal>(rep);
+}
+
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep) {
+ return GetEstimatedUsage<Mode::kFairShare>(rep);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
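
The fair-share arithmetic above composes multiplicatively along the path from
the root: each node divides its parent's fraction by its own reference count.
The following is a minimal standalone sketch of just that arithmetic; `Node`
and its `refcount` field are hypothetical stand-ins for illustration, not the
Abseil internals.

    #include <cstdio>

    // Hypothetical stand-in for a shared tree node with a reference count.
    struct Node {
      int refcount;
    };

    // Mirrors MaybeDiv() above: divide only when refcount > 1.
    double MaybeDiv(double d, int refcount) {
      return refcount == 1 ? d : d / refcount;
    }

    int main() {
      // A top node shared by 2 cords, with a child shared 5 times below it,
      // matching the 1/2 * 1/5 = 0.1 example in the comment above.
      Node top{2}, child{5};
      double fraction = MaybeDiv(1.0, top.refcount);  // 0.5
      fraction = MaybeDiv(fraction, child.refcount);  // 0.1
      // A 100-byte data edge under `child` is charged 10 bytes of fair share.
      std::printf("fair share of 100 bytes: %.1f\n", 100 * fraction);
      return 0;
    }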
diff --git a/absl/strings/cord_analysis.h b/absl/strings/cord_analysis.h
new file mode 100644
index 00000000..7041ad1a
--- /dev/null
+++ b/absl/strings/cord_analysis.h
@@ -0,0 +1,44 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_CORD_ANALYSIS_H_
+#define ABSL_STRINGS_CORD_ANALYSIS_H_
+
+#include <cstddef>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns the *approximate* number of bytes held in full or in part by the
+// given CordRep (which may not remain the same between invocations). Cords
+// that share memory could each be "charged" independently for the same memory.
+size_t GetEstimatedMemoryUsage(const CordRep* rep);
+
+// Returns the *approximate* number of bytes held in full or in part by this
+// CordRep weighted by the sharing ratio of that data. For example, if some
+// data edge is shared by 4 different Cords, then each cord is attributed
+// 1/4th of the total memory usage as its 'fair share'.
+size_t GetEstimatedFairShareMemoryUsage(const CordRep* rep);
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+
+#endif // ABSL_STRINGS_CORD_ANALYSIS_H_
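
To make the contrast between the two declarations above concrete: if one
1000-byte data edge is shared by 4 Cords, `GetEstimatedMemoryUsage()` charges
each of the 4 Cords roughly the full 1000 bytes, so the shared memory is
counted 4 times in aggregate, while `GetEstimatedFairShareMemoryUsage()`
charges each Cord roughly 1000 / 4 = 250 bytes, and the per-Cord fair shares
sum back to the actual footprint (ignoring per-node overhead).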
diff --git a/absl/debugging/leak_check_disable.cc b/absl/strings/cord_buffer.cc
index 924d6e3d..fad6269c 100644
--- a/absl/debugging/leak_check_disable.cc
+++ b/absl/strings/cord_buffer.cc
@@ -1,10 +1,10 @@
-// Copyright 2017 The Abseil Authors.
+// Copyright 2022 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// https://www.apache.org/licenses/LICENSE-2.0
+// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,9 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Disable LeakSanitizer when this file is linked in.
-// This function overrides __lsan_is_turned_off from sanitizer/lsan_interface.h
-extern "C" int __lsan_is_turned_off();
-extern "C" int __lsan_is_turned_off() {
- return 1;
-}
+#include "absl/strings/cord_buffer.h"
+
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordBuffer::kDefaultLimit;
+constexpr size_t CordBuffer::kCustomLimit;
+#endif
+
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/strings/cord_buffer.h b/absl/strings/cord_buffer.h
new file mode 100644
index 00000000..56a6ce6f
--- /dev/null
+++ b/absl/strings/cord_buffer.h
@@ -0,0 +1,572 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: cord_buffer.h
+// -----------------------------------------------------------------------------
+//
+// This file defines an `absl::CordBuffer` data structure to hold data for
+// eventual inclusion within an existing `Cord` data structure. Cord buffers are
+// useful for building large Cords that may require custom allocation of its
+// associated memory.
+//
+#ifndef ABSL_STRINGS_CORD_BUFFER_H_
+#define ABSL_STRINGS_CORD_BUFFER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/numeric/bits.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class Cord;
+class CordBufferTestPeer;
+
+// CordBuffer
+//
+// CordBuffer manages memory buffers for purposes such as zero-copy APIs as well
+// as applications building cords with large data requiring granular control
+// over the allocation and size of cord data. For example, a function creating
+// a cord of random data could use a CordBuffer as follows:
+//
+// absl::Cord CreateRandomCord(size_t length) {
+// absl::Cord cord;
+// while (length > 0) {
+// CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(length);
+// absl::Span<char> data = buffer.available_up_to(length);
+// FillRandomValues(data.data(), data.size());
+// buffer.IncreaseLengthBy(data.size());
+// cord.Append(std::move(buffer));
+// length -= data.size();
+// }
+// return cord;
+// }
+//
+// CordBuffer instances are by default limited to a capacity of `kDefaultLimit`
+// bytes. `kDefaultLimit` is currently just under 4KiB, but this default may
+// change in the future and/or for specific architectures. The default limit
+// aims to provide a good trade-off between performance and memory overhead:
+// smaller buffers typically incur more compute cost, while larger buffers are
+// more CPU efficient but create significant memory overhead because such
+// allocations are less granular. Using larger buffers may also increase the
+// risk of memory fragmentation.
+//
+// Applications create a buffer using one of the `CreateWithDefaultLimit()` or
+// `CreateWithCustomLimit()` methods. The returned instance will have a non-zero
+// capacity and a zero length. Applications use the `data()` method to set the
+// contents of the managed memory, and once done filling the buffer, use the
+// `IncreaseLengthBy()` or `SetLength()` method to specify the length of the
+// initialized data before adding the buffer to a Cord.
+//
+// The `CreateWithCustomLimit()` method is intended for applications needing
+// larger buffers than the default memory limit, allowing the allocation of up
+// to a capacity of `kCustomLimit` bytes minus some minimum internal overhead.
+// The usage of `CreateWithCustomLimit()` should be limited to only those use
+// cases where the distribution of the input is relatively well known, and/or
+// where the efficiency gains outweigh the risk of memory
+// fragmentation. See the documentation for `CreateWithCustomLimit()` for more
+// information on using larger custom limits.
+//
+// The capacity of a `CordBuffer` returned by one of the `Create` methods may
+// be larger than the requested capacity due to rounding, alignment and
+// granularity of the memory allocator. Applications should use the `capacity`
+// method to obtain the effective capacity of the returned instance as
+// demonstrated in the provided example above.
+//
+// CordBuffer is a move-only class. All references into the managed memory are
+// invalidated when an instance is moved into either another CordBuffer instance
+// or a Cord. Writing to a location obtained by a previous call to `data()`
+// after an instance was moved will lead to undefined behavior.
+//
+// A `moved from` CordBuffer instance will have a valid but empty state.
+// CordBuffer is thread compatible.
+class CordBuffer {
+ public:
+ // kDefaultLimit
+ //
+ // Default capacity limits of allocated CordBuffers.
+ // See the class comments for more information on allocation limits.
+ static constexpr size_t kDefaultLimit = cord_internal::kMaxFlatLength;
+
+ // kCustomLimit
+ //
+ // Maximum size for CreateWithCustomLimit() allocated buffers.
+  // Note that the effective capacity may be slightly less
+  // because of the internal overhead of cord buffers.
+ static constexpr size_t kCustomLimit = 64U << 10;
+
+ // Constructors, Destructors and Assignment Operators
+
+ // Creates an empty CordBuffer.
+ CordBuffer() = default;
+
+ // Destroys this CordBuffer instance and, if not empty, releases any memory
+ // managed by this instance, invalidating previously returned references.
+ ~CordBuffer();
+
+ // CordBuffer is move-only
+ CordBuffer(CordBuffer&& rhs) noexcept;
+ CordBuffer& operator=(CordBuffer&&) noexcept;
+ CordBuffer(const CordBuffer&) = delete;
+ CordBuffer& operator=(const CordBuffer&) = delete;
+
+ // CordBuffer::MaximumPayload()
+ //
+ // Returns the guaranteed maximum payload for a CordBuffer returned by the
+ // `CreateWithDefaultLimit()` method. While small, each internal buffer inside
+ // a Cord incurs an overhead to manage the length, type and reference count
+ // for the buffer managed inside the cord tree. Applications can use this
+  // method to get the approximate number of buffers required for a given byte
+ // size, etc.
+ //
+ // For example:
+ // const size_t payload = absl::CordBuffer::MaximumPayload();
+ // const size_t buffer_count = (total_size + payload - 1) / payload;
+ // buffers.reserve(buffer_count);
+ static constexpr size_t MaximumPayload();
+
+  // Overload of the above `MaximumPayload()` that returns the
+ // maximum payload for a CordBuffer returned by the `CreateWithCustomLimit()`
+ // method given the provided `block_size`.
+ static constexpr size_t MaximumPayload(size_t block_size);
+
+ // CordBuffer::CreateWithDefaultLimit()
+ //
+ // Creates a CordBuffer instance of the desired `capacity`, capped at the
+ // default limit `kDefaultLimit`. The returned buffer has a guaranteed
+ // capacity of at least `min(kDefaultLimit, capacity)`. See the class comments
+ // for more information on buffer capacities and intended usage.
+ static CordBuffer CreateWithDefaultLimit(size_t capacity);
+
+
+ // CordBuffer::CreateWithCustomLimit()
+ //
+ // Creates a CordBuffer instance of the desired `capacity` rounded to an
+ // appropriate power of 2 size less than, or equal to `block_size`.
+ // Requires `block_size` to be a power of 2.
+ //
+ // If `capacity` is less than or equal to `kDefaultLimit`, then this method
+  // behaves identically to `CreateWithDefaultLimit`, which means that the caller
+ // is guaranteed to get a buffer of at least the requested capacity.
+ //
+ // If `capacity` is greater than or equal to `block_size`, then this method
+ // returns a buffer with an `allocated size` of `block_size` bytes. Otherwise,
+  // this method returns a buffer with a suitable smaller power of 2 block size
+ // to satisfy the request. The actual size depends on a number of factors, and
+ // is typically (but not necessarily) the highest or second highest power of 2
+ // value less than or equal to `capacity`.
+ //
+ // The 'allocated size' includes a small amount of overhead required for
+ // internal state, which is currently 13 bytes on 64-bit platforms. For
+  // example: a buffer created with `block_size` and `capacity` set to 8KiB
+ // will have an allocated size of 8KiB, and an effective internal `capacity`
+ // of 8KiB - 13 = 8179 bytes.
+ //
+ // To demonstrate this in practice, let's assume we want to read data from
+ // somewhat larger files using approximately 64KiB buffers:
+ //
+ // absl::Cord ReadFromFile(int fd, size_t n) {
+ // absl::Cord cord;
+ // while (n > 0) {
+ // CordBuffer buffer = CordBuffer::CreateWithCustomLimit(64 << 10, n);
+ // absl::Span<char> data = buffer.available_up_to(n);
+ // ReadFileDataOrDie(fd, data.data(), data.size());
+ // buffer.IncreaseLengthBy(data.size());
+ // cord.Append(std::move(buffer));
+ // n -= data.size();
+ // }
+ // return cord;
+ // }
+ //
+  // If we used this function to read a file of 659KiB, we might get the
+ // following pattern of allocated cord buffer sizes:
+ //
+ // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+ // CreateWithCustomLimit(64KiB, 674816) --> ~64KiB (65523)
+ // ...
+ // CreateWithCustomLimit(64KiB, 19586) --> ~16KiB (16371)
+ // CreateWithCustomLimit(64KiB, 3215) --> 3215 (at least 3215)
+ //
+ // The reason the method returns a 16K buffer instead of a roughly 19K buffer
+ // is to reduce memory overhead and fragmentation risks. Using carefully
+ // chosen power of 2 values reduces the entropy of allocated memory sizes.
+ //
+ // Additionally, let's assume we'd use the above function on files that are
+  // generally smaller than 64K. If we used 'precise' sized buffers for such
+  // files, then we'd get a very wide distribution of allocated memory sizes
+ // rounded to 4K page sizes, and we'd end up with a lot of unused capacity.
+ //
+  // In general, applications should only use custom sizes if the data they are
+ // consuming or storing is expected to be many times the chosen block size,
+ // and be based on objective data and performance metrics. For example, a
+ // compress function may work faster and consume less CPU when using larger
+ // buffers. Such an application should pick a size offering a reasonable
+ // trade-off between expected data size, compute savings with larger buffers,
+ // and the cost or fragmentation effect of larger buffers.
+ // Applications must pick a reasonable spot on that curve, and make sure their
+ // data meets their expectations in size distributions such as "mostly large".
+ static CordBuffer CreateWithCustomLimit(size_t block_size, size_t capacity);
+
+ // CordBuffer::available()
+ //
+ // Returns the span delineating the available capacity in this buffer
+ // which is defined as `{ data() + length(), capacity() - length() }`.
+ absl::Span<char> available();
+
+ // CordBuffer::available_up_to()
+ //
+ // Returns the span delineating the available capacity in this buffer limited
+ // to `size` bytes. This is equivalent to `available().subspan(0, size)`.
+ absl::Span<char> available_up_to(size_t size);
+
+ // CordBuffer::data()
+ //
+  // Returns a non-null pointer to the data managed by this instance.
+ // Applications are allowed to write up to `capacity` bytes of instance data.
+ // CordBuffer data is uninitialized by default. Reading data from an instance
+ // that has not yet been initialized will lead to undefined behavior.
+ char* data();
+ const char* data() const;
+
+ // CordBuffer::length()
+ //
+ // Returns the length of this instance. The default length of a CordBuffer is
+ // 0, indicating an 'empty' CordBuffer. Applications must specify the length
+ // of the data in a CordBuffer before adding it to a Cord.
+ size_t length() const;
+
+ // CordBuffer::capacity()
+ //
+ // Returns the capacity of this instance. All instances have a non-zero
+ // capacity: default and `moved from` instances have a small internal buffer.
+ size_t capacity() const;
+
+ // CordBuffer::IncreaseLengthBy()
+ //
+ // Increases the length of this buffer by the specified 'n' bytes.
+ // Applications must make sure all data in this buffer up to the new length
+ // has been initialized before adding a CordBuffer to a Cord: failure to do so
+ // will lead to undefined behavior. Requires `length() + n <= capacity()`.
+  // Typically, applications will use `available_up_to()` to get a span of the
+ // desired capacity, and use `span.size()` to increase the length as in:
+ // absl::Span<char> span = buffer.available_up_to(desired);
+ // buffer.IncreaseLengthBy(span.size());
+ // memcpy(span.data(), src, span.size());
+ // etc...
+ void IncreaseLengthBy(size_t n);
+
+ // CordBuffer::SetLength()
+ //
+ // Sets the data length of this instance. Applications must make sure all data
+ // of the specified length has been initialized before adding a CordBuffer to
+ // a Cord: failure to do so will lead to undefined behavior.
+ // Setting the length to a small value or zero does not release any memory
+ // held by this CordBuffer instance. Requires `length <= capacity()`.
+ // Applications should preferably use the `IncreaseLengthBy()` method above
+  // in combination with the `available()` or `available_up_to()` methods.
+ void SetLength(size_t length);
+
+ private:
+  // Make sure we don't accidentally over-promise.
+ static_assert(kCustomLimit <= cord_internal::kMaxLargeFlatSize, "");
+
+ // Assume the cost of an 'uprounded' allocation to CeilPow2(size) versus
+ // the cost of allocating at least 1 extra flat <= 4KB:
+ // - Flat overhead = 13 bytes
+ // - Btree amortized cost / node =~ 13 bytes
+ // - 64 byte granularity of tcmalloc at 4K =~ 32 byte average
+  // For CPU cost and efficiency we should at least 'save' something by
+  // splitting; as a poor man's measure, we require the slop to be
+  // at least double the cost offset to make splitting worthwhile: ~128 bytes.
+ static constexpr size_t kMaxPageSlop = 128;
+
+  // Overhead for allocating a flat.
+ static constexpr size_t kOverhead = cord_internal::kFlatOverhead;
+
+ using CordRepFlat = cord_internal::CordRepFlat;
+
+ // `Rep` is the internal data representation of a CordBuffer. The internal
+ // representation has an internal small size optimization similar to
+ // std::string (SSO).
+ struct Rep {
+ // Inline SSO size of a CordBuffer
+ static constexpr size_t kInlineCapacity = sizeof(intptr_t) * 2 - 1;
+
+ // Creates a default instance with kInlineCapacity.
+ Rep() : short_rep{} {}
+
+    // Creates an instance managing a non-null allocated CordRepFlat.
+ explicit Rep(cord_internal::CordRepFlat* rep) : long_rep{rep} {
+ assert(rep != nullptr);
+ }
+
+ // Returns true if this instance manages the SSO internal buffer.
+ bool is_short() const {
+ constexpr size_t offset = offsetof(Short, raw_size);
+ return (reinterpret_cast<const char*>(this)[offset] & 1) != 0;
+ }
+
+ // Returns the available area of the internal SSO data
+ absl::Span<char> short_available() {
+ assert(is_short());
+ const size_t length = (short_rep.raw_size >> 1);
+ return absl::Span<char>(short_rep.data + length,
+ kInlineCapacity - length);
+ }
+
+    // Returns the available area of the allocated CordRepFlat buffer.
+ absl::Span<char> long_available() {
+ assert(!is_short());
+ const size_t length = long_rep.rep->length;
+ return absl::Span<char>(long_rep.rep->Data() + length,
+ long_rep.rep->Capacity() - length);
+ }
+
+ // Returns the length of the internal SSO data.
+ size_t short_length() const {
+ assert(is_short());
+ return short_rep.raw_size >> 1;
+ }
+
+ // Sets the length of the internal SSO data.
+ // Disregards any previously set CordRep instance.
+ void set_short_length(size_t length) {
+ short_rep.raw_size = static_cast<char>((length << 1) + 1);
+ }
+
+ // Adds `n` to the current short length.
+ void add_short_length(size_t n) {
+ assert(is_short());
+ short_rep.raw_size += static_cast<char>(n << 1);
+ }
+
+ // Returns reference to the internal SSO data buffer.
+ char* data() {
+ assert(is_short());
+ return short_rep.data;
+ }
+ const char* data() const {
+ assert(is_short());
+ return short_rep.data;
+ }
+
+    // Returns a pointer to the external CordRepFlat managed by this instance.
+ cord_internal::CordRepFlat* rep() const {
+ assert(!is_short());
+ return long_rep.rep;
+ }
+
+ // The internal representation takes advantage of the fact that allocated
+ // memory is always on an even address, and uses the least significant bit
+ // of the first or last byte (depending on endianness) as the inline size
+ // indicator overlapping with the least significant byte of the CordRep*.
+#if defined(ABSL_IS_BIG_ENDIAN)
+ struct Long {
+ explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+ void* padding;
+ cord_internal::CordRepFlat* rep;
+ };
+ struct Short {
+ char data[sizeof(Long) - 1];
+ char raw_size = 1;
+ };
+#else
+ struct Long {
+ explicit Long(cord_internal::CordRepFlat* rep_arg) : rep(rep_arg) {}
+ cord_internal::CordRepFlat* rep;
+ void* padding;
+ };
+ struct Short {
+ char raw_size = 1;
+ char data[sizeof(Long) - 1];
+ };
+#endif
+
+ union {
+ Long long_rep;
+ Short short_rep;
+ };
+ };
+
+ // Power2 functions
+ static bool IsPow2(size_t size) { return absl::has_single_bit(size); }
+ static size_t Log2Floor(size_t size) { return absl::bit_width(size) - 1; }
+ static size_t Log2Ceil(size_t size) { return absl::bit_width(size - 1); }
+
+ // Implementation of `CreateWithCustomLimit()`.
+ // This implementation allows for future memory allocation hints to
+ // be passed down into the CordRepFlat allocation function.
+ template <typename... AllocationHints>
+ static CordBuffer CreateWithCustomLimitImpl(size_t block_size,
+ size_t capacity,
+ AllocationHints... hints);
+
+ // Consumes the value contained in this instance and resets the instance.
+  // This method returns a non-null CordRep* if the current instance manages a
+ // CordRep*, and resets the instance to an empty SSO instance. If the current
+ // instance is an SSO instance, then this method returns nullptr and sets
+ // `short_value` to the inlined data value. In either case, the current
+ // instance length is reset to zero.
+ // This method is intended to be used by Cord internal functions only.
+ cord_internal::CordRep* ConsumeValue(absl::string_view& short_value) {
+ cord_internal::CordRep* rep = nullptr;
+ if (rep_.is_short()) {
+ short_value = absl::string_view(rep_.data(), rep_.short_length());
+ } else {
+ rep = rep_.rep();
+ }
+ rep_.set_short_length(0);
+ return rep;
+ }
+
+ // Internal constructor.
+ explicit CordBuffer(cord_internal::CordRepFlat* rep) : rep_(rep) {
+ assert(rep != nullptr);
+ }
+
+ Rep rep_;
+
+ friend class Cord;
+ friend class CordBufferTestPeer;
+};
+
+inline constexpr size_t CordBuffer::MaximumPayload() {
+ return cord_internal::kMaxFlatLength;
+}
+
+inline constexpr size_t CordBuffer::MaximumPayload(size_t block_size) {
+ // TODO(absl-team): Use std::min when C++11 support is dropped.
+ return (kCustomLimit < block_size ? kCustomLimit : block_size) -
+ cord_internal::kFlatOverhead;
+}
+
+inline CordBuffer CordBuffer::CreateWithDefaultLimit(size_t capacity) {
+ if (capacity > Rep::kInlineCapacity) {
+ auto* rep = cord_internal::CordRepFlat::New(capacity);
+ rep->length = 0;
+ return CordBuffer(rep);
+ }
+ return CordBuffer();
+}
+
+template <typename... AllocationHints>
+inline CordBuffer CordBuffer::CreateWithCustomLimitImpl(
+ size_t block_size, size_t capacity, AllocationHints... hints) {
+ assert(IsPow2(block_size));
+ capacity = (std::min)(capacity, kCustomLimit);
+ block_size = (std::min)(block_size, kCustomLimit);
+ if (capacity + kOverhead >= block_size) {
+ capacity = block_size;
+ } else if (capacity <= kDefaultLimit) {
+ capacity = capacity + kOverhead;
+ } else if (!IsPow2(capacity)) {
+ // Check if rounded up to next power 2 is a good enough fit
+ // with limited waste making it an acceptable direct fit.
+ const size_t rounded_up = size_t{1} << Log2Ceil(capacity);
+ const size_t slop = rounded_up - capacity;
+ if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) {
+ capacity = rounded_up;
+ } else {
+ // Round down to highest power of 2 <= capacity.
+ // Consider a more aggressive step down if that may reduce the
+ // risk of fragmentation where 'people are holding it wrong'.
+ const size_t rounded_down = size_t{1} << Log2Floor(capacity);
+ capacity = rounded_down;
+ }
+ }
+ const size_t length = capacity - kOverhead;
+ auto* rep = CordRepFlat::New(CordRepFlat::Large(), length, hints...);
+ rep->length = 0;
+ return CordBuffer(rep);
+}
+
+inline CordBuffer CordBuffer::CreateWithCustomLimit(size_t block_size,
+ size_t capacity) {
+ return CreateWithCustomLimitImpl(block_size, capacity);
+}
+
+inline CordBuffer::~CordBuffer() {
+ if (!rep_.is_short()) {
+ cord_internal::CordRepFlat::Delete(rep_.rep());
+ }
+}
+
+inline CordBuffer::CordBuffer(CordBuffer&& rhs) noexcept : rep_(rhs.rep_) {
+ rhs.rep_.set_short_length(0);
+}
+
+inline CordBuffer& CordBuffer::operator=(CordBuffer&& rhs) noexcept {
+ if (!rep_.is_short()) cord_internal::CordRepFlat::Delete(rep_.rep());
+ rep_ = rhs.rep_;
+ rhs.rep_.set_short_length(0);
+ return *this;
+}
+
+inline absl::Span<char> CordBuffer::available() {
+ return rep_.is_short() ? rep_.short_available() : rep_.long_available();
+}
+
+inline absl::Span<char> CordBuffer::available_up_to(size_t size) {
+ return available().subspan(0, size);
+}
+
+inline char* CordBuffer::data() {
+ return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline const char* CordBuffer::data() const {
+ return rep_.is_short() ? rep_.data() : rep_.rep()->Data();
+}
+
+inline size_t CordBuffer::capacity() const {
+ return rep_.is_short() ? Rep::kInlineCapacity : rep_.rep()->Capacity();
+}
+
+inline size_t CordBuffer::length() const {
+ return rep_.is_short() ? rep_.short_length() : rep_.rep()->length;
+}
+
+inline void CordBuffer::SetLength(size_t length) {
+ ABSL_HARDENING_ASSERT(length <= capacity());
+ if (rep_.is_short()) {
+ rep_.set_short_length(length);
+ } else {
+ rep_.rep()->length = length;
+ }
+}
+
+inline void CordBuffer::IncreaseLengthBy(size_t n) {
+ ABSL_HARDENING_ASSERT(n <= capacity() && length() + n <= capacity());
+ if (rep_.is_short()) {
+ rep_.add_short_length(n);
+ } else {
+ rep_.rep()->length += n;
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_CORD_BUFFER_H_
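
To make the size-selection logic of `CreateWithCustomLimitImpl()` above easier
to trace, here is a minimal standalone sketch of just that arithmetic. The
constants are assumptions taken from the class comments (13-byte flat overhead,
~128-byte slop threshold, a default limit just under 4KiB); the real values
live in `absl::cord_internal` and may differ.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    using std::size_t;

    // Assumed illustrative constants; see the class comments above.
    constexpr size_t kOverhead = 13;                    // flat overhead
    constexpr size_t kMaxPageSlop = 128;                // slop threshold
    constexpr size_t kDefaultLimit = 4096 - kOverhead;  // just under 4KiB
    constexpr size_t kCustomLimit = 64u << 10;

    bool IsPow2(size_t n) { return n != 0 && (n & (n - 1)) == 0; }
    size_t Log2Floor(size_t n) {
      size_t r = 0;
      while (n >>= 1) ++r;
      return r;
    }
    size_t Log2Ceil(size_t n) { return Log2Floor(n - 1) + 1; }

    // Mirrors the allocated-size selection in CreateWithCustomLimitImpl().
    size_t ChooseAllocatedSize(size_t block_size, size_t capacity) {
      capacity = std::min(capacity, kCustomLimit);
      block_size = std::min(block_size, kCustomLimit);
      if (capacity + kOverhead >= block_size) return block_size;
      if (capacity <= kDefaultLimit) return capacity + kOverhead;
      if (!IsPow2(capacity)) {
        // Round up when the slop is an acceptable direct fit, else round down.
        const size_t rounded_up = size_t{1} << Log2Ceil(capacity);
        const size_t slop = rounded_up - capacity;
        if (slop >= kOverhead && slop <= kMaxPageSlop + kOverhead) {
          return rounded_up;
        }
        return size_t{1} << Log2Floor(capacity);
      }
      return capacity;
    }

    int main() {
      // Reproduces the 659KiB-file pattern from the class comment above.
      std::printf("%zu\n", ChooseAllocatedSize(64u << 10, 674816));  // 65536
      std::printf("%zu\n", ChooseAllocatedSize(64u << 10, 19586));   // 16384
      std::printf("%zu\n", ChooseAllocatedSize(64u << 10, 3215));    // 3228
      return 0;
    }

The three calls in `main()` trace the documented pattern: full 64KiB blocks
while plenty of data remains, a step down to 16KiB for the 19586-byte
remainder, and a precise fit (payload 3215 plus overhead) once the remainder
drops below the default limit.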
diff --git a/absl/strings/cord_buffer_test.cc b/absl/strings/cord_buffer_test.cc
new file mode 100644
index 00000000..5c7437ae
--- /dev/null
+++ b/absl/strings/cord_buffer_test.cc
@@ -0,0 +1,320 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/cord_buffer.h"
+
+
+#include <algorithm>
+#include <climits>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+#include "absl/types/span.h"
+
+using testing::Eq;
+using testing::Ge;
+using testing::Le;
+using testing::Ne;
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class CordBufferTestPeer {
+ public:
+ static cord_internal::CordRep* ConsumeValue(CordBuffer& buffer,
+ absl::string_view& short_value) {
+ return buffer.ConsumeValue(short_value);
+ }
+};
+
+namespace {
+
+using ::absl::cordrep_testing::CordToString;
+
+constexpr size_t kInlinedSize = sizeof(CordBuffer) - 1;
+constexpr size_t kDefaultLimit = CordBuffer::kDefaultLimit;
+constexpr size_t kCustomLimit = CordBuffer::kCustomLimit;
+constexpr size_t kMaxFlatSize = cord_internal::kMaxFlatSize;
+constexpr size_t kMaxFlatLength = cord_internal::kMaxFlatLength;
+constexpr size_t kFlatOverhead = cord_internal::kFlatOverhead;
+
+constexpr size_t k8KiB = 8 << 10;
+constexpr size_t k16KiB = 16 << 10;
+constexpr size_t k64KiB = 64 << 10;
+constexpr size_t k1MB = 1 << 20;
+
+class CordBufferTest : public testing::TestWithParam<size_t> {};
+
+INSTANTIATE_TEST_SUITE_P(MediumSize, CordBufferTest,
+ testing::Values(1, kInlinedSize - 1, kInlinedSize,
+ kInlinedSize + 1, kDefaultLimit - 1,
+ kDefaultLimit));
+
+TEST_P(CordBufferTest, MaximumPayload) {
+ EXPECT_THAT(CordBuffer::MaximumPayload(), Eq(kMaxFlatLength));
+ EXPECT_THAT(CordBuffer::MaximumPayload(512), Eq(512 - kFlatOverhead));
+ EXPECT_THAT(CordBuffer::MaximumPayload(k64KiB), Eq(k64KiB - kFlatOverhead));
+ EXPECT_THAT(CordBuffer::MaximumPayload(k1MB), Eq(k64KiB - kFlatOverhead));
+}
+
+TEST(CordBufferTest, ConstructDefault) {
+ CordBuffer buffer;
+ EXPECT_THAT(buffer.capacity(), Eq(sizeof(CordBuffer) - 1));
+ EXPECT_THAT(buffer.length(), Eq(0));
+ EXPECT_THAT(buffer.data(), Ne(nullptr));
+ EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
+ EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
+ memset(buffer.data(), 0xCD, buffer.capacity());
+}
+
+TEST(CordBufferTest, CreateSsoWithDefaultLimit) {
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(3);
+ EXPECT_THAT(buffer.capacity(), Ge(3));
+ EXPECT_THAT(buffer.capacity(), Le(sizeof(CordBuffer)));
+ EXPECT_THAT(buffer.length(), Eq(0));
+ memset(buffer.data(), 0xCD, buffer.capacity());
+
+ memcpy(buffer.data(), "Abc", 3);
+ buffer.SetLength(3);
+ EXPECT_THAT(buffer.length(), Eq(3));
+ absl::string_view short_value;
+ EXPECT_THAT(CordBufferTestPeer::ConsumeValue(buffer, short_value),
+ Eq(nullptr));
+ EXPECT_THAT(absl::string_view(buffer.data(), 3), Eq("Abc"));
+ EXPECT_THAT(short_value, Eq("Abc"));
+}
+
+TEST_P(CordBufferTest, Available) {
+ const size_t requested = GetParam();
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+ EXPECT_THAT(buffer.available().data(), Eq(buffer.data()));
+ EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity()));
+
+ buffer.SetLength(2);
+ EXPECT_THAT(buffer.available().data(), Eq(buffer.data() + 2));
+ EXPECT_THAT(buffer.available().size(), Eq(buffer.capacity() - 2));
+}
+
+TEST_P(CordBufferTest, IncreaseLengthBy) {
+ const size_t requested = GetParam();
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+ buffer.IncreaseLengthBy(2);
+ EXPECT_THAT(buffer.length(), Eq(2));
+ buffer.IncreaseLengthBy(5);
+ EXPECT_THAT(buffer.length(), Eq(7));
+}
+
+TEST_P(CordBufferTest, AvailableUpTo) {
+ const size_t requested = GetParam();
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+ size_t expected_up_to = std::min<size_t>(3, buffer.capacity());
+ EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data()));
+ EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
+
+ buffer.SetLength(2);
+ expected_up_to = std::min<size_t>(3, buffer.capacity() - 2);
+ EXPECT_THAT(buffer.available_up_to(3).data(), Eq(buffer.data() + 2));
+ EXPECT_THAT(buffer.available_up_to(3).size(), Eq(expected_up_to));
+}
+
+// Returns the maximum capacity for a given block_size and requested size.
+size_t MaxCapacityFor(size_t block_size, size_t requested) {
+ requested = (std::min)(requested, cord_internal::kMaxLargeFlatSize);
+ // Maximum returned size is always capped at block_size - kFlatOverhead.
+ return block_size - kFlatOverhead;
+}
+
+TEST_P(CordBufferTest, CreateWithDefaultLimit) {
+ const size_t requested = GetParam();
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+ EXPECT_THAT(buffer.capacity(), Ge(requested));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
+ EXPECT_THAT(buffer.length(), Eq(0));
+
+ memset(buffer.data(), 0xCD, buffer.capacity());
+
+ std::string data(requested - 1, 'x');
+ memcpy(buffer.data(), data.c_str(), requested);
+ buffer.SetLength(requested);
+
+ EXPECT_THAT(buffer.length(), Eq(requested));
+ EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
+}
+
+TEST(CordBufferTest, CreateWithDefaultLimitAskingFor2GB) {
+ constexpr size_t k2GiB = 1U << 31;
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(k2GiB);
+ // Expect to never be awarded more than a reasonable memory size, even in
+ // cases where a (debug) memory allocator may grant us somewhat more memory
+  // than `kDefaultLimit`; it should be no more than `2 * kDefaultLimit`.
+ EXPECT_THAT(buffer.capacity(), Le(2 * CordBuffer::kDefaultLimit));
+ EXPECT_THAT(buffer.length(), Eq(0));
+ EXPECT_THAT(buffer.data(), Ne(nullptr));
+ memset(buffer.data(), 0xCD, buffer.capacity());
+}
+
+TEST_P(CordBufferTest, MoveConstruct) {
+ const size_t requested = GetParam();
+ CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
+ const size_t capacity = from.capacity();
+ memcpy(from.data(), "Abc", 4);
+ from.SetLength(4);
+
+ CordBuffer to(std::move(from));
+ EXPECT_THAT(to.capacity(), Eq(capacity));
+ EXPECT_THAT(to.length(), Eq(4));
+ EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
+
+ EXPECT_THAT(from.length(), Eq(0)); // NOLINT
+}
+
+TEST_P(CordBufferTest, MoveAssign) {
+ const size_t requested = GetParam();
+ CordBuffer from = CordBuffer::CreateWithDefaultLimit(requested);
+ const size_t capacity = from.capacity();
+ memcpy(from.data(), "Abc", 4);
+ from.SetLength(4);
+
+ CordBuffer to;
+ to = std::move(from);
+ EXPECT_THAT(to.capacity(), Eq(capacity));
+ EXPECT_THAT(to.length(), Eq(4));
+ EXPECT_THAT(absl::string_view(to.data()), Eq("Abc"));
+
+ EXPECT_THAT(from.length(), Eq(0)); // NOLINT
+}
+
+TEST_P(CordBufferTest, ConsumeValue) {
+ const size_t requested = GetParam();
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(requested);
+ memcpy(buffer.data(), "Abc", 4);
+ buffer.SetLength(3);
+
+ absl::string_view short_value;
+ if (cord_internal::CordRep* rep =
+ CordBufferTestPeer::ConsumeValue(buffer, short_value)) {
+ EXPECT_THAT(CordToString(rep), Eq("Abc"));
+ cord_internal::CordRep::Unref(rep);
+ } else {
+ EXPECT_THAT(short_value, Eq("Abc"));
+ }
+ EXPECT_THAT(buffer.length(), Eq(0));
+}
+
+TEST_P(CordBufferTest, CreateWithCustomLimitWithinDefaultLimit) {
+ const size_t requested = GetParam();
+ CordBuffer buffer =
+ CordBuffer::CreateWithCustomLimit(kMaxFlatSize, requested);
+ EXPECT_THAT(buffer.capacity(), Ge(requested));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, requested)));
+ EXPECT_THAT(buffer.length(), Eq(0));
+
+ memset(buffer.data(), 0xCD, buffer.capacity());
+
+ std::string data(requested - 1, 'x');
+ memcpy(buffer.data(), data.c_str(), requested);
+ buffer.SetLength(requested);
+
+ EXPECT_THAT(buffer.length(), Eq(requested));
+ EXPECT_THAT(absl::string_view(buffer.data()), Eq(data));
+}
+
+TEST(CordLargeBufferTest, CreateAtOrBelowDefaultLimit) {
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, kDefaultLimit);
+ EXPECT_THAT(buffer.capacity(), Ge(kDefaultLimit));
+ EXPECT_THAT(buffer.capacity(),
+ Le(MaxCapacityFor(kMaxFlatSize, kDefaultLimit)));
+
+ buffer = CordBuffer::CreateWithCustomLimit(k64KiB, 3178);
+ EXPECT_THAT(buffer.capacity(), Ge(3178));
+}
+
+TEST(CordLargeBufferTest, CreateWithCustomLimit) {
+  ASSERT_TRUE((kMaxFlatSize & (kMaxFlatSize - 1)) == 0) << "Must be power of 2";
+
+ for (size_t size = kMaxFlatSize; size <= kCustomLimit; size *= 2) {
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(size, size);
+ size_t expected = size - kFlatOverhead;
+ ASSERT_THAT(buffer.capacity(), Ge(expected));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(size, expected)));
+ }
+}
+
+TEST(CordLargeBufferTest, CreateWithTooLargeLimit) {
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k64KiB, k1MB);
+ ASSERT_THAT(buffer.capacity(), Ge(k64KiB - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k64KiB, k1MB)));
+}
+
+TEST(CordLargeBufferTest, CreateWithHugeValueForOverFlowHardening) {
+ for (size_t dist_from_max = 0; dist_from_max <= 32; ++dist_from_max) {
+ size_t capacity = std::numeric_limits<size_t>::max() - dist_from_max;
+
+ CordBuffer buffer = CordBuffer::CreateWithDefaultLimit(capacity);
+ ASSERT_THAT(buffer.capacity(), Ge(kDefaultLimit));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(kMaxFlatSize, capacity)));
+
+ for (size_t limit = kMaxFlatSize; limit <= kCustomLimit; limit *= 2) {
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(limit, capacity);
+ ASSERT_THAT(buffer.capacity(), Ge(limit - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(limit, capacity)));
+ }
+ }
+}
+
+TEST(CordLargeBufferTest, CreateWithSmallLimit) {
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(512, 1024);
+ ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 1024)));
+
+ // Ask for precise block size, should return size - kOverhead
+ buffer = CordBuffer::CreateWithCustomLimit(512, 512);
+ ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 512)));
+
+ // Corner case: 511 < block_size, but 511 + kOverhead is above
+ buffer = CordBuffer::CreateWithCustomLimit(512, 511);
+ ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 511)));
+
+ // Corner case: 498 + kOverhead < block_size
+ buffer = CordBuffer::CreateWithCustomLimit(512, 498);
+ ASSERT_THAT(buffer.capacity(), Ge(512 - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(512, 498)));
+}
+
+TEST(CordLargeBufferTest, CreateWasteFull) {
+ // 15 KiB gets rounded down to next pow2 value.
+ const size_t requested = (15 << 10);
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
+ ASSERT_THAT(buffer.capacity(), Ge(k8KiB - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k8KiB, requested)));
+}
+
+TEST(CordLargeBufferTest, CreateSmallSlop) {
+ const size_t requested = k16KiB - 2 * kFlatOverhead;
+ CordBuffer buffer = CordBuffer::CreateWithCustomLimit(k16KiB, requested);
+ ASSERT_THAT(buffer.capacity(), Ge(k16KiB - kFlatOverhead));
+ EXPECT_THAT(buffer.capacity(), Le(MaxCapacityFor(k16KiB, requested)));
+}
+
+} // namespace
+ABSL_NAMESPACE_END
+} // namespace absl
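
Several of the tests above (`ConstructDefault`, `CreateSsoWithDefaultLimit`)
depend on the SSO layout documented in cord_buffer.h: the inline length is
stored as `(length << 1) + 1` in the byte that overlaps the least significant
byte of the CordRep pointer, which is also why a default CordBuffer exposes a
capacity of `sizeof(CordBuffer) - 1`. A standalone sketch of just that tag
encoding follows; `ShortTag` is a hypothetical type, not the Abseil `Rep`.

    #include <cassert>
    #include <cstddef>

    using std::size_t;

    // Standalone sketch of the low-bit SSO tag used by CordBuffer::Rep.
    // Allocated CordRep pointers are always even, so bit 0 of the byte that
    // overlaps the pointer is free to mean "inline".
    struct ShortTag {
      unsigned char raw_size = 1;  // length 0, inline bit set

      bool is_short() const { return (raw_size & 1) != 0; }
      size_t length() const { return raw_size >> 1; }
      void set_length(size_t n) {
        raw_size = static_cast<unsigned char>((n << 1) + 1);
      }
    };

    int main() {
      ShortTag tag;
      assert(tag.is_short() && tag.length() == 0);
      tag.set_length(5);
      assert(tag.is_short() && tag.length() == 5);
      return 0;
    }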
diff --git a/absl/strings/cord_ring_reader_test.cc b/absl/strings/cord_ring_reader_test.cc
index d9a9a76d..8e7183bf 100644
--- a/absl/strings/cord_ring_reader_test.cc
+++ b/absl/strings/cord_ring_reader_test.cc
@@ -126,7 +126,7 @@ TEST(CordRingReaderTest, SeekForward) {
reader.Reset(ring);
size_t consumed = 0;
- size_t remaining = ring->length;;
+ size_t remaining = ring->length;
for (int i = 0; i < flats.size(); ++i) {
CordRepRing::index_type index = ring->advance(head, i);
size_t offset = consumed;
diff --git a/absl/strings/cord_ring_test.cc b/absl/strings/cord_ring_test.cc
index f1318595..f39a0a4f 100644
--- a/absl/strings/cord_ring_test.cc
+++ b/absl/strings/cord_ring_test.cc
@@ -44,7 +44,6 @@ using ::absl::cord_internal::CordRepFlat;
using ::absl::cord_internal::CordRepRing;
using ::absl::cord_internal::CordRepSubstring;
-using ::absl::cord_internal::CONCAT;
using ::absl::cord_internal::EXTERNAL;
using ::absl::cord_internal::SUBSTRING;
@@ -262,16 +261,6 @@ CordRepSubstring* RemoveSuffix(size_t length, CordRep* rep) {
return MakeSubstring(0, rep->length - length, rep);
}
-CordRepConcat* MakeConcat(CordRep* left, CordRep* right, int depth = 0) {
- auto* concat = new CordRepConcat;
- concat->tag = CONCAT;
- concat->length = left->length + right->length;
- concat->left = left;
- concat->right = right;
- concat->set_depth(depth);
- return concat;
-}
-
enum Composition { kMix, kAppend, kPrepend };
Composition RandomComposition() {
@@ -296,7 +285,6 @@ constexpr const char* kFox = "The quick brown fox jumps over the lazy dog";
constexpr const char* kFoxFlats[] = {"The ", "quick ", "brown ",
"fox ", "jumps ", "over ",
"the ", "lazy ", "dog"};
-constexpr const char* kAlphabet = "abcdefghijklmnopqrstuvwxyz";
CordRepRing* FromFlats(Span<const char* const> flats,
Composition composition = kAppend) {
@@ -594,35 +582,6 @@ TEST_P(CordRingCreateFromTreeTest, CreateFromSubstringOfLargeExternal) {
EXPECT_THAT(ToRawFlats(result), ElementsAre(str));
}
-TEST_P(CordRingBuildInputTest, CreateFromConcat) {
- CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"),
- MakeFlat("nopqrstuv"), MakeFlat("wxyz")};
- auto* left = MakeConcat(RefIfInputSharedIndirect(flats[0]), flats[1]);
- auto* right = MakeConcat(flats[2], RefIfInputSharedIndirect(flats[3]));
- auto* concat = RefIfInputShared(MakeConcat(left, right));
- CordRepRing* result = NeedsUnref(CordRepRing::Create(concat));
- ASSERT_THAT(result, IsValidRingBuffer());
- EXPECT_THAT(result->length, Eq(26));
- EXPECT_THAT(ToString(result), Eq(kAlphabet));
-}
-
-TEST_P(CordRingBuildInputTest, CreateFromSubstringConcat) {
- for (size_t off = 0; off < 26; ++off) {
- for (size_t len = 1; len < 26 - off; ++len) {
- CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"),
- MakeFlat("nopqrstuv"), MakeFlat("wxyz")};
- auto* left = MakeConcat(RefIfInputSharedIndirect(flats[0]), flats[1]);
- auto* right = MakeConcat(flats[2], RefIfInputSharedIndirect(flats[3]));
- auto* concat = MakeConcat(left, right);
- auto* child = RefIfInputShared(MakeSubstring(off, len, concat));
- CordRepRing* result = NeedsUnref(CordRepRing::Create(child));
- ASSERT_THAT(result, IsValidRingBuffer());
- ASSERT_THAT(result->length, Eq(len));
- ASSERT_THAT(ToString(result), string_view(kAlphabet).substr(off, len));
- }
- }
-}
-
TEST_P(CordRingCreateTest, Properties) {
absl::string_view str1 = "abcdefghijklmnopqrstuvwxyz";
CordRepRing* result = NeedsUnref(CordRepRing::Create(MakeFlat(str1), 120));
diff --git a/absl/strings/cord_test.cc b/absl/strings/cord_test.cc
index cced9bba..0862f69a 100644
--- a/absl/strings/cord_test.cc
+++ b/absl/strings/cord_test.cc
@@ -34,15 +34,32 @@
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/fixed_array.h"
+#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/cordz_test_helpers.h"
+#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
+// convenience local constants
+static constexpr auto FLAT = absl::cord_internal::FLAT;
+static constexpr auto MAX_FLAT_TAG = absl::cord_internal::MAX_FLAT_TAG;
+
typedef std::mt19937_64 RandomEngine;
+using absl::cord_internal::CordRep;
+using absl::cord_internal::CordRepBtree;
+using absl::cord_internal::CordRepConcat;
+using absl::cord_internal::CordRepCrc;
+using absl::cord_internal::CordRepExternal;
+using absl::cord_internal::CordRepFlat;
+using absl::cord_internal::CordRepSubstring;
+using absl::cord_internal::CordzUpdateTracker;
+using absl::cord_internal::kFlatOverhead;
+using absl::cord_internal::kMaxFlatLength;
+
static std::string RandomLowercaseString(RandomEngine* rng);
static std::string RandomLowercaseString(RandomEngine* rng, size_t length);
@@ -185,6 +202,7 @@ class CordTestPeer {
}
static bool IsTree(const Cord& c) { return c.contents_.is_tree(); }
+ static CordRep* Tree(const Cord& c) { return c.contents_.tree(); }
static cord_internal::CordzInfo* GetCordzInfo(const Cord& c) {
return c.contents_.cordz_info();
@@ -192,14 +210,12 @@ class CordTestPeer {
static Cord MakeSubstring(Cord src, size_t offset, size_t length) {
ABSL_RAW_CHECK(src.contents_.is_tree(), "Can not be inlined");
+ ABSL_RAW_CHECK(src.ExpectedChecksum() == absl::nullopt,
+ "Can not be hardened");
Cord cord;
- auto* rep = new cord_internal::CordRepSubstring;
- rep->tag = cord_internal::SUBSTRING;
- rep->child = cord_internal::CordRep::Ref(src.contents_.tree());
- rep->start = offset;
- rep->length = length;
- cord.contents_.EmplaceTree(rep,
- cord_internal::CordzUpdateTracker::kSubCord);
+ auto* tree = cord_internal::SkipCrcNode(src.contents_.tree());
+ auto* rep = CordRepSubstring::Create(CordRep::Ref(tree), offset, length);
+ cord.contents_.EmplaceTree(rep, CordzUpdateTracker::kSubCord);
return cord;
}
};
@@ -207,31 +223,107 @@ class CordTestPeer {
ABSL_NAMESPACE_END
} // namespace absl
-// The CordTest fixture runs all tests with and without Cord Btree enabled.
-class CordTest : public testing::TestWithParam<bool> {
+// The CordTest fixture runs all tests with and without expected CRCs being
+// set on the subject Cords.
+class CordTest : public testing::TestWithParam<int> {
public:
- CordTest() : was_btree_(absl::cord_internal::cord_btree_enabled.load()) {
- absl::cord_internal::cord_btree_enabled.store(UseBtree());
+  // Returns true if the test is running with expected CRCs set on the Cords.
+  bool UseCrc() const { return GetParam() == 1; }
+ void MaybeHarden(absl::Cord& c) {
+ if (UseCrc()) {
+ c.SetExpectedChecksum(1);
+ }
}
- ~CordTest() override {
- absl::cord_internal::cord_btree_enabled.store(was_btree_);
+ absl::Cord MaybeHardened(absl::Cord c) {
+ MaybeHarden(c);
+ return c;
}
- // Returns true if test is running with btree enabled.
- bool UseBtree() const { return GetParam(); }
-
// Returns human readable string representation of the test parameter.
- static std::string ToString(testing::TestParamInfo<bool> param) {
- return param.param ? "Btree" : "Concat";
+ static std::string ToString(testing::TestParamInfo<int> param) {
+ switch (param.param) {
+ case 0:
+ return "Btree";
+ case 1:
+ return "BtreeHardened";
+ default:
+ assert(false);
+ return "???";
+ }
}
-
- private:
- const bool was_btree_;
};
-INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Bool(),
+INSTANTIATE_TEST_SUITE_P(WithParam, CordTest, testing::Values(0, 1),
CordTest::ToString);
+TEST(CordRepFlat, AllFlatCapacities) {
+ // Explicitly and redundantly assert built-in min/max limits
+ static_assert(absl::cord_internal::kFlatOverhead < 32, "");
+ static_assert(absl::cord_internal::kMinFlatSize == 32, "");
+ static_assert(absl::cord_internal::kMaxLargeFlatSize == 256 << 10, "");
+ EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(FLAT), 32);
+ EXPECT_EQ(absl::cord_internal::TagToAllocatedSize(MAX_FLAT_TAG), 256 << 10);
+
+  // Verify that all tags map perfectly back and forth, and
+ // that sizes are monotonically increasing.
+ size_t last_size = 0;
+ for (int tag = FLAT; tag <= MAX_FLAT_TAG; ++tag) {
+ size_t size = absl::cord_internal::TagToAllocatedSize(tag);
+ ASSERT_GT(size, last_size);
+ ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+ last_size = size;
+ }
+
+  // All flat sizes from 32 to 512 have 8 byte granularity
+ for (size_t size = 32; size <= 512; size += 8) {
+ ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+ uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+ ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+ }
+
+  // All flat sizes from 512 to 8192 have 64 byte granularity
+ for (size_t size = 512; size <= 8192; size += 64) {
+ ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+ uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+ ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+ }
+
+  // All flat sizes from 8KB to 256KB have 4KB granularity
+ for (size_t size = 8192; size <= 256 * 1024; size += 4 * 1024) {
+ ASSERT_EQ(absl::cord_internal::RoundUpForTag(size), size);
+ uint8_t tag = absl::cord_internal::AllocatedSizeToTag(size);
+ ASSERT_EQ(absl::cord_internal::TagToAllocatedSize(tag), size);
+ }
+}
+
+TEST(CordRepFlat, MaxFlatSize) {
+ CordRepFlat* flat = CordRepFlat::New(kMaxFlatLength);
+ EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
+ CordRep::Unref(flat);
+
+ flat = CordRepFlat::New(kMaxFlatLength * 4);
+ EXPECT_EQ(flat->Capacity(), kMaxFlatLength);
+ CordRep::Unref(flat);
+}
+
+TEST(CordRepFlat, MaxLargeFlatSize) {
+ const size_t size = 256 * 1024 - kFlatOverhead;
+ CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), size);
+ EXPECT_GE(flat->Capacity(), size);
+ CordRep::Unref(flat);
+}
+
+TEST(CordRepFlat, AllFlatSizes) {
+ const size_t kMaxSize = 256 * 1024;
+  for (size_t size = 32; size <= kMaxSize; size *= 2) {
+ const size_t length = size - kFlatOverhead - 1;
+ CordRepFlat* flat = CordRepFlat::New(CordRepFlat::Large(), length);
+ EXPECT_GE(flat->Capacity(), length);
+ memset(flat->Data(), 0xCD, flat->Capacity());
+ CordRep::Unref(flat);
+ }
+}
+
TEST_P(CordTest, AllFlatSizes) {
using absl::strings_internal::CordTestAccess;
@@ -243,6 +335,7 @@ TEST_P(CordTest, AllFlatSizes) {
}
absl::Cord dst(src);
+ MaybeHarden(dst);
EXPECT_EQ(std::string(dst), src) << s;
}
}
@@ -274,6 +367,7 @@ TEST_P(CordTest, GigabyteCordFromExternal) {
c.Append(from);
c.Append(from);
c.Append(from);
+ MaybeHarden(c);
}
for (int i = 0; i < 1024; ++i) {
@@ -302,6 +396,8 @@ bool my_unique_true_boolean = true;
TEST_P(CordTest, Assignment) {
absl::Cord x(absl::string_view("hi there"));
absl::Cord y(x);
+ MaybeHarden(y);
+ ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
ASSERT_EQ(std::string(x), "hi there");
ASSERT_EQ(std::string(y), "hi there");
ASSERT_TRUE(x == y);
@@ -355,6 +451,7 @@ TEST_P(CordTest, Assignment) {
TEST_P(CordTest, StartsEndsWith) {
absl::Cord x(absl::string_view("abcde"));
+ MaybeHarden(x);
absl::Cord empty("");
ASSERT_TRUE(x.StartsWith(absl::Cord("abcde")));
@@ -392,6 +489,7 @@ TEST_P(CordTest, Subcord) {
absl::Cord a;
AppendWithFragments(s, &rng, &a);
+ MaybeHarden(a);
ASSERT_EQ(s, std::string(a));
// Check subcords of a, from a variety of interesting points.
@@ -413,6 +511,9 @@ TEST_P(CordTest, Subcord) {
ASSERT_EQ(absl::string_view(s).substr(pos, end_pos - pos),
std::string(sa))
<< a;
+ if (pos != 0 || end_pos != a.size()) {
+ ASSERT_EQ(sa.ExpectedChecksum(), absl::nullopt);
+ }
}
}
@@ -452,10 +553,19 @@ TEST_P(CordTest, Swap) {
absl::string_view b("Mandark");
absl::Cord x(a);
absl::Cord y(b);
+ MaybeHarden(x);
swap(x, y);
+ if (UseCrc()) {
+ ASSERT_EQ(x.ExpectedChecksum(), absl::nullopt);
+ ASSERT_EQ(y.ExpectedChecksum(), 1);
+ }
ASSERT_EQ(x, absl::Cord(b));
ASSERT_EQ(y, absl::Cord(a));
x.swap(y);
+ if (UseCrc()) {
+ ASSERT_EQ(x.ExpectedChecksum(), 1);
+ ASSERT_EQ(y.ExpectedChecksum(), absl::nullopt);
+ }
ASSERT_EQ(x, absl::Cord(a));
ASSERT_EQ(y, absl::Cord(b));
}
@@ -480,11 +590,289 @@ static void VerifyCopyToString(const absl::Cord& cord) {
}
TEST_P(CordTest, CopyToString) {
- VerifyCopyToString(absl::Cord());
- VerifyCopyToString(absl::Cord("small cord"));
- VerifyCopyToString(
+ VerifyCopyToString(absl::Cord()); // empty cords cannot carry CRCs
+ VerifyCopyToString(MaybeHardened(absl::Cord("small cord")));
+ VerifyCopyToString(MaybeHardened(
absl::MakeFragmentedCord({"fragmented ", "cord ", "to ", "test ",
- "copying ", "to ", "a ", "string."}));
+ "copying ", "to ", "a ", "string."})));
+}
+
+TEST_P(CordTest, AppendEmptyBuffer) {
+ absl::Cord cord;
+ cord.Append(absl::CordBuffer());
+ cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendEmptyBufferToFlat) {
+ absl::Cord cord(std::string(2000, 'x'));
+ cord.Append(absl::CordBuffer());
+ cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendEmptyBufferToTree) {
+ absl::Cord cord(std::string(2000, 'x'));
+ cord.Append(std::string(2000, 'y'));
+ cord.Append(absl::CordBuffer());
+ cord.Append(absl::CordBuffer::CreateWithDefaultLimit(2000));
+}
+
+TEST_P(CordTest, AppendSmallBuffer) {
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ ASSERT_THAT(buffer.capacity(), ::testing::Le(15));
+ memcpy(buffer.data(), "Abc", 3);
+ buffer.SetLength(3);
+ cord.Append(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ memcpy(buffer.data(), "defgh", 5);
+ buffer.SetLength(5);
+ cord.Append(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("Abcdefgh"));
+}
+
+TEST_P(CordTest, AppendAndPrependBufferArePrecise) {
+ // Create a cord large enough to force 40KB flats.
+ std::string test_data(absl::cord_internal::kMaxFlatLength * 10, 'x');
+ absl::Cord cord1(test_data);
+ absl::Cord cord2(test_data);
+ const size_t size1 = cord1.EstimatedMemoryUsage();
+ const size_t size2 = cord2.EstimatedMemoryUsage();
+
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ memcpy(buffer.data(), "Abc", 3);
+ buffer.SetLength(3);
+ cord1.Append(std::move(buffer));
+
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ memcpy(buffer.data(), "Abc", 3);
+ buffer.SetLength(3);
+ cord2.Prepend(std::move(buffer));
+
+#ifndef NDEBUG
+ // Allow 32 bytes new CordRepFlat, and 128 bytes for 'glue nodes'
+ constexpr size_t kMaxDelta = 128 + 32;
+#else
+ // Allow 256 bytes extra for 'allocation debug overhead'
+ constexpr size_t kMaxDelta = 128 + 32 + 256;
+#endif
+
+ EXPECT_LE(cord1.EstimatedMemoryUsage() - size1, kMaxDelta);
+ EXPECT_LE(cord2.EstimatedMemoryUsage() - size2, kMaxDelta);
+
+ EXPECT_EQ(cord1, absl::StrCat(test_data, "Abc"));
+ EXPECT_EQ(cord2, absl::StrCat("Abc", test_data));
+}
+
+TEST_P(CordTest, PrependSmallBuffer) {
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ ASSERT_THAT(buffer.capacity(), ::testing::Le(15));
+ memcpy(buffer.data(), "Abc", 3);
+ buffer.SetLength(3);
+ cord.Prepend(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(3);
+ memcpy(buffer.data(), "defgh", 5);
+ buffer.SetLength(5);
+ cord.Prepend(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre("defghAbc"));
+}
+
+TEST_P(CordTest, AppendLargeBuffer) {
+ absl::Cord cord;
+
+ std::string s1(700, '1');
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
+ memcpy(buffer.data(), s1.data(), s1.size());
+ buffer.SetLength(s1.size());
+ cord.Append(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ std::string s2(1000, '2');
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
+ memcpy(buffer.data(), s2.data(), s2.size());
+ buffer.SetLength(s2.size());
+ cord.Append(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s1, s2));
+}
+
+TEST_P(CordTest, PrependLargeBuffer) {
+ absl::Cord cord;
+
+ std::string s1(700, '1');
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(s1.size());
+ memcpy(buffer.data(), s1.data(), s1.size());
+ buffer.SetLength(s1.size());
+ cord.Prepend(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ std::string s2(1000, '2');
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(s2.size());
+ memcpy(buffer.data(), s2.data(), s2.size());
+ buffer.SetLength(s2.size());
+ cord.Prepend(std::move(buffer));
+ EXPECT_EQ(buffer.length(), 0); // NOLINT
+ EXPECT_GT(buffer.capacity(), 0); // NOLINT
+
+ EXPECT_THAT(cord.Chunks(), ::testing::ElementsAre(s2, s1));
+}
+
+TEST_P(CordTest, GetAppendBufferOnEmptyCord) {
+ absl::Cord cord;
+ absl::CordBuffer buffer = cord.GetAppendBuffer(1000);
+ EXPECT_GE(buffer.capacity(), 1000);
+ EXPECT_EQ(buffer.length(), 0);
+}
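+
+// Editor's note (illustrative, not part of this change): the round-trip
+// pattern GetAppendBuffer() is designed for. The cord's trailing flat is
+// extracted into the buffer, extended in place, and appended back without
+// copying; `Produce` is a hypothetical byte producer.
+//
+//   absl::CordBuffer buffer = cord.GetAppendBuffer(/*capacity=*/512);
+//   absl::Span<char> span = buffer.available_up_to(512);
+//   size_t n = Produce(span.data(), span.size());
+//   buffer.IncreaseLengthBy(n);
+//   cord.Append(std::move(buffer));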
+
+TEST_P(CordTest, GetAppendBufferOnInlinedCord) {
+ static constexpr int kInlinedSize = sizeof(absl::CordBuffer) - 1;
+ for (int size : {6, kInlinedSize - 3, kInlinedSize - 2, 1000}) {
+ absl::Cord cord("Abc");
+ absl::CordBuffer buffer = cord.GetAppendBuffer(size, 1);
+ EXPECT_GE(buffer.capacity(), 3 + size);
+ EXPECT_EQ(buffer.length(), 3);
+ EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+ EXPECT_TRUE(cord.empty());
+ }
+}
+
+TEST_P(CordTest, GetAppendBufferOnInlinedCordWithCapacityCloseToMax) {
+  // Cover the case of a non-empty inlined cord of some size 'n' that is asked
+  // for a capacity near 'uint64_max - k'. Internal logic could overflow on
+  // 'uint64_max - k + size'; the call should instead return a valid, if
+  // smaller, buffer capped at the maximum allowed size.
+ for (size_t dist_from_max = 0; dist_from_max <= 4; ++dist_from_max) {
+ absl::Cord cord("Abc");
+ size_t size = std::numeric_limits<size_t>::max() - dist_from_max;
+ absl::CordBuffer buffer = cord.GetAppendBuffer(size, 1);
+ EXPECT_EQ(buffer.capacity(), absl::CordBuffer::kDefaultLimit);
+ EXPECT_EQ(buffer.length(), 3);
+ EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+ EXPECT_TRUE(cord.empty());
+ }
+}
+
+TEST_P(CordTest, GetAppendBufferOnFlat) {
+ // Create a cord with a single flat and extra capacity
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(3);
+ memcpy(buffer.data(), "Abc", 3);
+ cord.Append(std::move(buffer));
+
+ buffer = cord.GetAppendBuffer(6);
+ EXPECT_GE(buffer.capacity(), 500);
+ EXPECT_EQ(buffer.length(), 3);
+ EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), "Abc");
+ EXPECT_TRUE(cord.empty());
+}
+
+TEST_P(CordTest, GetAppendBufferOnFlatWithoutMinCapacity) {
+ // Create a cord with a single flat and extra capacity
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(30);
+ memset(buffer.data(), 'x', 30);
+ cord.Append(std::move(buffer));
+
+ buffer = cord.GetAppendBuffer(1000, 900);
+ EXPECT_GE(buffer.capacity(), 1000);
+ EXPECT_EQ(buffer.length(), 0);
+ EXPECT_EQ(cord, std::string(30, 'x'));
+}
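+
+// Editor's note: the second argument above is `min_capacity`. If the cord's
+// trailing flat cannot offer at least that much spare room, GetAppendBuffer()
+// leaves the cord unchanged and returns a fresh, empty buffer instead, which
+// is why `buffer.length()` is 0 and the cord still holds its 30 'x' bytes.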
+
+TEST_P(CordTest, GetAppendBufferOnTree) {
+ RandomEngine rng;
+ for (int num_flats : {2, 3, 100}) {
+ // Create a cord with `num_flats` flats and extra capacity
+ absl::Cord cord;
+ std::string prefix;
+ std::string last;
+ for (int i = 0; i < num_flats - 1; ++i) {
+ prefix += last;
+ last = RandomLowercaseString(&rng, 10);
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(10);
+ memcpy(buffer.data(), last.data(), 10);
+ cord.Append(std::move(buffer));
+ }
+ absl::CordBuffer buffer = cord.GetAppendBuffer(6);
+ EXPECT_GE(buffer.capacity(), 500);
+ EXPECT_EQ(buffer.length(), 10);
+ EXPECT_EQ(absl::string_view(buffer.data(), buffer.length()), last);
+ EXPECT_EQ(cord, prefix);
+ }
+}
+
+TEST_P(CordTest, GetAppendBufferOnTreeWithoutMinCapacity) {
+ absl::Cord cord;
+ for (int i = 0; i < 2; ++i) {
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(3);
+ memcpy(buffer.data(), i ? "def" : "Abc", 3);
+ cord.Append(std::move(buffer));
+ }
+ absl::CordBuffer buffer = cord.GetAppendBuffer(1000, 900);
+ EXPECT_GE(buffer.capacity(), 1000);
+ EXPECT_EQ(buffer.length(), 0);
+ EXPECT_EQ(cord, "Abcdef");
+}
+
+TEST_P(CordTest, GetAppendBufferOnSubstring) {
+ // Create a large cord with a single flat and some extra capacity
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(450);
+ memset(buffer.data(), 'x', 450);
+ cord.Append(std::move(buffer));
+ cord.RemovePrefix(1);
+
+ // Deny on substring
+ buffer = cord.GetAppendBuffer(6);
+ EXPECT_EQ(buffer.length(), 0);
+ EXPECT_EQ(cord, std::string(449, 'x'));
+}
+
+TEST_P(CordTest, GetAppendBufferOnSharedCord) {
+ // Create a shared cord with a single flat and extra capacity
+ absl::Cord cord;
+ absl::CordBuffer buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(3);
+ memcpy(buffer.data(), "Abc", 3);
+ cord.Append(std::move(buffer));
+ absl::Cord shared_cord = cord;
+
+ // Deny on flat
+ buffer = cord.GetAppendBuffer(6);
+ EXPECT_EQ(buffer.length(), 0);
+ EXPECT_EQ(cord, "Abc");
+
+ buffer = absl::CordBuffer::CreateWithDefaultLimit(500);
+ buffer.SetLength(3);
+ memcpy(buffer.data(), "def", 3);
+ cord.Append(std::move(buffer));
+ shared_cord = cord;
+
+ // Deny on tree
+ buffer = cord.GetAppendBuffer(6);
+ EXPECT_EQ(buffer.length(), 0);
+ EXPECT_EQ(cord, "Abcdef");
}
TEST_P(CordTest, TryFlatEmpty) {
@@ -494,45 +882,43 @@ TEST_P(CordTest, TryFlatEmpty) {
TEST_P(CordTest, TryFlatFlat) {
absl::Cord c("hello");
+ MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "hello");
}
TEST_P(CordTest, TryFlatSubstrInlined) {
absl::Cord c("hello");
c.RemovePrefix(1);
+ MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "ello");
}
TEST_P(CordTest, TryFlatSubstrFlat) {
absl::Cord c("longer than 15 bytes");
absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
+ MaybeHarden(sub);
EXPECT_EQ(sub.TryFlat(), "onger than 15 bytes");
}
TEST_P(CordTest, TryFlatConcat) {
absl::Cord c = absl::MakeFragmentedCord({"hel", "lo"});
+ MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), absl::nullopt);
}
TEST_P(CordTest, TryFlatExternal) {
absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
+ MaybeHarden(c);
EXPECT_EQ(c.TryFlat(), "hell");
}
TEST_P(CordTest, TryFlatSubstrExternal) {
absl::Cord c = absl::MakeCordFromExternal("hell", [](absl::string_view) {});
absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
+ MaybeHarden(sub);
EXPECT_EQ(sub.TryFlat(), "ell");
}
-TEST_P(CordTest, TryFlatSubstrConcat) {
- absl::Cord c = absl::MakeFragmentedCord({"hello", " world"});
- absl::Cord sub = absl::CordTestPeer::MakeSubstring(c, 1, c.size() - 1);
- EXPECT_EQ(sub.TryFlat(), absl::nullopt);
- c.RemovePrefix(1);
- EXPECT_EQ(c.TryFlat(), absl::nullopt);
-}
-
TEST_P(CordTest, TryFlatCommonlyAssumedInvariants) {
// The behavior tested below is not part of the API contract of Cord, but it's
// something we intend to be true in our current implementation. This test
@@ -547,6 +933,7 @@ TEST_P(CordTest, TryFlatCommonlyAssumedInvariants) {
"returned by the ",
"iterator"};
absl::Cord c = absl::MakeFragmentedCord(fragments);
+ MaybeHarden(c);
int fragment = 0;
int offset = 0;
absl::Cord::CharIterator itc = c.char_begin();
@@ -591,13 +978,15 @@ static void VerifyFlatten(absl::Cord c) {
TEST_P(CordTest, Flatten) {
VerifyFlatten(absl::Cord());
- VerifyFlatten(absl::Cord("small cord"));
- VerifyFlatten(absl::Cord("larger than small buffer optimization"));
- VerifyFlatten(absl::MakeFragmentedCord({"small ", "fragmented ", "cord"}));
+ VerifyFlatten(MaybeHardened(absl::Cord("small cord")));
+ VerifyFlatten(
+ MaybeHardened(absl::Cord("larger than small buffer optimization")));
+ VerifyFlatten(MaybeHardened(
+ absl::MakeFragmentedCord({"small ", "fragmented ", "cord"})));
// Test with a cord that is longer than the largest flat buffer
RandomEngine rng(GTEST_FLAG_GET(random_seed));
- VerifyFlatten(absl::Cord(RandomLowercaseString(&rng, 8192)));
+ VerifyFlatten(MaybeHardened(absl::Cord(RandomLowercaseString(&rng, 8192))));
}
// Test data
@@ -651,22 +1040,26 @@ TEST_P(CordTest, MultipleLengths) {
{ // Construct from Cord
absl::Cord tmp(a);
absl::Cord x(tmp);
+ MaybeHarden(x);
EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
}
{ // Construct from absl::string_view
absl::Cord x(a);
+ MaybeHarden(x);
EXPECT_EQ(a, std::string(x)) << "'" << a << "'";
}
{ // Append cord to self
absl::Cord self(a);
+ MaybeHarden(self);
self.Append(self);
EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
}
{ // Prepend cord to self
absl::Cord self(a);
+ MaybeHarden(self);
self.Prepend(self);
EXPECT_EQ(a + a, std::string(self)) << "'" << a << "' + '" << a << "'";
}
@@ -678,12 +1071,14 @@ TEST_P(CordTest, MultipleLengths) {
{ // CopyFrom Cord
absl::Cord x(a);
absl::Cord y(b);
+ MaybeHarden(x);
x = y;
EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{ // CopyFrom absl::string_view
absl::Cord x(a);
+ MaybeHarden(x);
x = b;
EXPECT_EQ(b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
@@ -691,12 +1086,14 @@ TEST_P(CordTest, MultipleLengths) {
{ // Cord::Append(Cord)
absl::Cord x(a);
absl::Cord y(b);
+ MaybeHarden(x);
x.Append(y);
EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
{ // Cord::Append(absl::string_view)
absl::Cord x(a);
+ MaybeHarden(x);
x.Append(b);
EXPECT_EQ(a + b, std::string(x)) << "'" << a << "' + '" << b << "'";
}
@@ -704,12 +1101,14 @@ TEST_P(CordTest, MultipleLengths) {
{ // Cord::Prepend(Cord)
absl::Cord x(a);
absl::Cord y(b);
+ MaybeHarden(x);
x.Prepend(y);
EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
}
{ // Cord::Prepend(absl::string_view)
absl::Cord x(a);
+ MaybeHarden(x);
x.Prepend(b);
EXPECT_EQ(b + a, std::string(x)) << "'" << b << "' + '" << a << "'";
}
@@ -722,13 +1121,16 @@ namespace {
TEST_P(CordTest, RemoveSuffixWithExternalOrSubstring) {
absl::Cord cord = absl::MakeCordFromExternal(
"foo bar baz", [](absl::string_view s) { DoNothing(s, nullptr); });
-
EXPECT_EQ("foo bar baz", std::string(cord));
+ MaybeHarden(cord);
+
// This RemoveSuffix() will wrap the EXTERNAL node in a SUBSTRING node.
cord.RemoveSuffix(4);
EXPECT_EQ("foo bar", std::string(cord));
+ MaybeHarden(cord);
+
// This RemoveSuffix() will adjust the SUBSTRING node in-place.
cord.RemoveSuffix(4);
EXPECT_EQ("foo", std::string(cord));
@@ -738,6 +1140,7 @@ TEST_P(CordTest, RemoveSuffixMakesZeroLengthNode) {
absl::Cord c;
c.Append(absl::Cord(std::string(100, 'x')));
absl::Cord other_ref = c; // Prevent inplace appends
+ MaybeHarden(c);
c.Append(absl::Cord(std::string(200, 'y')));
c.RemoveSuffix(200);
EXPECT_EQ(std::string(100, 'x'), std::string(c));
@@ -763,6 +1166,7 @@ absl::Cord CordWithZedBlock(size_t size) {
// Establish that ZedBlock does what we think it does.
TEST_P(CordTest, CordSpliceTestZedBlock) {
absl::Cord blob = CordWithZedBlock(10);
+ MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
std::string s;
absl::CopyCordToString(blob, &s);
@@ -771,6 +1175,7 @@ TEST_P(CordTest, CordSpliceTestZedBlock) {
TEST_P(CordTest, CordSpliceTestZedBlock0) {
absl::Cord blob = CordWithZedBlock(0);
+ MaybeHarden(blob);
EXPECT_EQ(0, blob.size());
std::string s;
absl::CopyCordToString(blob, &s);
@@ -779,6 +1184,7 @@ TEST_P(CordTest, CordSpliceTestZedBlock0) {
TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) {
absl::Cord blob = CordWithZedBlock(10);
+ MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
absl::Cord suffix(blob);
suffix.RemovePrefix(9);
@@ -791,6 +1197,7 @@ TEST_P(CordTest, CordSpliceTestZedBlockSuffix1) {
// Remove all of a prefix block
TEST_P(CordTest, CordSpliceTestZedBlockSuffix0) {
absl::Cord blob = CordWithZedBlock(10);
+ MaybeHarden(blob);
EXPECT_EQ(10, blob.size());
absl::Cord suffix(blob);
suffix.RemovePrefix(10);
@@ -823,6 +1230,7 @@ absl::Cord SpliceCord(const absl::Cord& blob, int64_t offset,
// Taking an empty suffix of a block breaks appending.
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) {
absl::Cord zero = CordWithZedBlock(10);
+ MaybeHarden(zero);
absl::Cord suffix(zero);
suffix.RemovePrefix(10);
absl::Cord result;
@@ -831,6 +1239,7 @@ TEST_P(CordTest, CordSpliceTestRemoveEntireBlock1) {
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) {
absl::Cord zero = CordWithZedBlock(10);
+ MaybeHarden(zero);
absl::Cord prefix(zero);
prefix.RemoveSuffix(10);
absl::Cord suffix(zero);
@@ -842,13 +1251,19 @@ TEST_P(CordTest, CordSpliceTestRemoveEntireBlock2) {
TEST_P(CordTest, CordSpliceTestRemoveEntireBlock3) {
absl::Cord blob = CordWithZedBlock(10);
absl::Cord block = BigCord(10, 'b');
+ MaybeHarden(blob);
+ MaybeHarden(block);
blob = SpliceCord(blob, 0, block);
}
struct CordCompareTestCase {
template <typename LHS, typename RHS>
- CordCompareTestCase(const LHS& lhs, const RHS& rhs)
- : lhs_cord(lhs), rhs_cord(rhs) {}
+ CordCompareTestCase(const LHS& lhs, const RHS& rhs, bool use_crc)
+ : lhs_cord(lhs), rhs_cord(rhs) {
+ if (use_crc) {
+ lhs_cord.SetExpectedChecksum(1);
+ }
+ }
absl::Cord lhs_cord;
absl::Cord rhs_cord;
@@ -885,47 +1300,54 @@ TEST_P(CordTest, Compare) {
concat2.Append("cccccccccccDDDDDDDDDDDDDD");
concat2.Append("DD");
+ const bool use_crc = UseCrc();
+
std::vector<CordCompareTestCase> test_cases = {{
// Inline cords
- {"abcdef", "abcdef"},
- {"abcdef", "abcdee"},
- {"abcdef", "abcdeg"},
- {"bbcdef", "abcdef"},
- {"bbcdef", "abcdeg"},
- {"abcdefa", "abcdef"},
- {"abcdef", "abcdefa"},
+ {"abcdef", "abcdef", use_crc},
+ {"abcdef", "abcdee", use_crc},
+ {"abcdef", "abcdeg", use_crc},
+ {"bbcdef", "abcdef", use_crc},
+ {"bbcdef", "abcdeg", use_crc},
+ {"abcdefa", "abcdef", use_crc},
+ {"abcdef", "abcdefa", use_crc},
// Small flat cords
- {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD"},
- {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD"},
- {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD"},
- {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX"},
- {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD"},
- {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa"},
+ {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
+ {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBxccccDDDDD", use_crc},
+ {"aaaaaBBBBBcxcccDDDDD", "aaaaaBBBBBcccccDDDDD", use_crc},
+ {"aaaaaBBBBBxccccDDDDD", "aaaaaBBBBBcccccDDDDX", use_crc},
+ {"aaaaaBBBBBcccccDDDDDa", "aaaaaBBBBBcccccDDDDD", use_crc},
+ {"aaaaaBBBBBcccccDDDDD", "aaaaaBBBBBcccccDDDDDa", use_crc},
// Subcords
- {subcord, subcord},
- {subcord, "aaBBBBBccc"},
- {subcord, "aaBBBBBccd"},
- {subcord, "aaBBBBBccb"},
- {subcord, "aaBBBBBxcb"},
- {subcord, "aaBBBBBccca"},
- {subcord, "aaBBBBBcc"},
+ {subcord, subcord, use_crc},
+ {subcord, "aaBBBBBccc", use_crc},
+ {subcord, "aaBBBBBccd", use_crc},
+ {subcord, "aaBBBBBccb", use_crc},
+ {subcord, "aaBBBBBxcb", use_crc},
+ {subcord, "aaBBBBBccca", use_crc},
+ {subcord, "aaBBBBBcc", use_crc},
// Concats
- {concat, concat},
+ {concat, concat, use_crc},
{concat,
- "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD"},
+ "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDD",
+ use_crc},
{concat,
- "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD"},
+ "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBcccccccccccccccxDDDDDDDDDDDDDDDD",
+ use_crc},
{concat,
- "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD"},
+ "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBacccccccccccccccDDDDDDDDDDDDDDDD",
+ use_crc},
{concat,
- "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD"},
+ "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDD",
+ use_crc},
{concat,
- "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe"},
+ "aaaaaaaaaaaaaaaaBBBBBBBBBBBBBBBBccccccccccccccccDDDDDDDDDDDDDDDDe",
+ use_crc},
- {concat, concat2},
+ {concat, concat2, use_crc},
}};
for (const auto& tc : test_cases) {
@@ -936,6 +1358,7 @@ TEST_P(CordTest, Compare) {
TEST_P(CordTest, CompareAfterAssign) {
absl::Cord a("aaaaaa1111111");
absl::Cord b("aaaaaa2222222");
+ MaybeHarden(a);
a = "cccccc";
b = "cccccc";
EXPECT_EQ(a, b);
@@ -994,6 +1417,8 @@ TEST_P(CordTest, CompareRandomComparisons) {
d.Append(a[GetUniformRandomUpTo(&rng, ABSL_ARRAYSIZE(a))]);
}
std::bernoulli_distribution coin_flip(0.5);
+ MaybeHarden(c);
+ MaybeHarden(d);
TestCompare(coin_flip(rng) ? c : absl::Cord(std::string(c)),
coin_flip(rng) ? d : absl::Cord(std::string(d)), &rng);
}
@@ -1119,6 +1544,7 @@ TEST_P(CordTest, ConstructFromExternalCompareContents) {
EXPECT_EQ(external->size(), sv.size());
delete external;
});
+ MaybeHarden(cord);
EXPECT_EQ(data, cord);
}
}
@@ -1134,7 +1560,7 @@ TEST_P(CordTest, ConstructFromExternalLargeReleaser) {
EXPECT_EQ(data, absl::string_view(data_array.data(), data_array.size()));
invoked = true;
};
- (void)absl::MakeCordFromExternal(data, releaser);
+ (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
EXPECT_TRUE(invoked);
}
@@ -1147,11 +1573,11 @@ TEST_P(CordTest, ConstructFromExternalFunctionPointerReleaser) {
invoked = true;
});
invoked = false;
- (void)absl::MakeCordFromExternal(data, releaser);
+ (void)MaybeHardened(absl::MakeCordFromExternal(data, releaser));
EXPECT_TRUE(invoked);
invoked = false;
- (void)absl::MakeCordFromExternal(data, *releaser);
+ (void)MaybeHardened(absl::MakeCordFromExternal(data, *releaser));
EXPECT_TRUE(invoked);
}
@@ -1165,20 +1591,21 @@ TEST_P(CordTest, ConstructFromExternalMoveOnlyReleaser) {
};
bool invoked = false;
- (void)absl::MakeCordFromExternal("dummy", Releaser(&invoked));
+ (void)MaybeHardened(absl::MakeCordFromExternal("dummy", Releaser(&invoked)));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalNoArgLambda) {
bool invoked = false;
- (void)absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; });
+ (void)MaybeHardened(
+ absl::MakeCordFromExternal("dummy", [&invoked]() { invoked = true; }));
EXPECT_TRUE(invoked);
}
TEST_P(CordTest, ConstructFromExternalStringViewArgLambda) {
bool invoked = false;
- (void)absl::MakeCordFromExternal(
- "dummy", [&invoked](absl::string_view) { invoked = true; });
+ (void)MaybeHardened(absl::MakeCordFromExternal(
+ "dummy", [&invoked](absl::string_view) { invoked = true; }));
EXPECT_TRUE(invoked);
}
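+
+// Editor's note: a hedged usage sketch, not part of this change. The releaser
+// runs exactly once, when the last Cord referencing the external bytes goes
+// away; until then the caller must keep the bytes alive and unchanged.
+// `FillBuffer` is hypothetical.
+//
+//   char* data = new char[1 << 20];
+//   FillBuffer(data, 1 << 20);
+//   absl::Cord cord = absl::MakeCordFromExternal(
+//       absl::string_view(data, 1 << 20),
+//       [data](absl::string_view) { delete[] data; });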
@@ -1193,43 +1620,78 @@ TEST_P(CordTest, ConstructFromExternalNonTrivialReleaserDestructor) {
bool destroyed = false;
Releaser releaser(&destroyed);
- (void)absl::MakeCordFromExternal("dummy", releaser);
+ (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser));
EXPECT_TRUE(destroyed);
}
TEST_P(CordTest, ConstructFromExternalReferenceQualifierOverloads) {
- struct Releaser {
- void operator()(absl::string_view) & { *lvalue_invoked = true; }
- void operator()(absl::string_view) && { *rvalue_invoked = true; }
+ enum InvokedAs { kMissing, kLValue, kRValue };
+ enum CopiedAs { kNone, kMove, kCopy };
+ struct Tracker {
+ CopiedAs copied_as = kNone;
+ InvokedAs invoked_as = kMissing;
+
+ void Record(InvokedAs rhs) {
+ ASSERT_EQ(invoked_as, kMissing);
+ invoked_as = rhs;
+ }
+
+ void Record(CopiedAs rhs) {
+ if (copied_as == kNone || rhs == kCopy) copied_as = rhs;
+ }
+ } tracker;
+
+ class Releaser {
+ public:
+ explicit Releaser(Tracker* tracker) : tr_(tracker) { *tracker = Tracker(); }
+ Releaser(Releaser&& rhs) : tr_(rhs.tr_) { tr_->Record(kMove); }
+ Releaser(const Releaser& rhs) : tr_(rhs.tr_) { tr_->Record(kCopy); }
- bool* lvalue_invoked;
- bool* rvalue_invoked;
+ void operator()(absl::string_view) & { tr_->Record(kLValue); }
+ void operator()(absl::string_view) && { tr_->Record(kRValue); }
+
+ private:
+ Tracker* tr_;
};
- bool lvalue_invoked = false;
- bool rvalue_invoked = false;
- Releaser releaser = {&lvalue_invoked, &rvalue_invoked};
- (void)absl::MakeCordFromExternal("", releaser);
- EXPECT_FALSE(lvalue_invoked);
- EXPECT_TRUE(rvalue_invoked);
- rvalue_invoked = false;
+ const Releaser releaser1(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("", releaser1));
+ EXPECT_EQ(tracker.copied_as, kCopy);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
+
+ const Releaser releaser2(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("", releaser2));
+ EXPECT_EQ(tracker.copied_as, kCopy);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
- (void)absl::MakeCordFromExternal("dummy", releaser);
- EXPECT_FALSE(lvalue_invoked);
- EXPECT_TRUE(rvalue_invoked);
- rvalue_invoked = false;
+ Releaser releaser3(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("", std::move(releaser3)));
+ EXPECT_EQ(tracker.copied_as, kMove);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
- // NOLINTNEXTLINE: suppress clang-tidy std::move on trivially copyable type.
- (void)absl::MakeCordFromExternal("dummy", std::move(releaser));
- EXPECT_FALSE(lvalue_invoked);
- EXPECT_TRUE(rvalue_invoked);
+ Releaser releaser4(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser4));
+ EXPECT_EQ(tracker.copied_as, kCopy);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
+
+ const Releaser releaser5(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("dummy", releaser5));
+ EXPECT_EQ(tracker.copied_as, kCopy);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
+
+ Releaser releaser6(&tracker);
+ (void)MaybeHardened(absl::MakeCordFromExternal("foo", std::move(releaser6)));
+ EXPECT_EQ(tracker.copied_as, kMove);
+ EXPECT_EQ(tracker.invoked_as, kRValue);
}
TEST_P(CordTest, ExternalMemoryBasicUsage) {
static const char* strings[] = {"", "hello", "there"};
for (const char* str : strings) {
absl::Cord dst("(prefix)");
+ MaybeHarden(dst);
AddExternalMemory(str, &dst);
+ MaybeHarden(dst);
dst.Append("(suffix)");
EXPECT_EQ((std::string("(prefix)") + str + std::string("(suffix)")),
std::string(dst));
@@ -1243,7 +1705,9 @@ TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) {
for (int offset = 0; offset <= s.size(); offset++) {
for (int length = 0; length <= s.size() - offset; length++) {
absl::Cord result(cord);
+ MaybeHarden(result);
result.RemovePrefix(offset);
+ MaybeHarden(result);
result.RemoveSuffix(result.size() - length);
EXPECT_EQ(s.substr(offset, length), std::string(result))
<< offset << " " << length;
@@ -1254,8 +1718,10 @@ TEST_P(CordTest, ExternalMemoryRemovePrefixSuffix) {
TEST_P(CordTest, ExternalMemoryGet) {
absl::Cord cord("hello");
AddExternalMemory(" world!", &cord);
+ MaybeHarden(cord);
AddExternalMemory(" how are ", &cord);
cord.Append(" you?");
+ MaybeHarden(cord);
std::string s = std::string(cord);
for (int i = 0; i < s.size(); i++) {
EXPECT_EQ(s[i], cord[i]);
@@ -1263,76 +1729,133 @@ TEST_P(CordTest, ExternalMemoryGet) {
}
// CordMemoryUsage tests verify the correctness of EstimatedMemoryUsage().
-// These tests take into account that the reported memory usage is approximate
-// and non-deterministic. For all tests, We verify that the reported memory
-// usage is larger than `size()`, and less than `size() * 1.5` as a cord should
-// never reserve more 'extra' capacity than half of its size as it grows.
-// Additionally we have some whiteboxed expectations based on our knowledge of
-// the layout and size of empty and inlined cords, and flat nodes.
+// We use whiteboxed expectations based on our knowledge of the layout and size
+// of empty and inlined cords, and flat nodes.
+
+constexpr auto kFairShare = absl::CordMemoryAccounting::kFairShare;
-TEST_P(CordTest, CordMemoryUsageEmpty) {
- EXPECT_EQ(sizeof(absl::Cord), absl::Cord().EstimatedMemoryUsage());
+// Creates a cord of `n` copies of `c`, making sure no string stealing occurs.
+absl::Cord MakeCord(size_t n, char c) {
+ const std::string s(n, c);
+ return absl::Cord(s);
}
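+// (Passing a named const std::string keeps the Cord(std::string&&) overload
+// from adopting the string's buffer, so the result is always a freshly
+// allocated FLAT node whose AllocatedSize() the tests below measure.)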
-TEST_P(CordTest, CordMemoryUsageEmbedded) {
- absl::Cord a("hello");
- EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
+TEST(CordTest, CordMemoryUsageEmpty) {
+ absl::Cord cord;
+ EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage());
+ EXPECT_EQ(sizeof(absl::Cord), cord.EstimatedMemoryUsage(kFairShare));
}
-TEST_P(CordTest, CordMemoryUsageEmbeddedAppend) {
- absl::Cord a("a");
- absl::Cord b("bcd");
- EXPECT_EQ(b.EstimatedMemoryUsage(), sizeof(absl::Cord));
- a.Append(b);
+TEST(CordTest, CordMemoryUsageInlined) {
+ absl::Cord a("hello");
EXPECT_EQ(a.EstimatedMemoryUsage(), sizeof(absl::Cord));
+ EXPECT_EQ(a.EstimatedMemoryUsage(kFairShare), sizeof(absl::Cord));
}
-TEST_P(CordTest, CordMemoryUsageExternalMemory) {
- static const int kLength = 1000;
+TEST(CordTest, CordMemoryUsageExternalMemory) {
absl::Cord cord;
- AddExternalMemory(std::string(kLength, 'x'), &cord);
- EXPECT_GT(cord.EstimatedMemoryUsage(), kLength);
- EXPECT_LE(cord.EstimatedMemoryUsage(), kLength * 1.5);
-}
+ AddExternalMemory(std::string(1000, 'x'), &cord);
+ const size_t expected =
+ sizeof(absl::Cord) + 1000 + sizeof(CordRepExternal) + sizeof(intptr_t);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(), expected);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare), expected);
+}
+
+TEST(CordTest, CordMemoryUsageFlat) {
+ absl::Cord cord = MakeCord(1000, 'a');
+ const size_t flat_size =
+ absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+ EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + flat_size);
+}
+
+TEST(CordTest, CordMemoryUsageSubStringSharedFlat) {
+ absl::Cord flat = MakeCord(2000, 'a');
+ const size_t flat_size =
+ absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+ absl::Cord cord = flat.Subcord(500, 1000);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + sizeof(CordRepSubstring) + flat_size / 2);
+}
+
+TEST(CordTest, CordMemoryUsageFlatShared) {
+ absl::Cord shared = MakeCord(1000, 'a');
+ absl::Cord cord(shared);
+ const size_t flat_size =
+ absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+ EXPECT_EQ(cord.EstimatedMemoryUsage(), sizeof(absl::Cord) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + flat_size / 2);
+}
+
+TEST(CordTest, CordMemoryUsageFlatHardenedAndShared) {
+ absl::Cord shared = MakeCord(1000, 'a');
+ absl::Cord cord(shared);
+ const size_t flat_size =
+ absl::CordTestPeer::Tree(cord)->flat()->AllocatedSize();
+ cord.SetExpectedChecksum(1);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size / 2);
+
+ absl::Cord cord2(cord);
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) + sizeof(CordRepCrc) + flat_size);
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + (sizeof(CordRepCrc) + flat_size / 2) / 2);
+}
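+
+// Editor's note: "fair share" accounting divides each shared node's footprint
+// by its number of owners, so the owners' reports sum to the true total
+// instead of double-counting. A worked example under that rule:
+//
+//   absl::Cord a = MakeCord(1000, 'a');  // flat of AllocatedSize() F
+//   absl::Cord b = a;                    // rep now shared by two cords
+//   // a.EstimatedMemoryUsage(kFairShare) == sizeof(absl::Cord) + F / 2
+//   // b.EstimatedMemoryUsage(kFairShare) == sizeof(absl::Cord) + F / 2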
+
+TEST(CordTest, CordMemoryUsageBTree) {
+ absl::Cord cord1;
+ size_t flats1_size = 0;
+ absl::Cord flats1[4] = {MakeCord(1000, 'a'), MakeCord(1100, 'a'),
+ MakeCord(1200, 'a'), MakeCord(1300, 'a')};
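+  // Note: `flat` is taken by value below, so each flat rep stays shared with
+  // the `flats1` array; that sharing is what the `flats1_size / 2` fair share
+  // expectations later in this test rely on.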
+ for (absl::Cord flat : flats1) {
+ flats1_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+ cord1.Append(std::move(flat));
+ }
-TEST_P(CordTest, CordMemoryUsageFlat) {
- static const int kLength = 125;
- absl::Cord a(std::string(kLength, 'a'));
- EXPECT_GT(a.EstimatedMemoryUsage(), kLength);
- EXPECT_LE(a.EstimatedMemoryUsage(), kLength * 1.5);
-}
+  // Make sure the created cord is a BTREE tree. Under some builds, such as
+  // Windows DLL builds, ODR-like effects on the flag may mean the DLL code
+  // runs with whatever default value it picked up.
+ if (!absl::CordTestPeer::Tree(cord1)->IsBtree()) {
+ ABSL_RAW_LOG(WARNING, "Cord library code not respecting btree flag");
+ return;
+ }
-TEST_P(CordTest, CordMemoryUsageAppendFlat) {
- using absl::strings_internal::CordTestAccess;
- absl::Cord a(std::string(CordTestAccess::MaxFlatLength(), 'a'));
- size_t length = a.EstimatedMemoryUsage();
- a.Append(std::string(CordTestAccess::MaxFlatLength(), 'b'));
- size_t delta = a.EstimatedMemoryUsage() - length;
- EXPECT_GT(delta, CordTestAccess::MaxFlatLength());
- EXPECT_LE(delta, CordTestAccess::MaxFlatLength() * 1.5);
-}
+ size_t rep1_size = sizeof(CordRepBtree) + flats1_size;
+ size_t rep1_shared_size = sizeof(CordRepBtree) + flats1_size / 2;
-TEST_P(CordTest, CordMemoryUsageAppendExternal) {
- static const int kLength = 1000;
- using absl::strings_internal::CordTestAccess;
- absl::Cord a(std::string(CordTestAccess::MaxFlatLength(), 'a'));
- size_t length = a.EstimatedMemoryUsage();
- AddExternalMemory(std::string(kLength, 'b'), &a);
- size_t delta = a.EstimatedMemoryUsage() - length;
- EXPECT_GT(delta, kLength);
- EXPECT_LE(delta, kLength * 1.5);
-}
+ EXPECT_EQ(cord1.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep1_size);
+ EXPECT_EQ(cord1.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + rep1_shared_size);
-TEST_P(CordTest, CordMemoryUsageSubString) {
- static const int kLength = 2000;
- using absl::strings_internal::CordTestAccess;
- absl::Cord a(std::string(kLength, 'a'));
- size_t length = a.EstimatedMemoryUsage();
- AddExternalMemory(std::string(kLength, 'b'), &a);
- absl::Cord b = a.Subcord(0, kLength + kLength / 2);
- size_t delta = b.EstimatedMemoryUsage() - length;
- EXPECT_GT(delta, kLength);
- EXPECT_LE(delta, kLength * 1.5);
+ absl::Cord cord2;
+ size_t flats2_size = 0;
+ absl::Cord flats2[4] = {MakeCord(600, 'a'), MakeCord(700, 'a'),
+ MakeCord(800, 'a'), MakeCord(900, 'a')};
+ for (absl::Cord& flat : flats2) {
+ flats2_size += absl::CordTestPeer::Tree(flat)->flat()->AllocatedSize();
+ cord2.Append(std::move(flat));
+ }
+ size_t rep2_size = sizeof(CordRepBtree) + flats2_size;
+
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(), sizeof(absl::Cord) + rep2_size);
+ EXPECT_EQ(cord2.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + rep2_size);
+
+ absl::Cord cord(cord1);
+ cord.Append(std::move(cord2));
+
+ EXPECT_EQ(cord.EstimatedMemoryUsage(),
+ sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_size + rep2_size);
+ EXPECT_EQ(cord.EstimatedMemoryUsage(kFairShare),
+ sizeof(absl::Cord) + sizeof(CordRepBtree) + rep1_shared_size / 2 +
+ rep2_size);
}
// Regtest for a change that had to be rolled back because it expanded out
@@ -1354,11 +1877,13 @@ TEST_P(CordTest, CordMemoryUsageInlineRep) {
TEST_P(CordTest, Concat_Append) {
// Create a rep of type CONCAT
absl::Cord s1("foobarbarbarbarbar");
+ MaybeHarden(s1);
s1.Append("abcdefgabcdefgabcdefgabcdefgabcdefgabcdefgabcdefg");
size_t size = s1.size();
// Create a copy of s1 and append to it.
absl::Cord s2 = s1;
+ MaybeHarden(s2);
s2.Append("x");
// 7465150 modifies s1 when it shouldn't.
@@ -1378,6 +1903,7 @@ TEST_P(CordTest, DiabolicalGrowth) {
for (char c : expected) {
absl::Cord shared(cord);
cord.Append(absl::string_view(&c, 1));
+ MaybeHarden(cord);
}
std::string value;
absl::CopyCordToString(cord, &value);
@@ -1422,8 +1948,12 @@ static absl::Cord MakeHuge(absl::string_view prefix) {
TEST_P(CordTest, HugeCord) {
absl::Cord cord = MakeHuge("huge cord");
+ MaybeHarden(cord);
+
+ const size_t acceptable_delta =
+ 100 + (UseCrc() ? sizeof(absl::cord_internal::CordRepCrc) : 0);
EXPECT_LE(cord.size(), cord.EstimatedMemoryUsage());
- EXPECT_GE(cord.size() + 100, cord.EstimatedMemoryUsage());
+ EXPECT_GE(cord.size() + acceptable_delta, cord.EstimatedMemoryUsage());
}
// Tests that Append() works ok when handed a self reference
@@ -1433,6 +1963,7 @@ TEST_P(CordTest, AppendSelf) {
std::string control_data = "Abc";
absl::Cord data(control_data);
while (control_data.length() < 0x4000) {
+ MaybeHarden(data);
data.Append(data);
control_data.append(control_data);
ASSERT_EQ(control_data, data);
@@ -1443,6 +1974,8 @@ TEST_P(CordTest, MakeFragmentedCordFromInitializerList) {
absl::Cord fragmented =
absl::MakeFragmentedCord({"A ", "fragmented ", "Cord"});
+ MaybeHarden(fragmented);
+
EXPECT_EQ("A fragmented Cord", fragmented);
auto chunk_it = fragmented.chunk_begin();
@@ -1463,6 +1996,8 @@ TEST_P(CordTest, MakeFragmentedCordFromVector) {
std::vector<absl::string_view> chunks = {"A ", "fragmented ", "Cord"};
absl::Cord fragmented = absl::MakeFragmentedCord(chunks);
+ MaybeHarden(fragmented);
+
EXPECT_EQ("A fragmented Cord", fragmented);
auto chunk_it = fragmented.chunk_begin();
@@ -1565,22 +2100,26 @@ TEST_P(CordTest, CordChunkIteratorOperations) {
VerifyChunkIterator(empty_cord, 0);
absl::Cord small_buffer_cord("small cord");
+ MaybeHarden(small_buffer_cord);
VerifyChunkIterator(small_buffer_cord, 1);
absl::Cord flat_node_cord("larger than small buffer optimization");
+ MaybeHarden(flat_node_cord);
VerifyChunkIterator(flat_node_cord, 1);
- VerifyChunkIterator(
- absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ",
- "testing ", "chunk ", "iterations."}),
- 8);
+ VerifyChunkIterator(MaybeHardened(absl::MakeFragmentedCord(
+ {"a ", "small ", "fragmented ", "cord ", "for ",
+ "testing ", "chunk ", "iterations."})),
+ 8);
absl::Cord reused_nodes_cord(std::string(40, 'c'));
reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'b')));
+ MaybeHarden(reused_nodes_cord);
reused_nodes_cord.Prepend(absl::Cord(std::string(40, 'a')));
size_t expected_chunks = 3;
for (int i = 0; i < 8; ++i) {
reused_nodes_cord.Prepend(reused_nodes_cord);
+ MaybeHarden(reused_nodes_cord);
expected_chunks *= 2;
VerifyChunkIterator(reused_nodes_cord, expected_chunks);
}
@@ -1592,6 +2131,78 @@ TEST_P(CordTest, CordChunkIteratorOperations) {
VerifyChunkIterator(subcords, 128);
}
+
+TEST_P(CordTest, AdvanceAndReadOnDataEdge) {
+ RandomEngine rng(GTEST_FLAG_GET(random_seed));
+ const std::string data = RandomLowercaseString(&rng, 2000);
+ for (bool as_flat : {true, false}) {
+ SCOPED_TRACE(as_flat ? "Flat" : "External");
+
+ absl::Cord cord =
+ as_flat ? absl::Cord(data)
+ : absl::MakeCordFromExternal(data, [](absl::string_view) {});
+ auto it = cord.Chars().begin();
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
+#endif
+
+ it = cord.Chars().begin();
+ absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
+ EXPECT_EQ(frag, data);
+ EXPECT_TRUE(it == cord.Chars().end());
+
+ it = cord.Chars().begin();
+ frag = cord.AdvanceAndRead(&it, 200);
+ EXPECT_EQ(frag, data.substr(0, 200));
+ EXPECT_FALSE(it == cord.Chars().end());
+
+ frag = cord.AdvanceAndRead(&it, 1500);
+ EXPECT_EQ(frag, data.substr(200, 1500));
+ EXPECT_FALSE(it == cord.Chars().end());
+
+ frag = cord.AdvanceAndRead(&it, 300);
+ EXPECT_EQ(frag, data.substr(1700, 300));
+ EXPECT_TRUE(it == cord.Chars().end());
+ }
+}
+
+TEST_P(CordTest, AdvanceAndReadOnSubstringDataEdge) {
+ RandomEngine rng(GTEST_FLAG_GET(random_seed));
+ const std::string data = RandomLowercaseString(&rng, 2500);
+ for (bool as_flat : {true, false}) {
+ SCOPED_TRACE(as_flat ? "Flat" : "External");
+
+ absl::Cord cord =
+ as_flat ? absl::Cord(data)
+ : absl::MakeCordFromExternal(data, [](absl::string_view) {});
+ cord = cord.Subcord(200, 2000);
+ const std::string substr = data.substr(200, 2000);
+
+ auto it = cord.Chars().begin();
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ EXPECT_DEATH_IF_SUPPORTED(cord.AdvanceAndRead(&it, 2001), ".*");
+#endif
+
+ it = cord.Chars().begin();
+ absl::Cord frag = cord.AdvanceAndRead(&it, 2000);
+ EXPECT_EQ(frag, substr);
+ EXPECT_TRUE(it == cord.Chars().end());
+
+ it = cord.Chars().begin();
+ frag = cord.AdvanceAndRead(&it, 200);
+ EXPECT_EQ(frag, substr.substr(0, 200));
+ EXPECT_FALSE(it == cord.Chars().end());
+
+ frag = cord.AdvanceAndRead(&it, 1500);
+ EXPECT_EQ(frag, substr.substr(200, 1500));
+ EXPECT_FALSE(it == cord.Chars().end());
+
+ frag = cord.AdvanceAndRead(&it, 300);
+ EXPECT_EQ(frag, substr.substr(1700, 300));
+ EXPECT_TRUE(it == cord.Chars().end());
+ }
+}
+
TEST_P(CordTest, CharIteratorTraits) {
static_assert(std::is_copy_constructible<absl::Cord::CharIterator>::value,
"");
@@ -1706,27 +2317,33 @@ TEST_P(CordTest, CharIteratorOperations) {
VerifyCharIterator(empty_cord);
absl::Cord small_buffer_cord("small cord");
+ MaybeHarden(small_buffer_cord);
VerifyCharIterator(small_buffer_cord);
absl::Cord flat_node_cord("larger than small buffer optimization");
+ MaybeHarden(flat_node_cord);
VerifyCharIterator(flat_node_cord);
- VerifyCharIterator(
+ VerifyCharIterator(MaybeHardened(
absl::MakeFragmentedCord({"a ", "small ", "fragmented ", "cord ", "for ",
- "testing ", "character ", "iteration."}));
+ "testing ", "character ", "iteration."})));
absl::Cord reused_nodes_cord("ghi");
reused_nodes_cord.Prepend(absl::Cord("def"));
reused_nodes_cord.Prepend(absl::Cord("abc"));
for (int i = 0; i < 4; ++i) {
reused_nodes_cord.Prepend(reused_nodes_cord);
+ MaybeHarden(reused_nodes_cord);
VerifyCharIterator(reused_nodes_cord);
}
RandomEngine rng(GTEST_FLAG_GET(random_seed));
absl::Cord flat_cord(RandomLowercaseString(&rng, 256));
absl::Cord subcords;
- for (int i = 0; i < 4; ++i) subcords.Prepend(flat_cord.Subcord(16 * i, 128));
+ for (int i = 0; i < 4; ++i) {
+ subcords.Prepend(flat_cord.Subcord(16 * i, 128));
+ MaybeHarden(subcords);
+ }
VerifyCharIterator(subcords);
}
@@ -1751,6 +2368,8 @@ TEST_P(CordTest, CharIteratorAdvanceAndRead) {
cord.Append(absl::Cord(block));
}
+ MaybeHarden(cord);
+
for (size_t chunk_size :
{kChunkSize1, kChunkSize2, kChunkSize3, kChunkSize4}) {
absl::Cord::CharIterator it = cord.char_begin();
@@ -1768,6 +2387,7 @@ TEST_P(CordTest, CharIteratorAdvanceAndRead) {
TEST_P(CordTest, StreamingOutput) {
absl::Cord c =
absl::MakeFragmentedCord({"A ", "small ", "fragmented ", "Cord", "."});
+ MaybeHarden(c);
std::stringstream output;
output << c;
EXPECT_EQ("A small fragmented Cord.", output.str());
@@ -1781,6 +2401,7 @@ TEST_P(CordTest, ForEachChunk) {
cord_chunks.push_back(absl::StrCat("[", i, "]"));
}
absl::Cord c = absl::MakeFragmentedCord(cord_chunks);
+ MaybeHarden(c);
std::vector<std::string> iterated_chunks;
absl::CordTestPeer::ForEachChunk(c,
@@ -1798,6 +2419,7 @@ TEST_P(CordTest, SmallBufferAssignFromOwnData) {
for (size_t pos = 0; pos < contents.size(); ++pos) {
for (size_t count = contents.size() - pos; count > 0; --count) {
absl::Cord c(contents);
+ MaybeHarden(c);
absl::string_view flat = c.Flatten();
c = flat.substr(pos, count);
EXPECT_EQ(c, contents.substr(pos, count))
@@ -1810,12 +2432,16 @@ TEST_P(CordTest, Format) {
absl::Cord c;
absl::Format(&c, "There were %04d little %s.", 3, "pigs");
EXPECT_EQ(c, "There were 0003 little pigs.");
+ MaybeHarden(c);
absl::Format(&c, "And %-3llx bad wolf!", 1);
+ MaybeHarden(c);
EXPECT_EQ(c, "There were 0003 little pigs.And 1 bad wolf!");
}
TEST_P(CordTest, Hardening) {
absl::Cord cord("hello");
+ MaybeHarden(cord);
+
  // These statements should abort the program in all build modes.
EXPECT_DEATH_IF_SUPPORTED(cord.RemovePrefix(6), "");
EXPECT_DEATH_IF_SUPPORTED(cord.RemoveSuffix(6), "");
@@ -1855,6 +2481,7 @@ TEST_P(CordTest, BtreeHostileSplitInsertJoin) {
}
for (int j = 0; j < 1000; ++j) {
+ MaybeHarden(cord);
size_t offset = absl::Uniform(bitgen, 0u, cord.size());
size_t length = absl::Uniform(bitgen, 100u, data.size());
if (cord.size() == offset) {
@@ -1892,12 +2519,34 @@ class AfterExitCordTester {
absl::string_view expected_;
};
+// Deliberately prevents the destructor for an absl::Cord from running. The cord
+// is accessible via the cord member during the lifetime of the CordLeaker.
+// After the CordLeaker is destroyed, pointers to the cord will remain valid
+// until the CordLeaker's memory is deallocated.
+struct CordLeaker {
+ union {
+ absl::Cord cord;
+ };
+
+ template <typename Str>
+ constexpr explicit CordLeaker(const Str& str) : cord(str) {}
+
+ ~CordLeaker() {
+ // Don't do anything, including running cord's destructor. (cord's
+ // destructor won't run automatically because cord is hidden inside a
+ // union.)
+ }
+};
+
template <typename Str>
void TestConstinitConstructor(Str) {
const auto expected = Str::value;
// Defined before `cord` to be destroyed after it.
static AfterExitCordTester exit_tester; // NOLINT
- ABSL_CONST_INIT static absl::Cord cord(Str{}); // NOLINT
+ ABSL_CONST_INIT static CordLeaker cord_leaker(Str{}); // NOLINT
+ // cord_leaker is static, so this reference will remain valid through the end
+ // of program execution.
+ static absl::Cord& cord = cord_leaker.cord;
static bool init_exit_tester = exit_tester.Set(&cord, expected);
(void)init_exit_tester;
@@ -1955,3 +2604,272 @@ TEST_P(CordTest, ConstinitConstructor) {
TestConstinitConstructor(
absl::strings_internal::MakeStringConstant(LongView{}));
}
+
+namespace {
+
+// Test helper that generates a populated cord for future manipulation.
+//
+// By test convention, all generated cords begin with the characters "abcde" at
+// the start of the first chunk.
+class PopulatedCordFactory {
+ public:
+ constexpr PopulatedCordFactory(absl::string_view name,
+ absl::Cord (*generator)())
+ : name_(name), generator_(generator) {}
+
+ absl::string_view Name() const { return name_; }
+ absl::Cord Generate() const { return generator_(); }
+
+ private:
+ absl::string_view name_;
+ absl::Cord (*generator_)();
+};
+
+// clang-format off
+// This array is constant-initialized in conformant compilers.
+PopulatedCordFactory cord_factories[] = {
+ {"sso", [] { return absl::Cord("abcde"); }},
+ {"flat", [] {
+ // Too large to live in SSO space, but small enough to be a simple FLAT.
+ absl::Cord flat(absl::StrCat("abcde", std::string(1000, 'x')));
+ flat.Flatten();
+ return flat;
+ }},
+ {"external", [] {
+ // A cheat: we are using a string literal as the external storage, so a
+ // no-op releaser is correct here.
+ return absl::MakeCordFromExternal("abcde External!", []{});
+ }},
+ {"external substring", [] {
+ // A cheat: we are using a string literal as the external storage, so a
+ // no-op releaser is correct here.
+ absl::Cord ext = absl::MakeCordFromExternal("-abcde External!", []{});
+ return absl::CordTestPeer::MakeSubstring(ext, 1, ext.size() - 1);
+ }},
+ {"substring", [] {
+ absl::Cord flat(absl::StrCat("-abcde", std::string(1000, 'x')));
+ flat.Flatten();
+ return flat.Subcord(1, 998);
+ }},
+ {"fragmented", [] {
+ std::string fragment = absl::StrCat("abcde", std::string(195, 'x'));
+ std::vector<std::string> fragments(200, fragment);
+ absl::Cord cord = absl::MakeFragmentedCord(fragments);
+ assert(cord.size() == 40000);
+ return cord;
+ }},
+};
+// clang-format on
+
+// Test helper that can mutate a cord, and possibly undo the mutation, for
+// testing.
+class CordMutator {
+ public:
+ constexpr CordMutator(absl::string_view name, void (*mutate)(absl::Cord&),
+ void (*undo)(absl::Cord&) = nullptr)
+ : name_(name), mutate_(mutate), undo_(undo) {}
+
+ absl::string_view Name() const { return name_; }
+ void Mutate(absl::Cord& cord) const { mutate_(cord); }
+ bool CanUndo() const { return undo_ != nullptr; }
+ void Undo(absl::Cord& cord) const { undo_(cord); }
+
+ private:
+ absl::string_view name_;
+ void (*mutate_)(absl::Cord&);
+ void (*undo_)(absl::Cord&);
+};
+
+// clang-format off
+// This array is constant-initialized in conformant compilers.
+CordMutator cord_mutators[] = {
+ {"clear", [](absl::Cord& c) { c.Clear(); }},
+ {"overwrite", [](absl::Cord& c) { c = "overwritten"; }},
+ {
+ "append string",
+ [](absl::Cord& c) { c.Append("0123456789"); },
+ [](absl::Cord& c) { c.RemoveSuffix(10); }
+ },
+ {
+ "append cord",
+ [](absl::Cord& c) {
+ c.Append(absl::MakeFragmentedCord({"12345", "67890"}));
+ },
+ [](absl::Cord& c) { c.RemoveSuffix(10); }
+ },
+ {
+ "append checksummed cord",
+ [](absl::Cord& c) {
+ absl::Cord to_append = absl::MakeFragmentedCord({"12345", "67890"});
+ to_append.SetExpectedChecksum(999);
+ c.Append(to_append);
+ },
+ [](absl::Cord& c) { c.RemoveSuffix(10); }
+ },
+ {
+ "append self",
+ [](absl::Cord& c) { c.Append(c); },
+ [](absl::Cord& c) { c.RemoveSuffix(c.size() / 2); }
+ },
+ {
+ "prepend string",
+ [](absl::Cord& c) { c.Prepend("9876543210"); },
+ [](absl::Cord& c) { c.RemovePrefix(10); }
+ },
+ {
+ "prepend cord",
+ [](absl::Cord& c) {
+ c.Prepend(absl::MakeFragmentedCord({"98765", "43210"}));
+ },
+ [](absl::Cord& c) { c.RemovePrefix(10); }
+ },
+ {
+ "prepend checksummed cord",
+ [](absl::Cord& c) {
+ absl::Cord to_prepend = absl::MakeFragmentedCord({"98765", "43210"});
+ to_prepend.SetExpectedChecksum(999);
+ c.Prepend(to_prepend);
+ },
+ [](absl::Cord& c) { c.RemovePrefix(10); }
+ },
+ {
+ "prepend self",
+ [](absl::Cord& c) { c.Prepend(c); },
+ [](absl::Cord& c) { c.RemovePrefix(c.size() / 2); }
+ },
+ {"remove prefix", [](absl::Cord& c) { c.RemovePrefix(2); }},
+ {"remove suffix", [](absl::Cord& c) { c.RemoveSuffix(2); }},
+ {"subcord", [](absl::Cord& c) { c = c.Subcord(1, c.size() - 2); }},
+ {
+ "swap inline",
+ [](absl::Cord& c) {
+ absl::Cord other("swap");
+ c.swap(other);
+ }
+ },
+ {
+ "swap tree",
+ [](absl::Cord& c) {
+ absl::Cord other(std::string(10000, 'x'));
+ c.swap(other);
+ }
+ },
+};
+// clang-format on
+} // namespace
+
+TEST_P(CordTest, ExpectedChecksum) {
+ for (const PopulatedCordFactory& factory : cord_factories) {
+ SCOPED_TRACE(factory.Name());
+ for (bool shared : {false, true}) {
+ SCOPED_TRACE(shared);
+
+ absl::Cord shared_cord_source = factory.Generate();
+ auto make_instance = [=] {
+ return shared ? shared_cord_source : factory.Generate();
+ };
+
+ const absl::Cord base_value = factory.Generate();
+ const std::string base_value_as_string(factory.Generate().Flatten());
+
+ absl::Cord c1 = make_instance();
+ EXPECT_FALSE(c1.ExpectedChecksum().has_value());
+
+ // Setting an expected checksum works, and retains the cord's bytes
+ c1.SetExpectedChecksum(12345);
+ EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+ EXPECT_EQ(c1, base_value);
+
+ // CRC persists through copies, assignments, and moves:
+ absl::Cord c1_copy_construct = c1;
+ EXPECT_EQ(c1_copy_construct.ExpectedChecksum().value_or(0), 12345);
+
+ absl::Cord c1_copy_assign;
+ c1_copy_assign = c1;
+ EXPECT_EQ(c1_copy_assign.ExpectedChecksum().value_or(0), 12345);
+
+ absl::Cord c1_move(std::move(c1_copy_assign));
+ EXPECT_EQ(c1_move.ExpectedChecksum().value_or(0), 12345);
+
+ EXPECT_EQ(c1.ExpectedChecksum().value_or(0), 12345);
+
+ // A CRC Cord compares equal to its non-CRC value.
+ EXPECT_EQ(c1, make_instance());
+
+ for (const CordMutator& mutator : cord_mutators) {
+ SCOPED_TRACE(mutator.Name());
+
+ // Test that mutating a cord removes its stored checksum
+ absl::Cord c2 = make_instance();
+ c2.SetExpectedChecksum(24680);
+
+ mutator.Mutate(c2);
+ EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
+
+ if (mutator.CanUndo()) {
+ // Undoing an operation should not restore the checksum
+ mutator.Undo(c2);
+ EXPECT_EQ(c2, base_value);
+ EXPECT_EQ(c2.ExpectedChecksum(), absl::nullopt);
+ }
+ }
+
+ absl::Cord c3 = make_instance();
+ c3.SetExpectedChecksum(999);
+ const absl::Cord& cc3 = c3;
+
+ // Test that all cord reading operations function in the face of an
+ // expected checksum.
+
+ // Test data precondition
+ ASSERT_TRUE(cc3.StartsWith("abcde"));
+
+ EXPECT_EQ(cc3.size(), base_value_as_string.size());
+ EXPECT_FALSE(cc3.empty());
+ EXPECT_EQ(cc3.Compare(base_value), 0);
+ EXPECT_EQ(cc3.Compare(base_value_as_string), 0);
+ EXPECT_EQ(cc3.Compare("wxyz"), -1);
+ EXPECT_EQ(cc3.Compare(absl::Cord("wxyz")), -1);
+ EXPECT_EQ(cc3.Compare("aaaa"), 1);
+ EXPECT_EQ(cc3.Compare(absl::Cord("aaaa")), 1);
+ EXPECT_EQ(absl::Cord("wxyz").Compare(cc3), 1);
+ EXPECT_EQ(absl::Cord("aaaa").Compare(cc3), -1);
+ EXPECT_TRUE(cc3.StartsWith("abcd"));
+ EXPECT_EQ(std::string(cc3), base_value_as_string);
+
+ std::string dest;
+ absl::CopyCordToString(cc3, &dest);
+ EXPECT_EQ(dest, base_value_as_string);
+
+ bool first_pass = true;
+ for (absl::string_view chunk : cc3.Chunks()) {
+ if (first_pass) {
+ EXPECT_TRUE(absl::StartsWith(chunk, "abcde"));
+ }
+ first_pass = false;
+ }
+ first_pass = true;
+ for (char ch : cc3.Chars()) {
+ if (first_pass) {
+ EXPECT_EQ(ch, 'a');
+ }
+ first_pass = false;
+ }
+ EXPECT_TRUE(absl::StartsWith(*cc3.chunk_begin(), "abcde"));
+ EXPECT_EQ(*cc3.char_begin(), 'a');
+
+ auto char_it = cc3.char_begin();
+ absl::Cord::Advance(&char_it, 2);
+ EXPECT_EQ(absl::Cord::AdvanceAndRead(&char_it, 2), "cd");
+ EXPECT_EQ(*char_it, 'e');
+ char_it = cc3.char_begin();
+ absl::Cord::Advance(&char_it, 2);
+ EXPECT_TRUE(absl::StartsWith(absl::Cord::ChunkRemaining(char_it), "cde"));
+
+ EXPECT_EQ(cc3[0], 'a');
+ EXPECT_EQ(cc3[4], 'e');
+ EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value));
+ EXPECT_EQ(absl::HashOf(cc3), absl::HashOf(base_value_as_string));
+ }
+ }
+}
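+
+// Editor's note: an application-level sketch, not part of this change, of the
+// expected-checksum API; `ComputeCrc32c` and `HandleCorruption` are
+// hypothetical, and Cord stores the value verbatim without validating it.
+//
+//   absl::Cord payload = ReadPayload();  // hypothetical source
+//   payload.SetExpectedChecksum(ComputeCrc32c(payload));
+//   ...
+//   if (absl::optional<uint32_t> crc = payload.ExpectedChecksum()) {
+//     if (*crc != ComputeCrc32c(payload)) HandleCorruption();
+//   }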
diff --git a/absl/strings/internal/cord_data_edge.h b/absl/strings/internal/cord_data_edge.h
new file mode 100644
index 00000000..e18b33e1
--- /dev/null
+++ b/absl/strings/internal/cord_data_edge.h
@@ -0,0 +1,63 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+#define ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_flat.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// Returns true if the provided rep is a FLAT or EXTERNAL node, or a SUBSTRING
+// node holding a FLAT or EXTERNAL child rep. Requires `edge != nullptr`.
+inline bool IsDataEdge(const CordRep* edge) {
+ assert(edge != nullptr);
+
+  // The fast path is that `edge` is an EXTERNAL or FLAT node, making the
+  // check below a single, well-predicted branch. The FLAT or EXTERNAL check
+  // is then repeated after the SUBSTRING unwrap so the hot path stays tight.
+ if (edge->tag == EXTERNAL || edge->tag >= FLAT) return true;
+ if (edge->tag == SUBSTRING) edge = edge->substring()->child;
+ return edge->tag == EXTERNAL || edge->tag >= FLAT;
+}
+
+// Returns the `absl::string_view` data reference for the provided data edge.
+// Requires `IsDataEdge(edge) == true`.
+inline absl::string_view EdgeData(const CordRep* edge) {
+ assert(IsDataEdge(edge));
+
+ size_t offset = 0;
+ const size_t length = edge->length;
+ if (edge->IsSubstring()) {
+ offset = edge->substring()->start;
+ edge = edge->substring()->child;
+ }
+ return edge->tag >= FLAT
+ ? absl::string_view{edge->flat()->Data() + offset, length}
+ : absl::string_view{edge->external()->base + offset, length};
+}
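+
+// Example (editor's illustration, not part of the original header): callers
+// are expected to guard EdgeData() with IsDataEdge():
+//
+//   if (IsDataEdge(rep)) {
+//     absl::string_view data = EdgeData(rep);
+//     ConsumeData(data);  // hypothetical
+//   }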
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
diff --git a/absl/strings/internal/cord_data_edge_test.cc b/absl/strings/internal/cord_data_edge_test.cc
new file mode 100644
index 00000000..8fce3bc6
--- /dev/null
+++ b/absl/strings/internal/cord_data_edge_test.cc
@@ -0,0 +1,130 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_data_edge.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::absl::cordrep_testing::MakeExternal;
+using ::absl::cordrep_testing::MakeFlat;
+using ::absl::cordrep_testing::MakeSubstring;
+
+TEST(CordDataEdgeTest, IsDataEdgeOnFlat) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ EXPECT_TRUE(IsDataEdge(rep));
+ CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnExternal) {
+ CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
+ EXPECT_TRUE(IsDataEdge(rep));
+ CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfFlat) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRep* substr = MakeSubstring(1, 20, rep);
+ EXPECT_TRUE(IsDataEdge(substr));
+ CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfExternal) {
+ CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRep* substr = MakeSubstring(1, 20, rep);
+ EXPECT_TRUE(IsDataEdge(substr));
+ CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnBtree) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRepBtree* tree = CordRepBtree::New(rep);
+ EXPECT_FALSE(IsDataEdge(tree));
+ CordRep::Unref(tree);
+}
+
+TEST(CordDataEdgeTest, IsDataEdgeOnBadSubstr) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
+ EXPECT_FALSE(IsDataEdge(substr));
+ CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnFlat) {
+ absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+ CordRep* rep = MakeFlat(value);
+ EXPECT_EQ(EdgeData(rep), value);
+ CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnExternal) {
+ absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+ CordRep* rep = MakeExternal(value);
+ EXPECT_EQ(EdgeData(rep), value);
+ CordRep::Unref(rep);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnSubstringOfFlat) {
+ absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+ CordRep* rep = MakeFlat(value);
+ CordRep* substr = MakeSubstring(1, 20, rep);
+ EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
+ CordRep::Unref(substr);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnSubstringOfExternal) {
+ absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
+ CordRep* rep = MakeExternal(value);
+ CordRep* substr = MakeSubstring(1, 20, rep);
+ EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
+ CordRep::Unref(substr);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
+
+TEST(CordDataEdgeTest, IsDataEdgeOnNullPtr) {
+ EXPECT_DEATH(IsDataEdge(nullptr), ".*");
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnNullPtr) {
+ EXPECT_DEATH(EdgeData(nullptr), ".*");
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnBtree) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRepBtree* tree = CordRepBtree::New(rep);
+ EXPECT_DEATH(EdgeData(tree), ".*");
+ CordRep::Unref(tree);
+}
+
+TEST(CordDataEdgeTest, EdgeDataOnBadSubstr) {
+ CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
+ CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
+ EXPECT_DEATH(EdgeData(substr), ".*");
+ CordRep::Unref(substr);
+}
+
+#endif // GTEST_HAS_DEATH_TEST && !NDEBUG
+
+} // namespace
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/strings/internal/cord_internal.cc b/absl/strings/internal/cord_internal.cc
index 1767e6fc..b6b06cfa 100644
--- a/absl/strings/internal/cord_internal.cc
+++ b/absl/strings/internal/cord_internal.cc
@@ -17,69 +17,57 @@
#include <cassert>
#include <memory>
+#include "absl/base/internal/raw_logging.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cord_rep_ring.h"
+#include "absl/strings/str_cat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-ABSL_CONST_INIT std::atomic<bool> cord_btree_enabled(kCordEnableBtreeDefault);
ABSL_CONST_INIT std::atomic<bool> cord_ring_buffer_enabled(
kCordEnableRingBufferDefault);
ABSL_CONST_INIT std::atomic<bool> shallow_subcords_enabled(
kCordShallowSubcordsDefault);
ABSL_CONST_INIT std::atomic<bool> cord_btree_exhaustive_validation(false);
+void LogFatalNodeType(CordRep* rep) {
+ ABSL_INTERNAL_LOG(FATAL, absl::StrCat("Unexpected node type: ",
+ static_cast<int>(rep->tag)));
+}
+
void CordRep::Destroy(CordRep* rep) {
assert(rep != nullptr);
- absl::InlinedVector<CordRep*, Constants::kInlinedVectorSize> pending;
while (true) {
assert(!rep->refcount.IsImmortal());
- if (rep->tag == CONCAT) {
- CordRepConcat* rep_concat = rep->concat();
- CordRep* right = rep_concat->right;
- if (!right->refcount.Decrement()) {
- pending.push_back(right);
- }
- CordRep* left = rep_concat->left;
- delete rep_concat;
- rep = nullptr;
- if (!left->refcount.Decrement()) {
- rep = left;
- continue;
- }
- } else if (rep->tag == BTREE) {
+ if (rep->tag == BTREE) {
CordRepBtree::Destroy(rep->btree());
- rep = nullptr;
+ return;
} else if (rep->tag == RING) {
CordRepRing::Destroy(rep->ring());
- rep = nullptr;
+ return;
} else if (rep->tag == EXTERNAL) {
CordRepExternal::Delete(rep);
- rep = nullptr;
+ return;
} else if (rep->tag == SUBSTRING) {
CordRepSubstring* rep_substring = rep->substring();
- CordRep* child = rep_substring->child;
+ rep = rep_substring->child;
delete rep_substring;
- rep = nullptr;
- if (!child->refcount.Decrement()) {
- rep = child;
- continue;
+ if (rep->refcount.Decrement()) {
+ return;
}
+ } else if (rep->tag == CRC) {
+ CordRepCrc::Destroy(rep->crc());
+ return;
} else {
+ assert(rep->IsFlat());
CordRepFlat::Delete(rep);
- rep = nullptr;
- }
-
- if (!pending.empty()) {
- rep = pending.back();
- pending.pop_back();
- } else {
- break;
+ return;
}
}
}
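
The rewrite above drops the `pending` stack entirely: without CONCAT, the only tag that does not terminate `Destroy()` is SUBSTRING, which hands ownership of its child back to the top of the loop. A small sketch of that one remaining iterative path, assuming `CordRepFlat::New` from cord_rep_flat.h allocates and tags the node while the caller sets `length`:

#include <cstring>

#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"

void DestroySubstringSketch() {
  using absl::cord_internal::CordRep;
  using absl::cord_internal::CordRepFlat;
  using absl::cord_internal::CordRepSubstring;

  CordRepFlat* flat = CordRepFlat::New(5);
  flat->length = 5;
  memcpy(flat->Data(), "hello", 5);
  // Create() adopts the reference on `flat`.
  CordRep* substr = CordRepSubstring::Create(flat, /*pos=*/1, /*n=*/3);
  // Refcount drops to zero -> Destroy(): pass 1 deletes the substring and
  // loops to the child; pass 2 sees a FLAT and deletes it, then returns.
  CordRep::Unref(substr);
}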
diff --git a/absl/strings/internal/cord_internal.h b/absl/strings/internal/cord_internal.h
index bfe5564e..b50fb79a 100644
--- a/absl/strings/internal/cord_internal.h
+++ b/absl/strings/internal/cord_internal.h
@@ -21,6 +21,7 @@
#include <cstdint>
#include <type_traits>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/invoke.h"
@@ -33,16 +34,27 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
+// The overhead of a vtable is too much for Cord, so we roll our own subclasses
+// using only a single byte to differentiate classes from each other - the "tag"
+// byte. Define the subclasses first so we can provide downcasting helper
+// functions in the base class.
+struct CordRep;
+struct CordRepConcat;
+struct CordRepExternal;
+struct CordRepFlat;
+struct CordRepSubstring;
+struct CordRepCrc;
+class CordRepRing;
+class CordRepBtree;
+
class CordzInfo;
// Default feature enable states for cord ring buffers
enum CordFeatureDefaults {
- kCordEnableBtreeDefault = true,
kCordEnableRingBufferDefault = false,
kCordShallowSubcordsDefault = false
};
-extern std::atomic<bool> cord_btree_enabled;
extern std::atomic<bool> cord_ring_buffer_enabled;
extern std::atomic<bool> shallow_subcords_enabled;
@@ -52,10 +64,6 @@ extern std::atomic<bool> shallow_subcords_enabled;
// O(n^2) complexity as recursive / full tree validation is O(n).
extern std::atomic<bool> cord_btree_exhaustive_validation;
-inline void enable_cord_btree(bool enable) {
- cord_btree_enabled.store(enable, std::memory_order_relaxed);
-}
-
inline void enable_cord_ring_buffer(bool enable) {
cord_ring_buffer_enabled.store(enable, std::memory_order_relaxed);
}
@@ -80,6 +88,9 @@ enum Constants {
kMaxBytesToCopy = 511
};
+// Emits a fatal error "Unexpected node type: xyz" and aborts the program.
+ABSL_ATTRIBUTE_NORETURN void LogFatalNodeType(CordRep* rep);
+
// Compact class for tracking the reference count and state flags for CordRep
// instances. Data is stored in an atomic int32_t for compactness and speed.
class RefcountAndFlags {
@@ -87,9 +98,6 @@ class RefcountAndFlags {
constexpr RefcountAndFlags() : count_{kRefIncrement} {}
struct Immortal {};
explicit constexpr RefcountAndFlags(Immortal) : count_(kImmortalFlag) {}
- struct WithCrc {};
- explicit constexpr RefcountAndFlags(WithCrc)
- : count_(kCrcFlag | kRefIncrement) {}
// Increments the reference count. Imposes no memory ordering.
inline void Increment() {
@@ -125,32 +133,14 @@ class RefcountAndFlags {
return count_.load(std::memory_order_acquire) >> kNumFlags;
}
- // Returns true if the referenced object carries a CRC value.
- bool HasCrc() const {
- return (count_.load(std::memory_order_relaxed) & kCrcFlag) != 0;
- }
-
- // Returns true iff the atomic integer is 1 and this node does not store
- // a CRC. When both these conditions are met, the current thread owns
- // the reference and no other thread shares it, so its contents may be
- // safely mutated.
- //
- // If the referenced item is shared, carries a CRC, or is immortal,
- // it should not be modified in-place, and this function returns false.
- //
- // This call performs the memory barrier needed for the owning thread
- // to act on the object, so that if it returns true, it may safely
- // assume exclusive access to the object.
- inline bool IsMutable() {
- return (count_.load(std::memory_order_acquire)) == kRefIncrement;
- }
-
- // Returns whether the atomic integer is 1. Similar to IsMutable(),
- // but does not check for a stored CRC. (An unshared node with a CRC is not
- // mutable, because changing its data would invalidate the CRC.)
- //
- // When this returns true, there are no other references, and data sinks
- // may safely adopt the children of the CordRep.
+ // Returns whether the atomic integer is 1.
+ // If the reference count is used in the conventional way, a
+ // reference count of 1 implies that the current thread owns the
+ // reference and no other thread shares it.
+ // This call performs the test for a reference count of one, and
+ // performs the memory barrier needed for the owning thread
+ // to act on the object, knowing that it has exclusive access to the
+ // object. Always returns false when the immortal bit is set.
inline bool IsOne() {
return (count_.load(std::memory_order_acquire) & kRefcountMask) ==
kRefIncrement;
@@ -166,51 +156,43 @@ class RefcountAndFlags {
// used for the StringConstant constructor to avoid collecting immutable
// constant cords.
// kReservedFlag is reserved for future use.
- enum {
+ enum Flags {
kNumFlags = 2,
kImmortalFlag = 0x1,
- kCrcFlag = 0x2,
+ kReservedFlag = 0x2,
kRefIncrement = (1 << kNumFlags),
// Bitmask to use when checking refcount by equality. This masks out
// all flags except kImmortalFlag, which is part of the refcount for
// purposes of equality. (A refcount of 0 or 1 does not count as 0 or 1
// if the immortal bit is set.)
- kRefcountMask = ~kCrcFlag,
+ kRefcountMask = ~kReservedFlag,
};
std::atomic<int32_t> count_;
};
-// The overhead of a vtable is too much for Cord, so we roll our own subclasses
-// using only a single byte to differentiate classes from each other - the "tag"
-// byte. Define the subclasses first so we can provide downcasting helper
-// functions in the base class.
-
-struct CordRepConcat;
-struct CordRepExternal;
-struct CordRepFlat;
-struct CordRepSubstring;
-class CordRepRing;
-class CordRepBtree;
-
// Various representations that we allow
enum CordRepKind {
- CONCAT = 0,
+ UNUSED_0 = 0,
SUBSTRING = 1,
- BTREE = 2,
- RING = 3,
- EXTERNAL = 4,
+ CRC = 2,
+ BTREE = 3,
+ RING = 4,
+ EXTERNAL = 5,
// We have different tags for different sized flat arrays,
- // starting with FLAT, and limited to MAX_FLAT_TAG. The 225 value is based on
- // the current 'size to tag' encoding of 8 / 32 bytes. If a new tag is needed
- // in the future, then 'FLAT' and 'MAX_FLAT_TAG' should be adjusted as well
- // as the Tag <---> Size logic so that FLAT stil represents the minimum flat
- // allocation size. (32 bytes as of now).
- FLAT = 5,
- MAX_FLAT_TAG = 225
+ // starting with FLAT, and limited to MAX_FLAT_TAG. The values below map to an
+ // allocated range of 32 bytes to 256 KiB. The current granularity is:
+ // - 8 byte granularity for flat sizes in [32, 512]
+ // - 64 byte granularity for flat sizes in (512, 8 KiB]
+ // - 4 KiB granularity for flat sizes in (8 KiB, 256 KiB]
+ // If a new tag is needed in the future, then 'FLAT' and 'MAX_FLAT_TAG' should
+ // be adjusted as well as the Tag <---> Size mapping logic so that FLAT still
+ // represents the minimum flat allocation size. (32 bytes as of now).
+ FLAT = 6,
+ MAX_FLAT_TAG = 248
};
// There are various locations where we want to check if some rep is a 'plain'
@@ -225,6 +207,18 @@ static_assert(EXTERNAL == RING + 1, "BTREE and EXTERNAL not consecutive");
static_assert(FLAT == EXTERNAL + 1, "EXTERNAL and FLAT not consecutive");
struct CordRep {
+ // Result from an `extract edge` operation. Contains the (possibly changed)
+ // tree node as well as the extracted edge, or {tree, nullptr} if no edge
+ // could be extracted.
+ // On success, the returned `tree` value is null if `extracted` was the only
+ // data edge inside the tree, a data edge if there were only two data edges in
+ // the tree, or the (possibly new / smaller) remaining tree with the extracted
+ // data edge removed.
+ struct ExtractResult {
+ CordRep* tree;
+ CordRep* extracted;
+ };
+
CordRep() = default;
constexpr CordRep(RefcountAndFlags::Immortal immortal, size_t l)
: length(l), refcount(immortal), tag(EXTERNAL), storage{} {}
@@ -249,18 +243,18 @@ struct CordRep {
// Returns true if this instance's tag matches the requested type.
constexpr bool IsRing() const { return tag == RING; }
- constexpr bool IsConcat() const { return tag == CONCAT; }
constexpr bool IsSubstring() const { return tag == SUBSTRING; }
+ constexpr bool IsCrc() const { return tag == CRC; }
constexpr bool IsExternal() const { return tag == EXTERNAL; }
constexpr bool IsFlat() const { return tag >= FLAT; }
constexpr bool IsBtree() const { return tag == BTREE; }
inline CordRepRing* ring();
inline const CordRepRing* ring() const;
- inline CordRepConcat* concat();
- inline const CordRepConcat* concat() const;
inline CordRepSubstring* substring();
inline const CordRepSubstring* substring() const;
+ inline CordRepCrc* crc();
+ inline const CordRepCrc* crc() const;
inline CordRepExternal* external();
inline const CordRepExternal* external() const;
inline CordRepFlat* flat();
@@ -283,17 +277,23 @@ struct CordRep {
static inline void Unref(CordRep* rep);
};
-struct CordRepConcat : public CordRep {
- CordRep* left;
- CordRep* right;
-
- uint8_t depth() const { return storage[0]; }
- void set_depth(uint8_t depth) { storage[0] = depth; }
-};
-
struct CordRepSubstring : public CordRep {
size_t start; // Starting offset of substring in child
CordRep* child;
+
+ // Creates a substring on `child`, adopting a reference on `child`.
+ // Requires `child` to be either a flat or external node, and `pos` and `n` to
+ // form a non-empty partial subrange of `child`, i.e.:
+ // `n > 0 && n < child->length && n + pos <= child->length`
+ static inline CordRepSubstring* Create(CordRep* child, size_t pos, size_t n);
+
+ // Creates a substring of `rep`. Does not adopt a reference on `rep`.
+ // Requires `IsDataEdge(rep) && n > 0 && pos + n <= rep->length`.
+ // If `n == rep->length` then this method returns `CordRep::Ref(rep)`.
+ // If `rep` is a substring of a flat or external node, then this method
+ // returns a new substring of that flat or external node with `pos` adjusted
+ // by the original `start` position.
+ static inline CordRep* Substring(CordRep* rep, size_t pos, size_t n);
};
// Type for function pointer that will invoke the releaser function and also
@@ -357,6 +357,47 @@ struct CordRepExternalImpl
}
};
+inline CordRepSubstring* CordRepSubstring::Create(CordRep* child, size_t pos,
+ size_t n) {
+ assert(child != nullptr);
+ assert(n > 0);
+ assert(n < child->length);
+ assert(pos < child->length);
+ assert(n <= child->length - pos);
+
+ // TODO(b/217376272): Harden internal logic.
+ // Move to strategic places inside the Cord logic and make this an assert.
+ if (ABSL_PREDICT_FALSE(!(child->IsExternal() || child->IsFlat()))) {
+ LogFatalNodeType(child);
+ }
+
+ CordRepSubstring* rep = new CordRepSubstring();
+ rep->length = n;
+ rep->tag = SUBSTRING;
+ rep->start = pos;
+ rep->child = child;
+ return rep;
+}
+
+inline CordRep* CordRepSubstring::Substring(CordRep* rep, size_t pos,
+ size_t n) {
+ assert(rep != nullptr);
+ assert(n != 0);
+ assert(pos < rep->length);
+ assert(n <= rep->length - pos);
+ if (n == rep->length) return CordRep::Ref(rep);
+ if (rep->IsSubstring()) {
+ pos += rep->substring()->start;
+ rep = rep->substring()->child;
+ }
+ CordRepSubstring* substr = new CordRepSubstring();
+ substr->length = n;
+ substr->tag = SUBSTRING;
+ substr->start = pos;
+ substr->child = CordRep::Ref(rep);
+ return substr;
+}
+
inline void CordRepExternal::Delete(CordRep* rep) {
assert(rep != nullptr && rep->IsExternal());
auto* rep_external = static_cast<CordRepExternal*>(rep);
@@ -370,7 +411,8 @@ struct ConstInitExternalStorage {
};
template <typename Str>
-CordRepExternal ConstInitExternalStorage<Str>::value(Str::value);
+ABSL_CONST_INIT CordRepExternal
+ ConstInitExternalStorage<Str>::value(Str::value);
enum {
kMaxInline = 15,
@@ -456,8 +498,8 @@ class InlineData {
// Requires the current instance to hold a tree value.
CordzInfo* cordz_info() const {
assert(is_tree());
- intptr_t info =
- static_cast<intptr_t>(absl::big_endian::ToHost64(as_tree_.cordz_info));
+ intptr_t info = static_cast<intptr_t>(
+ absl::big_endian::ToHost64(static_cast<uint64_t>(as_tree_.cordz_info)));
assert(info & 1);
return reinterpret_cast<CordzInfo*>(info - 1);
}
@@ -467,8 +509,9 @@ class InlineData {
// Requires the current instance to hold a tree value.
void set_cordz_info(CordzInfo* cordz_info) {
assert(is_tree());
- intptr_t info = reinterpret_cast<intptr_t>(cordz_info) | 1;
- as_tree_.cordz_info = absl::big_endian::FromHost64(info);
+ uintptr_t info = reinterpret_cast<uintptr_t>(cordz_info) | 1;
+ as_tree_.cordz_info =
+ static_cast<cordz_info_t>(absl::big_endian::FromHost64(info));
}
// Resets the current cordz_info to null / empty.
@@ -568,16 +611,6 @@ class InlineData {
static_assert(sizeof(InlineData) == kMaxInline + 1, "");
-inline CordRepConcat* CordRep::concat() {
- assert(IsConcat());
- return static_cast<CordRepConcat*>(this);
-}
-
-inline const CordRepConcat* CordRep::concat() const {
- assert(IsConcat());
- return static_cast<const CordRepConcat*>(this);
-}
-
inline CordRepSubstring* CordRep::substring() {
assert(IsSubstring());
return static_cast<CordRepSubstring*>(this);
@@ -599,7 +632,9 @@ inline const CordRepExternal* CordRep::external() const {
}
inline CordRep* CordRep::Ref(CordRep* rep) {
- assert(rep != nullptr);
+ // ABSL_ASSUME is a workaround for
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105585
+ ABSL_ASSUME(rep != nullptr);
rep->refcount.Increment();
return rep;
}
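
For reference, a compile-time sketch of the flag layout after `kCrcFlag` becomes `kReservedFlag`, using the constants from the enum above (restated locally for illustration; the real ones are private to `RefcountAndFlags`):

#include <cstdint>

constexpr int32_t kNumFlags = 2;
constexpr int32_t kImmortalFlag = 0x1;
constexpr int32_t kReservedFlag = 0x2;             // previously kCrcFlag
constexpr int32_t kRefIncrement = 1 << kNumFlags;  // 4: one reference
constexpr int32_t kRefcountMask = ~kReservedFlag;  // immortal bit kept

// A fresh rep (refcount 1, no flags) satisfies IsOne().
static_assert((kRefIncrement & kRefcountMask) == kRefIncrement, "");
// An immortal rep never reads as "one": the immortal bit survives the mask.
static_assert(((kRefIncrement | kImmortalFlag) & kRefcountMask) !=
                  kRefIncrement,
              "");
// The reserved (ex-CRC) bit is ignored when comparing refcounts.
static_assert((((2 * kRefIncrement) | kReservedFlag) & kRefcountMask) ==
                  2 * kRefIncrement,
              "");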
diff --git a/absl/strings/internal/cord_internal_test.cc b/absl/strings/internal/cord_internal_test.cc
deleted file mode 100644
index 0758dfef..00000000
--- a/absl/strings/internal/cord_internal_test.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2021 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/strings/internal/cord_internal.h"
-
-#include "gmock/gmock.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-
-TEST(RefcountAndFlags, NormalRefcount) {
- for (bool expect_high_refcount : {false, true}) {
- SCOPED_TRACE(expect_high_refcount);
- RefcountAndFlags refcount;
- // count = 1
-
- EXPECT_FALSE(refcount.HasCrc());
- EXPECT_TRUE(refcount.IsMutable());
- EXPECT_TRUE(refcount.IsOne());
-
- refcount.Increment();
- // count = 2
-
- EXPECT_FALSE(refcount.HasCrc());
- EXPECT_FALSE(refcount.IsMutable());
- EXPECT_FALSE(refcount.IsOne());
-
- // Decrementing should return true, since a reference is outstanding.
- if (expect_high_refcount) {
- EXPECT_TRUE(refcount.DecrementExpectHighRefcount());
- } else {
- EXPECT_TRUE(refcount.Decrement());
- }
- // count = 1
-
- EXPECT_FALSE(refcount.HasCrc());
- EXPECT_TRUE(refcount.IsMutable());
- EXPECT_TRUE(refcount.IsOne());
-
- // One more decremnt will return false, as no references remain.
- if (expect_high_refcount) {
- EXPECT_FALSE(refcount.DecrementExpectHighRefcount());
- } else {
- EXPECT_FALSE(refcount.Decrement());
- }
- }
-}
-
-TEST(RefcountAndFlags, CrcRefcount) {
- for (bool expect_high_refcount : {false, true}) {
- SCOPED_TRACE(expect_high_refcount);
- RefcountAndFlags refcount(RefcountAndFlags::WithCrc{});
- // count = 1
-
- // A CRC-carrying node is never mutable, but can be unshared
- EXPECT_TRUE(refcount.HasCrc());
- EXPECT_FALSE(refcount.IsMutable());
- EXPECT_TRUE(refcount.IsOne());
-
- refcount.Increment();
- // count = 2
-
- EXPECT_TRUE(refcount.HasCrc());
- EXPECT_FALSE(refcount.IsMutable());
- EXPECT_FALSE(refcount.IsOne());
-
- // Decrementing should return true, since a reference is outstanding.
- if (expect_high_refcount) {
- EXPECT_TRUE(refcount.DecrementExpectHighRefcount());
- } else {
- EXPECT_TRUE(refcount.Decrement());
- }
- // count = 1
-
- EXPECT_TRUE(refcount.HasCrc());
- EXPECT_FALSE(refcount.IsMutable());
- EXPECT_TRUE(refcount.IsOne());
-
- // One more decremnt will return false, as no references remain.
- if (expect_high_refcount) {
- EXPECT_FALSE(refcount.DecrementExpectHighRefcount());
- } else {
- EXPECT_FALSE(refcount.Decrement());
- }
- }
-}
-
-TEST(RefcountAndFlags, ImmortalRefcount) {
- RefcountAndFlags immortal_refcount(RefcountAndFlags::Immortal{});
-
- for (int i = 0; i < 100; ++i) {
- // An immortal refcount is never unshared, and decrementing never causes
- // a collection.
- EXPECT_FALSE(immortal_refcount.HasCrc());
- EXPECT_FALSE(immortal_refcount.IsMutable());
- EXPECT_FALSE(immortal_refcount.IsOne());
- EXPECT_TRUE(immortal_refcount.Decrement());
- EXPECT_TRUE(immortal_refcount.DecrementExpectHighRefcount());
- }
-}
-
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
diff --git a/absl/strings/internal/cord_rep_btree.cc b/absl/strings/internal/cord_rep_btree.cc
index 4404f33a..cacbf3da 100644
--- a/absl/strings/internal/cord_rep_btree.cc
+++ b/absl/strings/internal/cord_rep_btree.cc
@@ -22,6 +22,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_consume.h"
#include "absl/strings/internal/cord_rep_flat.h"
@@ -32,7 +33,9 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
-constexpr size_t CordRepBtree::kMaxCapacity; // NOLINT: needed for c++ < c++17
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepBtree::kMaxCapacity;
+#endif
namespace {
@@ -69,7 +72,7 @@ void DumpAll(const CordRep* rep, bool include_contents, std::ostream& stream,
// indentation and prefix / labels keeps us within roughly 80-100 wide.
constexpr size_t kMaxDataLength = 60;
stream << ", data = \""
- << CordRepBtree::EdgeData(r).substr(0, kMaxDataLength)
+ << EdgeData(r).substr(0, kMaxDataLength)
<< (r->length > kMaxDataLength ? "\"..." : "\"");
}
stream << '\n';
@@ -119,6 +122,7 @@ CordRepSubstring* CreateSubstring(CordRep* rep, size_t offset, size_t n) {
rep = CordRep::Ref(substring->child);
CordRep::Unref(substring);
}
+ assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
@@ -149,7 +153,7 @@ inline CordRep* MakeSubstring(CordRep* rep, size_t offset) {
CordRep* ResizeEdge(CordRep* edge, size_t length, bool is_mutable) {
assert(length > 0);
assert(length <= edge->length);
- assert(CordRepBtree::IsDataEdge(edge));
+ assert(IsDataEdge(edge));
if (length >= edge->length) return edge;
if (is_mutable && (edge->tag >= FLAT || edge->tag == SUBSTRING)) {
@@ -190,24 +194,29 @@ inline void FastUnref(R* r, Fn&& fn) {
}
}
-// Deletes a leaf node data edge. Requires `rep` to be an EXTERNAL or FLAT
-// node, or a SUBSTRING of an EXTERNAL or FLAT node.
-void DeleteLeafEdge(CordRep* rep) {
- for (;;) {
+
+void DeleteSubstring(CordRepSubstring* substring) {
+ CordRep* rep = substring->child;
+ if (!rep->refcount.Decrement()) {
if (rep->tag >= FLAT) {
CordRepFlat::Delete(rep->flat());
- return;
- }
- if (rep->tag == EXTERNAL) {
+ } else {
+ assert(rep->tag == EXTERNAL);
CordRepExternal::Delete(rep->external());
- return;
}
- assert(rep->tag == SUBSTRING);
- CordRepSubstring* substring = rep->substring();
- rep = substring->child;
- assert(rep->tag == EXTERNAL || rep->tag >= FLAT);
- delete substring;
- if (rep->refcount.Decrement()) return;
+ }
+ delete substring;
+}
+
+// Deletes a leaf node data edge. Requires `IsDataEdge(rep)`.
+void DeleteLeafEdge(CordRep* rep) {
+ assert(IsDataEdge(rep));
+ if (rep->tag >= FLAT) {
+ CordRepFlat::Delete(rep->flat());
+ } else if (rep->tag == EXTERNAL) {
+ CordRepExternal::Delete(rep->external());
+ } else {
+ DeleteSubstring(rep->substring());
}
}
@@ -216,8 +225,8 @@ void DeleteLeafEdge(CordRep* rep) {
// propagate node changes up the stack.
template <EdgeType edge_type>
struct StackOperations {
- // Returns true if the node at 'depth' is mutable, i.e. has a refcount
- // of one, carries no CRC, and all of its parent nodes have a refcount of one.
+ // Returns true if the node at 'depth' is not shared, i.e. has a refcount
+ // of one and all of its parent nodes have a refcount of one.
inline bool owned(int depth) const { return depth < share_depth; }
// Returns the node at 'depth'.
@@ -228,11 +237,11 @@ struct StackOperations {
inline CordRepBtree* BuildStack(CordRepBtree* tree, int depth) {
assert(depth <= tree->height());
int current_depth = 0;
- while (current_depth < depth && tree->refcount.IsMutable()) {
+ while (current_depth < depth && tree->refcount.IsOne()) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
- share_depth = current_depth + (tree->refcount.IsMutable() ? 1 : 0);
+ share_depth = current_depth + (tree->refcount.IsOne() ? 1 : 0);
while (current_depth < depth) {
stack[current_depth++] = tree;
tree = tree->Edge(edge_type)->btree();
@@ -241,17 +250,17 @@ struct StackOperations {
}
// Builds a stack with the invariant that all nodes are private owned / not
- // shared and carry no CRC data. This is used in iterative updates where a
- // previous propagation guaranteed all nodes have this property.
+ // shared. This is used in iterative updates where a previous propagation
+ // guaranteed all nodes are owned / private.
inline void BuildOwnedStack(CordRepBtree* tree, int height) {
assert(height <= CordRepBtree::kMaxHeight);
int depth = 0;
while (depth < height) {
- assert(tree->refcount.IsMutable());
+ assert(tree->refcount.IsOne());
stack[depth++] = tree;
tree = tree->Edge(edge_type)->btree();
}
- assert(tree->refcount.IsMutable());
+ assert(tree->refcount.IsOne());
share_depth = depth + 1;
}
@@ -336,12 +345,12 @@ struct StackOperations {
return Unwind</*propagate=*/true>(tree, depth, length, result);
}
- // `share_depth` contains the depth at which the nodes in the stack cannot
- // be mutated. I.e., if the top most level is shared (i.e.:
- // `!refcount.IsMutable()`), then `share_depth` is 0. If the 2nd node
- // is shared (and implicitly all nodes below that) then `share_depth` is 1,
- // etc. A `share_depth` greater than the depth of the stack indicates that
- // none of the nodes in the stack are shared.
+ // `share_depth` contains the depth at which the nodes in the stack become
+ // shared. I.e., if the top most level is shared (i.e.: `!refcount.IsOne()`),
+ // then `share_depth` is 0. If the 2nd node is shared (and implicitly all
+ // nodes below that) then `share_depth` is 1, etc. A `share_depth` greater
+ // than the depth of the stack indicates that none of the nodes in the stack
+ // are shared.
int share_depth;
NodeStack stack;
@@ -372,19 +381,37 @@ void CordRepBtree::Dump(const CordRep* rep, std::ostream& stream) {
Dump(rep, absl::string_view(), false, stream);
}
-void CordRepBtree::DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end) {
- for (CordRep* edge : tree->Edges(begin, end)) {
- FastUnref(edge, DeleteLeafEdge);
+template <size_t size>
+static void DestroyTree(CordRepBtree* tree) {
+ for (CordRep* node : tree->Edges()) {
+ if (node->refcount.Decrement()) continue;
+ for (CordRep* edge : node->btree()->Edges()) {
+ if (edge->refcount.Decrement()) continue;
+ if (size == 1) {
+ DeleteLeafEdge(edge);
+ } else {
+ CordRepBtree::Destroy(edge->btree());
+ }
+ }
+ CordRepBtree::Delete(node->btree());
}
- Delete(tree);
+ CordRepBtree::Delete(tree);
}
-void CordRepBtree::DestroyNonLeaf(CordRepBtree* tree, size_t begin,
- size_t end) {
- for (CordRep* edge : tree->Edges(begin, end)) {
- FastUnref(edge->btree(), Destroy);
+void CordRepBtree::Destroy(CordRepBtree* tree) {
+ switch (tree->height()) {
+ case 0:
+ for (CordRep* edge : tree->Edges()) {
+ if (!edge->refcount.Decrement()) {
+ DeleteLeafEdge(edge);
+ }
+ }
+ return CordRepBtree::Delete(tree);
+ case 1:
+ return DestroyTree<1>(tree);
+ default:
+ return DestroyTree<2>(tree);
}
- Delete(tree);
}
bool CordRepBtree::IsValid(const CordRepBtree* tree, bool shallow) {
@@ -773,7 +800,7 @@ CopyResult CordRepBtree::CopyPrefix(size_t n, bool allow_folding) {
CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
CordRep* front = tree->Edge(tree->begin());
- if (tree->refcount.IsMutable()) {
+ if (tree->refcount.IsOne()) {
Unref(tree->Edges(tree->begin() + 1, tree->end()));
CordRepBtree::Delete(tree);
} else {
@@ -786,7 +813,7 @@ CordRep* CordRepBtree::ExtractFront(CordRepBtree* tree) {
CordRepBtree* CordRepBtree::ConsumeBeginTo(CordRepBtree* tree, size_t end,
size_t new_length) {
assert(end <= tree->end());
- if (tree->refcount.IsMutable()) {
+ if (tree->refcount.IsOne()) {
Unref(tree->Edges(end, tree->end()));
tree->set_end(end);
tree->length = new_length;
@@ -813,13 +840,13 @@ CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
size_t length = len - n;
int height = tree->height();
- bool is_mutable = tree->refcount.IsMutable();
+ bool is_mutable = tree->refcount.IsOne();
// Extract all top nodes which are reduced to size = 1
Position pos = tree->IndexOfLength(length);
while (pos.index == tree->begin()) {
CordRep* edge = ExtractFront(tree);
- is_mutable &= edge->refcount.IsMutable();
+ is_mutable &= edge->refcount.IsOne();
if (height-- == 0) return ResizeEdge(edge, length, is_mutable);
tree = edge->btree();
pos = tree->IndexOfLength(length);
@@ -835,8 +862,8 @@ CordRep* CordRepBtree::RemoveSuffix(CordRepBtree* tree, size_t n) {
length = pos.n;
while (length != edge->length) {
// ConsumeBeginTo guarantees `tree` is a clean, privately owned copy.
- assert(tree->refcount.IsMutable());
- const bool edge_is_mutable = edge->refcount.IsMutable();
+ assert(tree->refcount.IsOne());
+ const bool edge_is_mutable = edge->refcount.IsOne();
if (height-- == 0) {
tree->edges_[pos.index] = ResizeEdge(edge, length, edge_is_mutable);
@@ -973,7 +1000,7 @@ char CordRepBtree::GetCharacter(size_t offset) const {
Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
// The inlined version in `GetAppendBuffer()` deals with all heights <= 3.
assert(height() >= 4);
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
// Build a stack of nodes we may potentially need to update if we find a
// non-shared FLAT with capacity at the leaf level.
@@ -982,13 +1009,13 @@ Span<char> CordRepBtree::GetAppendBufferSlow(size_t size) {
CordRepBtree* stack[kMaxDepth];
for (int i = 0; i < depth; ++i) {
node = node->Edge(kBack)->btree();
- if (!node->refcount.IsMutable()) return {};
+ if (!node->refcount.IsOne()) return {};
stack[i] = node;
}
// Must be a privately owned, mutable flat.
CordRep* const edge = node->Edge(kBack);
- if (!edge->refcount.IsMutable() || edge->tag < FLAT) return {};
+ if (!edge->refcount.IsOne() || edge->tag < FLAT) return {};
// Must have capacity.
const size_t avail = edge->flat()->Capacity() - edge->length;
@@ -1123,6 +1150,79 @@ CordRepBtree* CordRepBtree::Rebuild(CordRepBtree* tree) {
return nullptr;
}
+CordRepBtree::ExtractResult CordRepBtree::ExtractAppendBuffer(
+ CordRepBtree* tree, size_t extra_capacity) {
+ int depth = 0;
+ NodeStack stack;
+
+ // Set up the default 'no success' result, which is {tree, nullptr}.
+ ExtractResult result;
+ result.tree = tree;
+ result.extracted = nullptr;
+
+ // Dive down the right side of the tree, making sure no edges are shared.
+ while (tree->height() > 0) {
+ if (!tree->refcount.IsOne()) return result;
+ stack[depth++] = tree;
+ tree = tree->Edge(kBack)->btree();
+ }
+ if (!tree->refcount.IsOne()) return result;
+
+ // Validate that we ended on a non-shared flat.
+ CordRep* rep = tree->Edge(kBack);
+ if (!(rep->IsFlat() && rep->refcount.IsOne())) return result;
+
+ // Verify it has at least the requested extra capacity.
+ CordRepFlat* flat = rep->flat();
+ const size_t length = flat->length;
+ const size_t avail = flat->Capacity() - flat->length;
+ if (extra_capacity > avail) return result;
+
+ // Set the extracted flat in the result.
+ result.extracted = flat;
+
+ // Cascade-delete all nodes that become empty.
+ while (tree->size() == 1) {
+ CordRepBtree::Delete(tree);
+ if (--depth < 0) {
+ // We consumed the entire tree: return nullptr for new tree.
+ result.tree = nullptr;
+ return result;
+ }
+ rep = tree;
+ tree = stack[depth];
+ }
+
+ // Remove the extracted edge (or the cascaded-up parent node) from `tree`.
+ tree->set_end(tree->end() - 1);
+ tree->length -= length;
+
+ // Adjust lengths up the tree.
+ while (depth > 0) {
+ tree = stack[--depth];
+ tree->length -= length;
+ }
+
+ // Remove unnecessary top nodes with size = 1. This may iterate all the way
+ // down to the leaf node, in which case we simply return the remaining last
+ // edge in that node and the extracted flat.
+ while (tree->size() == 1) {
+ int height = tree->height();
+ rep = tree->Edge(kBack);
+ Delete(tree);
+ if (height == 0) {
+ // We consumed the leaf: return the sole data edge as the new tree.
+ result.tree = rep;
+ return result;
+ }
+ tree = rep->btree();
+ }
+
+ // Done: return the (new) top level node and extracted flat.
+ result.tree = tree;
+ return result;
+}
+
} // namespace cord_internal
ABSL_NAMESPACE_END
} // namespace absl
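
A usage sketch of the new `ExtractAppendBuffer`, matching the `ExtractResult` contract documented in cord_internal.h and the tests added further down in this patch (function and variable names here are illustrative only):

#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"

using absl::cord_internal::CordRep;
using absl::cord_internal::CordRepBtree;

// Shows the three shapes a successful result can take.
void ExtractSketch(CordRepBtree* tree) {
  CordRepBtree::ExtractResult res =
      CordRepBtree::ExtractAppendBuffer(tree, /*extra_capacity=*/1);
  if (res.extracted == nullptr) {
    // Failure: a shared node on the right spine, a non-FLAT last edge, or
    // insufficient capacity. `res.tree` is the unchanged input tree.
  } else if (res.tree == nullptr) {
    // The extracted FLAT was the only data edge; the tree was consumed.
  } else if (!res.tree->IsBtree()) {
    // Exactly one data edge remains and is returned as a bare edge.
  } else {
    // A (possibly smaller) btree remains, with the right-most FLAT removed.
  }
}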
diff --git a/absl/strings/internal/cord_rep_btree.h b/absl/strings/internal/cord_rep_btree.h
index bb38f0c3..2cbc09ec 100644
--- a/absl/strings/internal/cord_rep_btree.h
+++ b/absl/strings/internal/cord_rep_btree.h
@@ -22,6 +22,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/optimization.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/string_view.h"
@@ -163,6 +164,9 @@ class CordRepBtree : public CordRep {
// typically after a ref_count.Decrement() on the last reference count.
static void Destroy(CordRepBtree* tree);
+ // Deletes `tree` itself; does not unref or destroy its edges.
+ static void Delete(CordRepBtree* tree) { delete tree; }
+
// Use CordRep::Unref() as we overload for absl::Span<CordRep* const>.
using CordRep::Unref;
@@ -240,11 +244,41 @@ class CordRepBtree : public CordRep {
// length of the flat node and involved tree nodes have been increased by
// `span.length()`. The caller is responsible for immediately assigning values
// to all uninitialized data referenced by the returned span.
- // Requires `this->refcount.IsMutable()`: this function forces the
- // caller to do this fast path check on the top level node, as this is the
- // most commonly shared node of a cord tree.
+ // Requires `this->refcount.IsOne()`: this function forces the caller to do
+ // this fast path check on the top level node, as this is the most commonly
+ // shared node of a cord tree.
Span<char> GetAppendBuffer(size_t size);
+ // Extracts the right-most data edge from this tree iff:
+ // - the tree and all internal edges to the right-most node are not shared.
+ // - the right-most node is a FLAT node and not shared.
+ // - the right-most node has at least the desired extra capacity.
+ //
+ // Returns {tree, nullptr} if any of the above conditions are not met.
+ // This method effectively removes data from the tree. The intent of this
+ // method is to allow applications appending small string data to use
+ // pre-existing capacity, and add the modified rep back to the tree.
+ //
+ // Simplified such code would look similar to this:
+ // void MyTreeBuilder::Append(string_view data) {
+ // ExtractResult result = CordRepBtree::ExtractAppendBuffer(tree_, 1);
+ // if (CordRep* rep = result.extracted) {
+ // size_t available = rep->flat()->Capacity() - rep->length;
+ // size_t n = std::min(data.size(), available);
+ // memcpy(rep->flat()->Data() + rep->length, data.data(), n);
+ // rep->length += n;
+ // data.remove_prefix(n);
+ // if (result.tree == nullptr) {
+ // tree_ = CordRepBtree::Create(rep);
+ // } else {
+ // if (!result.tree->IsBtree()) {
+ // tree_ = CordRepBtree::Create(result.tree);
+ // }
+ // tree_ = CordRepBtree::Append(tree_, rep);
+ // }
+ // }
+ // ...
+ // // Remaining edge in `result.tree`.
+ // }
+ static ExtractResult ExtractAppendBuffer(CordRepBtree* tree,
+ size_t extra_capacity = 1);
+
// Returns the `height` of the tree. The height of a tree is limited to
// kMaxHeight. `height` is implemented as an `int` as in some places we
// use negative (-1) values for 'data edges'.
@@ -277,13 +311,6 @@ class CordRepBtree : public CordRep {
// Requires this instance to be a leaf node, and `index` to be valid index.
inline absl::string_view Data(size_t index) const;
- static const char* EdgeDataPtr(const CordRep* r);
- static absl::string_view EdgeData(const CordRep* r);
-
- // Returns true if the provided rep is a FLAT, EXTERNAL or a SUBSTRING node
- // holding a FLAT or EXTERNAL child rep.
- static bool IsDataEdge(const CordRep* rep);
-
// Diagnostics: returns true if `tree` is valid and internally consistent.
// If `shallow` is false, then the provided top level node and all child nodes
// below it are recursively checked. If `shallow` is true, only the provided
@@ -410,12 +437,6 @@ class CordRepBtree : public CordRep {
// Requires `offset` < length.
Position IndexBeyond(size_t offset) const;
- // Destruction
- static void DestroyLeaf(CordRepBtree* tree, size_t begin, size_t end);
- static void DestroyNonLeaf(CordRepBtree* tree, size_t begin, size_t end);
- static void DestroyTree(CordRepBtree* tree, size_t begin, size_t end);
- static void Delete(CordRepBtree* tree) { delete tree; }
-
// Creates a new leaf node containing as much data as possible from `data`.
// The data is added either forwards or reversed depending on `edge_type`.
// Callers must check the length of the returned node to determine if all data
@@ -604,34 +625,11 @@ inline absl::Span<CordRep* const> CordRepBtree::Edges(size_t begin,
return {edges_ + begin, static_cast<size_t>(end - begin)};
}
-inline const char* CordRepBtree::EdgeDataPtr(const CordRep* r) {
- assert(IsDataEdge(r));
- size_t offset = 0;
- if (r->tag == SUBSTRING) {
- offset = r->substring()->start;
- r = r->substring()->child;
- }
- return (r->tag >= FLAT ? r->flat()->Data() : r->external()->base) + offset;
-}
-
-inline absl::string_view CordRepBtree::EdgeData(const CordRep* r) {
- return absl::string_view(EdgeDataPtr(r), r->length);
-}
-
inline absl::string_view CordRepBtree::Data(size_t index) const {
assert(height() == 0);
return EdgeData(Edge(index));
}
-inline bool CordRepBtree::IsDataEdge(const CordRep* rep) {
- // The fast path is that `rep` is an EXTERNAL or FLAT node, making the below
- // if a single, well predicted branch. We then repeat the FLAT or EXTERNAL
- // check in the slow path the SUBSTRING check to optimize for the hot path.
- if (rep->tag == EXTERNAL || rep->tag >= FLAT) return true;
- if (rep->tag == SUBSTRING) rep = rep->substring()->child;
- return rep->tag == EXTERNAL || rep->tag >= FLAT;
-}
-
inline CordRepBtree* CordRepBtree::New(int height) {
CordRepBtree* tree = new CordRepBtree;
tree->length = 0;
@@ -659,19 +657,6 @@ inline CordRepBtree* CordRepBtree::New(CordRepBtree* front,
return tree;
}
-inline void CordRepBtree::DestroyTree(CordRepBtree* tree, size_t begin,
- size_t end) {
- if (tree->height() == 0) {
- DestroyLeaf(tree, begin, end);
- } else {
- DestroyNonLeaf(tree, begin, end);
- }
-}
-
-inline void CordRepBtree::Destroy(CordRepBtree* tree) {
- DestroyTree(tree, tree->begin(), tree->end());
-}
-
inline void CordRepBtree::Unref(absl::Span<CordRep* const> edges) {
for (CordRep* edge : edges) {
if (ABSL_PREDICT_FALSE(!edge->refcount.Decrement())) {
@@ -731,7 +716,7 @@ inline void CordRepBtree::AlignBegin() {
// size, and then do overlapping load/store of up to 4 pointers (inlined as
// XMM, YMM or ZMM load/store) and up to 2 pointers (XMM / YMM), which is a)
// compact and b) not clobbering any registers.
- ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+ ABSL_ASSUME(new_end <= kMaxCapacity);
#ifdef __clang__
#pragma unroll 1
#endif
@@ -749,7 +734,7 @@ inline void CordRepBtree::AlignEnd() {
const size_t new_end = end() + delta;
set_begin(new_begin);
set_end(new_end);
- ABSL_INTERNAL_ASSUME(new_end <= kMaxCapacity);
+ ABSL_ASSUME(new_end <= kMaxCapacity);
#ifdef __clang__
#pragma unroll 1
#endif
@@ -849,7 +834,7 @@ inline CordRepBtree* CordRepBtree::Create(CordRep* rep) {
}
inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
CordRepBtree* tree = this;
const int height = this->height();
CordRepBtree* n1 = tree;
@@ -858,21 +843,21 @@ inline Span<char> CordRepBtree::GetAppendBuffer(size_t size) {
switch (height) {
case 3:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
n2 = tree;
ABSL_FALLTHROUGH_INTENDED;
case 2:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
n1 = tree;
ABSL_FALLTHROUGH_INTENDED;
case 1:
tree = tree->Edge(kBack)->btree();
- if (!tree->refcount.IsMutable()) return {};
+ if (!tree->refcount.IsOne()) return {};
ABSL_FALLTHROUGH_INTENDED;
case 0:
CordRep* edge = tree->Edge(kBack);
- if (!edge->refcount.IsMutable()) return {};
+ if (!edge->refcount.IsOne()) return {};
if (edge->tag < FLAT) return {};
size_t avail = edge->flat()->Capacity() - edge->length;
if (avail == 0) return {};
diff --git a/absl/strings/internal/cord_rep_btree_navigator.cc b/absl/strings/internal/cord_rep_btree_navigator.cc
index d1f9995d..9b896a3d 100644
--- a/absl/strings/internal/cord_rep_btree_navigator.cc
+++ b/absl/strings/internal/cord_rep_btree_navigator.cc
@@ -16,6 +16,7 @@
#include <cassert>
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
@@ -39,7 +40,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
assert(n <= rep->length);
assert(offset < rep->length);
assert(offset <= rep->length - n);
- assert(CordRepBtree::IsDataEdge(rep));
+ assert(IsDataEdge(rep));
if (n == 0) return nullptr;
if (n == rep->length) return CordRep::Ref(rep);
@@ -49,6 +50,7 @@ inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
rep = rep->substring()->child;
}
+ assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
diff --git a/absl/strings/internal/cord_rep_btree_navigator.h b/absl/strings/internal/cord_rep_btree_navigator.h
index 971b92ed..3d581c87 100644
--- a/absl/strings/internal/cord_rep_btree_navigator.h
+++ b/absl/strings/internal/cord_rep_btree_navigator.h
@@ -143,8 +143,8 @@ class CordRepBtreeNavigator {
// `index_` and `node_` contain the navigation state as the 'path' to the
// current data edge which is at `node_[0]->Edge(index_[0])`. The contents
// of these are undefined until the instance is initialized (`height_ >= 0`).
- uint8_t index_[CordRepBtree::kMaxHeight];
- CordRepBtree* node_[CordRepBtree::kMaxHeight];
+ uint8_t index_[CordRepBtree::kMaxDepth];
+ CordRepBtree* node_[CordRepBtree::kMaxDepth];
};
// Returns true if this instance is not empty.
@@ -173,6 +173,7 @@ template <CordRepBtree::EdgeType edge_type>
inline CordRep* CordRepBtreeNavigator::Init(CordRepBtree* tree) {
assert(tree != nullptr);
assert(tree->size() > 0);
+ assert(tree->height() <= CordRepBtree::kMaxHeight);
int height = height_ = tree->height();
size_t index = tree->index(edge_type);
node_[height] = tree;
@@ -206,6 +207,7 @@ inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::Seek(
inline CordRepBtreeNavigator::Position CordRepBtreeNavigator::InitOffset(
CordRepBtree* tree, size_t offset) {
assert(tree != nullptr);
+ assert(tree->height() <= CordRepBtree::kMaxHeight);
if (ABSL_PREDICT_FALSE(offset >= tree->length)) return {nullptr, 0};
height_ = tree->height();
node_[height_] = tree;
diff --git a/absl/strings/internal/cord_rep_btree_navigator_test.cc b/absl/strings/internal/cord_rep_btree_navigator_test.cc
index ce09b199..4f9bd4e5 100644
--- a/absl/strings/internal/cord_rep_btree_navigator_test.cc
+++ b/absl/strings/internal/cord_rep_btree_navigator_test.cc
@@ -319,6 +319,27 @@ TEST_P(CordRepBtreeNavigatorTest, ReadBeyondLengthOfTree) {
ASSERT_THAT(result.tree, Eq(nullptr));
}
+TEST(CordRepBtreeNavigatorTest, NavigateMaximumTreeDepth) {
+ CordRepFlat* flat1 = MakeFlat("Hello world");
+ CordRepFlat* flat2 = MakeFlat("World Hello");
+
+ CordRepBtree* node = CordRepBtree::Create(flat1);
+ node = CordRepBtree::Append(node, flat2);
+ while (node->height() < CordRepBtree::kMaxHeight) {
+ node = CordRepBtree::New(node);
+ }
+
+ CordRepBtreeNavigator nav;
+ CordRep* edge = nav.InitFirst(node);
+ EXPECT_THAT(edge, Eq(flat1));
+ EXPECT_THAT(nav.Next(), Eq(flat2));
+ EXPECT_THAT(nav.Next(), Eq(nullptr));
+ EXPECT_THAT(nav.Previous(), Eq(flat1));
+ EXPECT_THAT(nav.Previous(), Eq(nullptr));
+
+ CordRep::Unref(node);
+}
+
} // namespace
} // namespace cord_internal
ABSL_NAMESPACE_END
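
The array resize in cord_rep_btree_navigator.h above fixes an off-by-one: `Init()` writes `node_[height]` for heights 0 through `kMaxHeight` inclusive, so arrays of `kMaxHeight` elements were one slot short for a maximum-height tree, which the new `NavigateMaximumTreeDepth` test exercises. A minimal illustration, assuming `kMaxDepth == kMaxHeight + 1` (consistent with the new `height() <= kMaxHeight` asserts):

// Hypothetical constants for illustration; the real values live in
// CordRepBtree.
constexpr int kMaxHeight = 12;             // assumed value
constexpr int kMaxDepth = kMaxHeight + 1;  // one slot per height level

// node_at[kMaxHeight] is the root slot of a maximum-height tree; an array
// of only kMaxHeight elements would make that access out of bounds.
int node_at[kMaxDepth];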
diff --git a/absl/strings/internal/cord_rep_btree_reader.cc b/absl/strings/internal/cord_rep_btree_reader.cc
index 5dc76966..0d0e8601 100644
--- a/absl/strings/internal/cord_rep_btree_reader.cc
+++ b/absl/strings/internal/cord_rep_btree_reader.cc
@@ -17,6 +17,7 @@
#include <cassert>
#include "absl/base/config.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_navigator.h"
@@ -44,7 +45,7 @@ absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
// can directly return the substring into the current data edge as the next
// chunk. We can easily establish from the above code that `navigator_.Next()`
// has not been called as that requires `chunk_size` to be zero.
- if (n < chunk_size) return CordRepBtree::EdgeData(edge).substr(result.n);
+ if (n < chunk_size) return EdgeData(edge).substr(result.n);
// The amount of data taken from the last edge is `chunk_size` and `result.n`
// contains the offset into the current edge trailing the read data (which can
@@ -60,7 +61,7 @@ absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
// We did not read all data, return remaining data from current edge.
edge = navigator_.Current();
remaining_ -= consumed_by_read + edge->length;
- return CordRepBtree::EdgeData(edge).substr(result.n);
+ return EdgeData(edge).substr(result.n);
}
} // namespace cord_internal
diff --git a/absl/strings/internal/cord_rep_btree_reader.h b/absl/strings/internal/cord_rep_btree_reader.h
index 7aa79dbf..8db8f8dd 100644
--- a/absl/strings/internal/cord_rep_btree_reader.h
+++ b/absl/strings/internal/cord_rep_btree_reader.h
@@ -18,6 +18,7 @@
#include <cassert>
#include "absl/base/config.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_navigator.h"
@@ -167,7 +168,7 @@ inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
assert(tree != nullptr);
const CordRep* edge = navigator_.InitFirst(tree);
remaining_ = tree->length - edge->length;
- return CordRepBtree::EdgeData(edge);
+ return EdgeData(edge);
}
inline absl::string_view CordRepBtreeReader::Next() {
@@ -175,7 +176,7 @@ inline absl::string_view CordRepBtreeReader::Next() {
const CordRep* edge = navigator_.Next();
assert(edge != nullptr);
remaining_ -= edge->length;
- return CordRepBtree::EdgeData(edge);
+ return EdgeData(edge);
}
inline absl::string_view CordRepBtreeReader::Skip(size_t skip) {
@@ -190,7 +191,7 @@ inline absl::string_view CordRepBtreeReader::Skip(size_t skip) {
// The combined length of all edges skipped before `pos.edge` is `skip -
// pos.offset`, all of which are 'consumed', as well as the current edge.
remaining_ -= skip - pos.offset + pos.edge->length;
- return CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+ return EdgeData(pos.edge).substr(pos.offset);
}
inline absl::string_view CordRepBtreeReader::Seek(size_t offset) {
@@ -199,7 +200,7 @@ inline absl::string_view CordRepBtreeReader::Seek(size_t offset) {
remaining_ = 0;
return {};
}
- absl::string_view chunk = CordRepBtree::EdgeData(pos.edge).substr(pos.offset);
+ absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset);
remaining_ = length() - offset - chunk.length();
return chunk;
}
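
A usage sketch of the reader API these call sites belong to: a full sequential read via `Init()`/`Next()`. It assumes the reader exposes a `remaining()` accessor mirroring the `remaining_` bookkeeping updated above:

#include <string>

#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_reader.h"
#include "absl/strings/string_view.h"

// Appends every chunk of `tree` to `out`, in order.
void ReadAll(absl::cord_internal::CordRepBtree* tree, std::string* out) {
  absl::cord_internal::CordRepBtreeReader reader;
  absl::string_view chunk = reader.Init(tree);
  while (!chunk.empty()) {
    out->append(chunk.data(), chunk.size());
    // `remaining()` is assumed; Next() requires unread data to be left.
    chunk = reader.remaining() > 0 ? reader.Next() : absl::string_view();
  }
}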
diff --git a/absl/strings/internal/cord_rep_btree_test.cc b/absl/strings/internal/cord_rep_btree_test.cc
index be9473d4..51b90db1 100644
--- a/absl/strings/internal/cord_rep_btree_test.cc
+++ b/absl/strings/internal/cord_rep_btree_test.cc
@@ -25,6 +25,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/cleanup/cleanup.h"
+#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/str_cat.h"
@@ -52,7 +53,6 @@ using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CordVisitReps;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
-using ::absl::cordrep_testing::MakeConcat;
using ::absl::cordrep_testing::MakeExternal;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::MakeSubstring;
@@ -128,6 +128,16 @@ MATCHER_P2(IsSubstring, start, length,
return true;
}
+MATCHER_P2(EqExtractResult, tree, rep, "Equals ExtractResult") {
+ if (arg.tree != tree || arg.extracted != rep) {
+ *result_listener << "Expected {" << static_cast<const void*>(tree) << ", "
+ << static_cast<const void*>(rep) << "}, got {" << arg.tree
+ << ", " << arg.extracted << "}";
+ return false;
+ }
+ return true;
+}
+
// DataConsumer is a simple helper class used by tests to 'consume' string
// fragments from the provided input in forward or backward direction.
class DataConsumer {
@@ -312,40 +322,31 @@ TEST(CordRepBtreeTest, EdgeData) {
CordRepExternal* external = MakeExternal("Hello external");
CordRep* substr1 = MakeSubstring(1, 6, CordRep::Ref(flat));
CordRep* substr2 = MakeSubstring(1, 6, CordRep::Ref(external));
- CordRep* concat = MakeConcat(CordRep::Ref(flat), CordRep::Ref(external));
CordRep* bad_substr = MakeSubstring(1, 2, CordRep::Ref(substr1));
- EXPECT_TRUE(CordRepBtree::IsDataEdge(flat));
- EXPECT_THAT(CordRepBtree::EdgeDataPtr(flat),
- TypedEq<const void*>(flat->Data()));
- EXPECT_THAT(CordRepBtree::EdgeData(flat), Eq("Hello world"));
+ EXPECT_TRUE(IsDataEdge(flat));
+ EXPECT_THAT(EdgeData(flat).data(), TypedEq<const void*>(flat->Data()));
+ EXPECT_THAT(EdgeData(flat), Eq("Hello world"));
- EXPECT_TRUE(CordRepBtree::IsDataEdge(external));
- EXPECT_THAT(CordRepBtree::EdgeDataPtr(external),
- TypedEq<const void*>(external->base));
- EXPECT_THAT(CordRepBtree::EdgeData(external), Eq("Hello external"));
+ EXPECT_TRUE(IsDataEdge(external));
+ EXPECT_THAT(EdgeData(external).data(), TypedEq<const void*>(external->base));
+ EXPECT_THAT(EdgeData(external), Eq("Hello external"));
- EXPECT_TRUE(CordRepBtree::IsDataEdge(substr1));
- EXPECT_THAT(CordRepBtree::EdgeDataPtr(substr1),
- TypedEq<const void*>(flat->Data() + 1));
- EXPECT_THAT(CordRepBtree::EdgeData(substr1), Eq("ello w"));
+ EXPECT_TRUE(IsDataEdge(substr1));
+ EXPECT_THAT(EdgeData(substr1).data(), TypedEq<const void*>(flat->Data() + 1));
+ EXPECT_THAT(EdgeData(substr1), Eq("ello w"));
- EXPECT_TRUE(CordRepBtree::IsDataEdge(substr2));
- EXPECT_THAT(CordRepBtree::EdgeDataPtr(substr2),
+ EXPECT_TRUE(IsDataEdge(substr2));
+ EXPECT_THAT(EdgeData(substr2).data(),
TypedEq<const void*>(external->base + 1));
- EXPECT_THAT(CordRepBtree::EdgeData(substr2), Eq("ello e"));
+ EXPECT_THAT(EdgeData(substr2), Eq("ello e"));
- EXPECT_FALSE(CordRepBtree::IsDataEdge(concat));
- EXPECT_FALSE(CordRepBtree::IsDataEdge(bad_substr));
+ EXPECT_FALSE(IsDataEdge(bad_substr));
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
- EXPECT_DEATH(CordRepBtree::EdgeData(concat), ".*");
- EXPECT_DEATH(CordRepBtree::EdgeDataPtr(concat), ".*");
- EXPECT_DEATH(CordRepBtree::EdgeData(bad_substr), ".*");
- EXPECT_DEATH(CordRepBtree::EdgeDataPtr(bad_substr), ".*");
+ EXPECT_DEATH(EdgeData(bad_substr), ".*");
#endif
CordRep::Unref(bad_substr);
- CordRep::Unref(concat);
CordRep::Unref(substr2);
CordRep::Unref(substr1);
CordRep::Unref(external);
@@ -996,50 +997,6 @@ TEST_P(CordRepBtreeTest, AddLargeDataToLeaf) {
}
}
-TEST_P(CordRepBtreeDualTest, CreateFromConcat) {
- AutoUnref refs;
- CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"),
- MakeFlat("nopqrstuv"), MakeFlat("wxyz")};
- auto* left = MakeConcat(flats[0], flats[1]);
- auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3]));
- auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right));
- CordRepBtree* result = CordRepBtree::Create(concat);
- ASSERT_TRUE(CordRepBtree::IsValid(result));
- EXPECT_THAT(result->length, Eq(26));
- EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz"));
- CordRep::Unref(result);
-}
-
-TEST_P(CordRepBtreeDualTest, AppendConcat) {
- AutoUnref refs;
- CordRep* flats[] = {MakeFlat("defgh"), MakeFlat("ijklm"),
- MakeFlat("nopqrstuv"), MakeFlat("wxyz")};
- auto* left = MakeConcat(flats[0], flats[1]);
- auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3]));
- auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right));
- CordRepBtree* result = CordRepBtree::Create(MakeFlat("abc"));
- result = CordRepBtree::Append(result, concat);
- ASSERT_TRUE(CordRepBtree::IsValid(result));
- EXPECT_THAT(result->length, Eq(26));
- EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz"));
- CordRep::Unref(result);
-}
-
-TEST_P(CordRepBtreeDualTest, PrependConcat) {
- AutoUnref refs;
- CordRep* flats[] = {MakeFlat("abcdefgh"), MakeFlat("ijklm"),
- MakeFlat("nopqrstuv"), MakeFlat("wx")};
- auto* left = MakeConcat(flats[0], flats[1]);
- auto* right = MakeConcat(flats[2], refs.RefIf(first_shared(), flats[3]));
- auto* concat = refs.RefIf(second_shared(), MakeConcat(left, right));
- CordRepBtree* result = CordRepBtree::Create(MakeFlat("yz"));
- result = CordRepBtree::Prepend(result, concat);
- ASSERT_TRUE(CordRepBtree::IsValid(result));
- EXPECT_THAT(result->length, Eq(26));
- EXPECT_THAT(CordToString(result), Eq("abcdefghijklmnopqrstuvwxyz"));
- CordRep::Unref(result);
-}
-
TEST_P(CordRepBtreeTest, CreateFromTreeReturnsTree) {
AutoUnref refs;
CordRepBtree* leaf = CordRepBtree::Create(MakeFlat("Hello world"));
@@ -1483,6 +1440,125 @@ TEST_P(CordRepBtreeTest, Rebuild) {
}
}
+// Convenience helper for CordRepBtree::ExtractAppendBuffer
+CordRepBtree::ExtractResult ExtractLast(CordRepBtree* input, size_t cap = 1) {
+ return CordRepBtree::ExtractAppendBuffer(input, cap);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafSingleFlat) {
+ CordRep* flat = MakeFlat("Abc");
+ CordRepBtree* leaf = CordRepBtree::Create(flat);
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(nullptr, flat));
+ CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeSingleFlat) {
+ CordRep* flat = MakeFlat("Abc");
+ CordRepBtree* leaf = CordRepBtree::Create(flat);
+ CordRepBtree* node = CordRepBtree::New(leaf);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(nullptr, flat));
+ CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafTwoFlats) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ CordRepBtree* leaf = CreateTree(flats);
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(flats[0], flats[1]));
+ CordRep::Unref(flats[0]);
+ CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlats) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ CordRepBtree* leaf = CreateTree(flats);
+ CordRepBtree* node = CordRepBtree::New(leaf);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
+ CordRep::Unref(flats[0]);
+ CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeTwoFlatsInTwoLeafs) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ CordRepBtree* leaf1 = CordRepBtree::Create(flats[0]);
+ CordRepBtree* leaf2 = CordRepBtree::Create(flats[1]);
+ CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(flats[0], flats[1]));
+ CordRep::Unref(flats[0]);
+ CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferLeafThreeFlats) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdefghi", 3);
+ CordRepBtree* leaf = CreateTree(flats);
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, flats[2]));
+ CordRep::Unref(flats[2]);
+ CordRep::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightNoFolding) {
+ CordRep* flat = MakeFlat("Abc");
+ std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
+ CordRepBtree* leaf1 = CordRepBtree::Create(flat);
+ CordRepBtree* leaf2 = CreateTree(flats);
+ CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(node, flats[1]));
+ EXPECT_THAT(node->Edges(), ElementsAre(leaf1, leaf2));
+ EXPECT_THAT(leaf1->Edges(), ElementsAre(flat));
+ EXPECT_THAT(leaf2->Edges(), ElementsAre(flats[0]));
+ CordRep::Unref(node);
+ CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNodeThreeFlatsRightLeafFolding) {
+ CordRep* flat = MakeFlat("Abc");
+ std::vector<CordRep*> flats = CreateFlatsFromString("defghi", 3);
+ CordRepBtree* leaf1 = CreateTree(flats);
+ CordRepBtree* leaf2 = CordRepBtree::Create(flat);
+ CordRepBtree* node = CordRepBtree::New(leaf1, leaf2);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(leaf1, flat));
+ EXPECT_THAT(leaf1->Edges(), ElementsAreArray(flats));
+ CordRep::Unref(leaf1);
+ CordRep::Unref(flat);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNoCapacity) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ CordRepBtree* leaf = CreateTree(flats);
+ size_t avail = flats[1]->flat()->Capacity() - flats[1]->length;
+ EXPECT_THAT(ExtractLast(leaf, avail + 1), EqExtractResult(leaf, nullptr));
+ EXPECT_THAT(ExtractLast(leaf, avail), EqExtractResult(flats[0], flats[1]));
+ CordRep::Unref(flats[0]);
+ CordRep::Unref(flats[1]);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferNotFlat) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ auto substr = MakeSubstring(1, 2, flats[1]);
+ CordRepBtree* leaf = CreateTree({flats[0], substr});
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+ CordRep::Unref(leaf);
+}
+
+TEST(CordRepBtreeTest, ExtractAppendBufferShared) {
+ std::vector<CordRep*> flats = CreateFlatsFromString("abcdef", 3);
+ CordRepBtree* leaf = CreateTree(flats);
+
+ CordRep::Ref(flats[1]);
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+ CordRep::Unref(flats[1]);
+
+ CordRep::Ref(leaf);
+ EXPECT_THAT(ExtractLast(leaf), EqExtractResult(leaf, nullptr));
+ CordRep::Unref(leaf);
+
+ CordRepBtree* node = CordRepBtree::New(leaf);
+ CordRep::Ref(node);
+ EXPECT_THAT(ExtractLast(node), EqExtractResult(node, nullptr));
+ CordRep::Unref(node);
+
+ CordRep::Unref(node);
+}
+
} // namespace
} // namespace cord_internal
ABSL_NAMESPACE_END
diff --git a/absl/strings/internal/cord_rep_consume.cc b/absl/strings/internal/cord_rep_consume.cc
index 81514543..20a55797 100644
--- a/absl/strings/internal/cord_rep_consume.cc
+++ b/absl/strings/internal/cord_rep_consume.cc
@@ -40,88 +40,21 @@ CordRep* ClipSubstring(CordRepSubstring* substring) {
return child;
}
-// Unrefs the provided `concat`, and returns `{concat->left, concat->right}`
-// Adds or assumes a reference on `concat->left` and `concat->right`.
-// Returns an array of 2 elements containing the left and right nodes.
-std::array<CordRep*, 2> ClipConcat(CordRepConcat* concat) {
- std::array<CordRep*, 2> result{concat->left, concat->right};
- if (concat->refcount.IsOne()) {
- delete concat;
- } else {
- CordRep::Ref(result[0]);
- CordRep::Ref(result[1]);
- CordRep::Unref(concat);
- }
- return result;
-}
+} // namespace
-void Consume(bool forward, CordRep* rep, ConsumeFn consume_fn) {
+void Consume(CordRep* rep, ConsumeFn consume_fn) {
size_t offset = 0;
size_t length = rep->length;
- struct Entry {
- CordRep* rep;
- size_t offset;
- size_t length;
- };
- absl::InlinedVector<Entry, 40> stack;
-
- for (;;) {
- if (rep->tag == CONCAT) {
- std::array<CordRep*, 2> res = ClipConcat(rep->concat());
- CordRep* left = res[0];
- CordRep* right = res[1];
-
- if (left->length <= offset) {
- // Don't need left node
- offset -= left->length;
- CordRep::Unref(left);
- rep = right;
- continue;
- }
- size_t length_left = left->length - offset;
- if (length_left >= length) {
- // Don't need right node
- CordRep::Unref(right);
- rep = left;
- continue;
- }
-
- // Need both nodes
- size_t length_right = length - length_left;
- if (forward) {
- stack.push_back({right, 0, length_right});
- rep = left;
- length = length_left;
- } else {
- stack.push_back({left, offset, length_left});
- rep = right;
- offset = 0;
- length = length_right;
- }
- } else if (rep->tag == SUBSTRING) {
- offset += rep->substring()->start;
- rep = ClipSubstring(rep->substring());
- } else {
- consume_fn(rep, offset, length);
- if (stack.empty()) return;
-
- rep = stack.back().rep;
- offset = stack.back().offset;
- length = stack.back().length;
- stack.pop_back();
- }
+ if (rep->tag == SUBSTRING) {
+ offset += rep->substring()->start;
+ rep = ClipSubstring(rep->substring());
}
-}
-
-} // namespace
-
-void Consume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(true, rep, std::move(consume_fn));
+ consume_fn(rep, offset, length);
}
void ReverseConsume(CordRep* rep, ConsumeFn consume_fn) {
- return Consume(false, rep, std::move(consume_fn));
+ return Consume(rep, std::move(consume_fn));
}
} // namespace cord_internal
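
With CONCAT removed, the only wrapper Consume() can now see above a leaf is a
single SUBSTRING node, so forward and reverse traversal collapse to the same
one-shot callback. A minimal sketch of the resulting behavior, using the
MakeFlat/MakeSubstring helpers from cord_rep_test_util.h (illustrative only,
not part of this change):

  CordRep* flat = MakeFlat("abcdef");        // length 6
  CordRep* sub = MakeSubstring(1, 4, flat);  // views "bcde"
  Consume(sub, [](CordRep* rep, size_t offset, size_t length) {
    // rep == flat, offset == 1, length == 4: the substring wrapper was
    // clipped and its reference transferred to the callback.
  });
  CordRep::Unref(flat);  // release the reference handed to the callback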
diff --git a/absl/strings/internal/cord_rep_consume_test.cc b/absl/strings/internal/cord_rep_consume_test.cc
deleted file mode 100644
index e507824b..00000000
--- a/absl/strings/internal/cord_rep_consume_test.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2021 The Abseil Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "absl/strings/internal/cord_rep_consume.h"
-
-#include <functional>
-#include <utility>
-
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "absl/strings/internal/cord_internal.h"
-#include "absl/strings/internal/cord_rep_flat.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace cord_internal {
-namespace {
-
-using testing::InSequence;
-using testing::MockFunction;
-
-// Returns the depth of a node
-int Depth(const CordRep* rep) {
- return (rep->tag == CONCAT) ? rep->concat()->depth() : 0;
-}
-
-// Creates a concatenation of the specified nodes.
-CordRepConcat* CreateConcat(CordRep* left, CordRep* right) {
- auto* concat = new CordRepConcat();
- concat->tag = CONCAT;
- concat->left = left;
- concat->right = right;
- concat->length = left->length + right->length;
- concat->set_depth(1 + (std::max)(Depth(left), Depth(right)));
- return concat;
-}
-
-// Creates a flat with the length set to `length`
-CordRepFlat* CreateFlatWithLength(size_t length) {
- auto* flat = CordRepFlat::New(length);
- flat->length = length;
- return flat;
-}
-
-// Creates a substring node on the specified child.
-CordRepSubstring* CreateSubstring(CordRep* child, size_t start, size_t length) {
- auto* rep = new CordRepSubstring();
- rep->length = length;
- rep->tag = SUBSTRING;
- rep->start = start;
- rep->child = child;
- return rep;
-}
-
-// Flats we use in the tests
-CordRep* flat[6];
-
-// Creates a test tree
-CordRep* CreateTestTree() {
- flat[0] = CreateFlatWithLength(1);
- flat[1] = CreateFlatWithLength(7);
- CordRepConcat* left = CreateConcat(flat[0], CreateSubstring(flat[1], 2, 4));
-
- flat[2] = CreateFlatWithLength(9);
- flat[3] = CreateFlatWithLength(13);
- CordRepConcat* right1 = CreateConcat(flat[2], flat[3]);
-
- flat[4] = CreateFlatWithLength(15);
- flat[5] = CreateFlatWithLength(19);
- CordRepConcat* right2 = CreateConcat(flat[4], flat[5]);
-
- CordRepConcat* right = CreateConcat(right1, CreateSubstring(right2, 5, 17));
- return CreateConcat(left, right);
-}
-
-TEST(CordRepConsumeTest, Consume) {
- InSequence in_sequence;
- CordRep* tree = CreateTestTree();
- MockFunction<void(CordRep*, size_t, size_t)> consume;
- EXPECT_CALL(consume, Call(flat[0], 0, 1));
- EXPECT_CALL(consume, Call(flat[1], 2, 4));
- EXPECT_CALL(consume, Call(flat[2], 0, 9));
- EXPECT_CALL(consume, Call(flat[3], 0, 13));
- EXPECT_CALL(consume, Call(flat[4], 5, 10));
- EXPECT_CALL(consume, Call(flat[5], 0, 7));
- Consume(tree, consume.AsStdFunction());
- for (CordRep* rep : flat) {
- EXPECT_TRUE(rep->refcount.IsOne());
- CordRep::Unref(rep);
- }
-}
-
-TEST(CordRepConsumeTest, ConsumeShared) {
- InSequence in_sequence;
- CordRep* tree = CreateTestTree();
- MockFunction<void(CordRep*, size_t, size_t)> consume;
- EXPECT_CALL(consume, Call(flat[0], 0, 1));
- EXPECT_CALL(consume, Call(flat[1], 2, 4));
- EXPECT_CALL(consume, Call(flat[2], 0, 9));
- EXPECT_CALL(consume, Call(flat[3], 0, 13));
- EXPECT_CALL(consume, Call(flat[4], 5, 10));
- EXPECT_CALL(consume, Call(flat[5], 0, 7));
- Consume(CordRep::Ref(tree), consume.AsStdFunction());
- for (CordRep* rep : flat) {
- EXPECT_FALSE(rep->refcount.IsOne());
- CordRep::Unref(rep);
- }
- CordRep::Unref(tree);
-}
-
-TEST(CordRepConsumeTest, Reverse) {
- InSequence in_sequence;
- CordRep* tree = CreateTestTree();
- MockFunction<void(CordRep*, size_t, size_t)> consume;
- EXPECT_CALL(consume, Call(flat[5], 0, 7));
- EXPECT_CALL(consume, Call(flat[4], 5, 10));
- EXPECT_CALL(consume, Call(flat[3], 0, 13));
- EXPECT_CALL(consume, Call(flat[2], 0, 9));
- EXPECT_CALL(consume, Call(flat[1], 2, 4));
- EXPECT_CALL(consume, Call(flat[0], 0, 1));
- ReverseConsume(tree, consume.AsStdFunction());
- for (CordRep* rep : flat) {
- EXPECT_TRUE(rep->refcount.IsOne());
- CordRep::Unref(rep);
- }
-}
-
-TEST(CordRepConsumeTest, ReverseShared) {
- InSequence in_sequence;
- CordRep* tree = CreateTestTree();
- MockFunction<void(CordRep*, size_t, size_t)> consume;
- EXPECT_CALL(consume, Call(flat[5], 0, 7));
- EXPECT_CALL(consume, Call(flat[4], 5, 10));
- EXPECT_CALL(consume, Call(flat[3], 0, 13));
- EXPECT_CALL(consume, Call(flat[2], 0, 9));
- EXPECT_CALL(consume, Call(flat[1], 2, 4));
- EXPECT_CALL(consume, Call(flat[0], 0, 1));
- ReverseConsume(CordRep::Ref(tree), consume.AsStdFunction());
- for (CordRep* rep : flat) {
- EXPECT_FALSE(rep->refcount.IsOne());
- CordRep::Unref(rep);
- }
- CordRep::Unref(tree);
-}
-
-TEST(CordRepConsumeTest, UnreachableFlat) {
- InSequence in_sequence;
- CordRepFlat* flat1 = CreateFlatWithLength(10);
- CordRepFlat* flat2 = CreateFlatWithLength(20);
- CordRepConcat* concat = CreateConcat(flat1, flat2);
- CordRepSubstring* tree = CreateSubstring(concat, 15, 10);
- MockFunction<void(CordRep*, size_t, size_t)> consume;
- EXPECT_CALL(consume, Call(flat2, 5, 10));
- Consume(tree, consume.AsStdFunction());
- EXPECT_TRUE(flat2->refcount.IsOne());
- CordRep::Unref(flat2);
-}
-
-} // namespace
-} // namespace cord_internal
-ABSL_NAMESPACE_END
-} // namespace absl
diff --git a/absl/strings/internal/cord_rep_crc.cc b/absl/strings/internal/cord_rep_crc.cc
new file mode 100644
index 00000000..ee140354
--- /dev/null
+++ b/absl/strings/internal/cord_rep_crc.cc
@@ -0,0 +1,54 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_crc.h"
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+CordRepCrc* CordRepCrc::New(CordRep* child, uint32_t crc) {
+ assert(child != nullptr);
+ if (child->IsCrc()) {
+ if (child->refcount.IsOne()) {
+ child->crc()->crc = crc;
+ return child->crc();
+ }
+ CordRep* old = child;
+ child = old->crc()->child;
+ CordRep::Ref(child);
+ CordRep::Unref(old);
+ }
+ auto* new_cordrep = new CordRepCrc;
+ new_cordrep->length = child->length;
+ new_cordrep->tag = cord_internal::CRC;
+ new_cordrep->child = child;
+ new_cordrep->crc = crc;
+ return new_cordrep;
+}
+
+void CordRepCrc::Destroy(CordRepCrc* node) {
+ CordRep::Unref(node->child);
+ delete node;
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/strings/internal/cord_rep_crc.h b/absl/strings/internal/cord_rep_crc.h
new file mode 100644
index 00000000..5294b0d1
--- /dev/null
+++ b/absl/strings/internal/cord_rep_crc.h
@@ -0,0 +1,102 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+#define ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
+
+#include <cassert>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/internal/cord_internal.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+
+// CordRepCrc is a CordRep node intended only to appear at the top level of a
+// cord tree. It associates an "expected CRC" with the contained data, to allow
+// for easy passage of checksum data in Cord data flows.
+//
+// From Cord's perspective, the crc value has no semantics; any validation of
+// the contained checksum is the user's responsibility.
+struct CordRepCrc : public CordRep {
+ CordRep* child;
+ uint32_t crc;
+
+ // Consumes `child` and returns a CordRepCrc-prefixed tree containing `child`.
+ // If the specified `child` is itself a CordRepCrc node, then this method
+ // updates the crc value in place when the node is unshared (i.e.,
+ // refcount.IsOne()), and replaces it with a new node otherwise.
+ // `child` must not be null. Never returns null.
+ static CordRepCrc* New(CordRep* child, uint32_t crc);
+
+ // Destroys (deletes) the provided node. `node` must not be null.
+ static void Destroy(CordRepCrc* node);
+};
+
+// Consumes `rep` and returns a CordRep* with any outer CordRepCrc wrapper
+// removed. This is usually a no-op (returning `rep`), but an outer CordRepCrc
+// node, if present, is removed and unreffed.
+inline CordRep* RemoveCrcNode(CordRep* rep) {
+ assert(rep != nullptr);
+ if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ CordRep* child = rep->crc()->child;
+ if (rep->refcount.IsOne()) {
+ delete rep->crc();
+ } else {
+ CordRep::Ref(child);
+ CordRep::Unref(rep);
+ }
+ return child;
+ }
+ return rep;
+}
+
+// Returns `rep` if it is not a CordRepCrc node, or its child if it is.
+// Does not consume or create a reference on `rep` or the returned value.
+inline CordRep* SkipCrcNode(CordRep* rep) {
+ assert(rep != nullptr);
+ if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ return rep->crc()->child;
+ } else {
+ return rep;
+ }
+}
+
+inline const CordRep* SkipCrcNode(const CordRep* rep) {
+ assert(rep != nullptr);
+ if (ABSL_PREDICT_FALSE(rep->IsCrc())) {
+ return rep->crc()->child;
+ } else {
+ return rep;
+ }
+}
+
+inline CordRepCrc* CordRep::crc() {
+ assert(IsCrc());
+ return static_cast<CordRepCrc*>(this);
+}
+
+inline const CordRepCrc* CordRep::crc() const {
+ assert(IsCrc());
+ return static_cast<const CordRepCrc*>(this);
+}
+
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_STRINGS_INTERNAL_CORD_REP_CRC_H_
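
Taken together, the contracts above give the following ownership behavior; a
short sketch using MakeFlat from cord_rep_test_util.h (illustrative only, not
part of this change):

  CordRep* flat = MakeFlat("hello");
  CordRepCrc* crc = CordRepCrc::New(flat, 0x1234);  // consumes `flat`
  CordRep* peek = SkipCrcNode(crc);     // no reference taken
  assert(peek == flat);
  CordRep* child = RemoveCrcNode(crc);  // consumes `crc`
  assert(child == flat);
  CordRep::Unref(child);                // releases the last reference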
diff --git a/absl/strings/internal/cord_rep_crc_test.cc b/absl/strings/internal/cord_rep_crc_test.cc
new file mode 100644
index 00000000..80f53485
--- /dev/null
+++ b/absl/strings/internal/cord_rep_crc_test.cc
@@ -0,0 +1,115 @@
+// Copyright 2021 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/strings/internal/cord_rep_crc.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/strings/internal/cord_internal.h"
+#include "absl/strings/internal/cord_rep_test_util.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace cord_internal {
+namespace {
+
+using ::absl::cordrep_testing::MakeFlat;
+using ::testing::Eq;
+using ::testing::Ne;
+
+#if !defined(NDEBUG) && GTEST_HAS_DEATH_TEST
+
+TEST(CordRepCrc, NewWithNullPtr) {
+ EXPECT_DEATH(CordRepCrc::New(nullptr, 0), "");
+}
+
+TEST(CordRepCrc, RemoveCrcWithNullptr) {
+ EXPECT_DEATH(RemoveCrcNode(nullptr), "");
+}
+
+#endif // !NDEBUG && GTEST_HAS_DEATH_TEST
+
+TEST(CordRepCrc, NewDestroy) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRepCrc* crc = CordRepCrc::New(rep, 12345);
+ EXPECT_TRUE(crc->refcount.IsOne());
+ EXPECT_THAT(crc->child, Eq(rep));
+ EXPECT_THAT(crc->crc, Eq(12345));
+ EXPECT_TRUE(rep->refcount.IsOne());
+ CordRepCrc::Destroy(crc);
+}
+
+TEST(CordRepCrc, NewExistingCrcNotShared) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRepCrc* crc = CordRepCrc::New(rep, 12345);
+ CordRepCrc* new_crc = CordRepCrc::New(crc, 54321);
+ EXPECT_THAT(new_crc, Eq(crc));
+ EXPECT_TRUE(new_crc->refcount.IsOne());
+ EXPECT_THAT(new_crc->child, Eq(rep));
+ EXPECT_THAT(new_crc->crc, Eq(54321));
+ EXPECT_TRUE(rep->refcount.IsOne());
+ CordRepCrc::Destroy(new_crc);
+}
+
+TEST(CordRepCrc, NewExistingCrcShared) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRepCrc* crc = CordRepCrc::New(rep, 12345);
+ CordRep::Ref(crc);
+ CordRepCrc* new_crc = CordRepCrc::New(crc, 54321);
+
+ EXPECT_THAT(new_crc, Ne(crc));
+ EXPECT_TRUE(new_crc->refcount.IsOne());
+ EXPECT_TRUE(crc->refcount.IsOne());
+ EXPECT_FALSE(rep->refcount.IsOne());
+ EXPECT_THAT(crc->child, Eq(rep));
+ EXPECT_THAT(new_crc->child, Eq(rep));
+ EXPECT_THAT(crc->crc, Eq(12345));
+ EXPECT_THAT(new_crc->crc, Eq(54321));
+
+ CordRep::Unref(crc);
+ CordRep::Unref(new_crc);
+}
+
+TEST(CordRepCrc, RemoveCrcNotCrc) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRep* nocrc = RemoveCrcNode(rep);
+ EXPECT_THAT(nocrc, Eq(rep));
+ CordRep::Unref(nocrc);
+}
+
+TEST(CordRepCrc, RemoveCrcNotShared) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRepCrc* crc = CordRepCrc::New(rep, 12345);
+ CordRep* nocrc = RemoveCrcNode(crc);
+ EXPECT_THAT(nocrc, Eq(rep));
+ EXPECT_TRUE(rep->refcount.IsOne());
+ CordRep::Unref(nocrc);
+}
+
+TEST(CordRepCrc, RemoveCrcShared) {
+ CordRep* rep = cordrep_testing::MakeFlat("Hello world");
+ CordRepCrc* crc = CordRepCrc::New(rep, 12345);
+ CordRep::Ref(crc);
+ CordRep* nocrc = RemoveCrcNode(crc);
+ EXPECT_THAT(nocrc, Eq(rep));
+ EXPECT_FALSE(rep->refcount.IsOne());
+ CordRep::Unref(nocrc);
+ CordRep::Unref(crc);
+}
+
+} // namespace
+} // namespace cord_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/absl/strings/internal/cord_rep_flat.h b/absl/strings/internal/cord_rep_flat.h
index 4d0f9886..e3e27fcd 100644
--- a/absl/strings/internal/cord_rep_flat.h
+++ b/absl/strings/internal/cord_rep_flat.h
@@ -20,6 +20,8 @@
#include <cstdint>
#include <memory>
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
#include "absl/strings/internal/cord_internal.h"
namespace absl {
@@ -42,23 +44,45 @@ static constexpr size_t kMinFlatSize = 32;
static constexpr size_t kMaxFlatSize = 4096;
static constexpr size_t kMaxFlatLength = kMaxFlatSize - kFlatOverhead;
static constexpr size_t kMinFlatLength = kMinFlatSize - kFlatOverhead;
+static constexpr size_t kMaxLargeFlatSize = 256 * 1024;
+static constexpr size_t kMaxLargeFlatLength = kMaxLargeFlatSize - kFlatOverhead;
+// kTagBase should make the Size <--> Tag computation resilient
+// against changes to the value of FLAT when we add a new tag.
+static constexpr uint8_t kTagBase = FLAT - 4;
+
+// Converts the provided rounded size to the corresponding tag
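+// The rounded sizes fall into three bands: 8-byte steps up to 512 bytes,
+// 64-byte steps up to 8 KiB, and 4096-byte steps up to kMaxLargeFlatSize.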
constexpr uint8_t AllocatedSizeToTagUnchecked(size_t size) {
- return static_cast<uint8_t>((size <= 1024) ? size / 8 + 1
- : 129 + size / 32 - 1024 / 32);
+ return static_cast<uint8_t>(size <= 512 ? kTagBase + size / 8
+ : size <= 8192
+ ? kTagBase + 512 / 8 + size / 64 - 512 / 64
+ : kTagBase + 512 / 8 + ((8192 - 512) / 64) +
+ size / 4096 - 8192 / 4096);
+}
+
+// Converts the provided tag to the corresponding allocated size
+constexpr size_t TagToAllocatedSize(uint8_t tag) {
+ return (tag <= kTagBase + 512 / 8) ? tag * 8 - kTagBase * 8
+ : (tag <= kTagBase + (512 / 8) + ((8192 - 512) / 64))
+ ? 512 + tag * 64 - kTagBase * 64 - 512 / 8 * 64
+ : 8192 + tag * 4096 - kTagBase * 4096 -
+ ((512 / 8) + ((8192 - 512) / 64)) * 4096;
}
-static_assert(kMinFlatSize / 8 + 1 >= FLAT, "");
-static_assert(AllocatedSizeToTagUnchecked(kMaxFlatSize) <= MAX_FLAT_TAG, "");
+static_assert(AllocatedSizeToTagUnchecked(kMinFlatSize) == FLAT, "");
+static_assert(AllocatedSizeToTagUnchecked(kMaxLargeFlatSize) == MAX_FLAT_TAG,
+ "");
-// Helper functions for rounded div, and rounding to exact sizes.
-constexpr size_t DivUp(size_t n, size_t m) { return (n + m - 1) / m; }
-constexpr size_t RoundUp(size_t n, size_t m) { return DivUp(n, m) * m; }
+// RoundUp logically performs `((n + m - 1) / m) * m` to round up to the nearest
+// multiple of `m`, optimized for the invariant that `m` is a power of 2.
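+// For example, with m == 32, `0 - m` equals ~size_t{31}, so
+// RoundUp(40, 32) == (71 & ~31) == 64.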
+constexpr size_t RoundUp(size_t n, size_t m) {
+ return (n + m - 1) & (0 - m);
+}
// Returns the size to the nearest equal or larger value that can be
// expressed exactly as a tag value.
inline size_t RoundUpForTag(size_t size) {
- return RoundUp(size, (size <= 1024) ? 8 : 32);
+ return RoundUp(size, (size <= 512) ? 8 : (size <= 8192 ? 64 : 4096));
}
// Converts the allocated size to a tag, rounding down if the size
@@ -71,26 +95,26 @@ inline uint8_t AllocatedSizeToTag(size_t size) {
return tag;
}
-// Converts the provided tag to the corresponding allocated size
-constexpr size_t TagToAllocatedSize(uint8_t tag) {
- return (tag <= 129) ? ((tag - 1) * 8) : (1024 + (tag - 129) * 32);
-}
-
// Converts the provided tag to the corresponding available data length
constexpr size_t TagToLength(uint8_t tag) {
return TagToAllocatedSize(tag) - kFlatOverhead;
}
// Enforce that kMaxLargeFlatSize maps to a well-known exact tag value.
-static_assert(TagToAllocatedSize(225) == kMaxFlatSize, "Bad tag logic");
+static_assert(TagToAllocatedSize(MAX_FLAT_TAG) == kMaxLargeFlatSize,
+ "Bad tag logic");
struct CordRepFlat : public CordRep {
+ // Tag for explicit 'large flat' allocation
+ struct Large {};
+
// Creates a new flat node.
- static CordRepFlat* New(size_t len) {
+ template <size_t max_flat_size, typename... Args>
+ static CordRepFlat* NewImpl(size_t len, Args... args ABSL_ATTRIBUTE_UNUSED) {
if (len <= kMinFlatLength) {
len = kMinFlatLength;
- } else if (len > kMaxFlatLength) {
- len = kMaxFlatLength;
+ } else if (len > max_flat_size - kFlatOverhead) {
+ len = max_flat_size - kFlatOverhead;
}
// Round size up so it matches a size we can exactly express in a tag.
@@ -101,6 +125,12 @@ struct CordRepFlat : public CordRep {
return rep;
}
+ static CordRepFlat* New(size_t len) { return NewImpl<kMaxFlatSize>(len); }
+
+ static CordRepFlat* New(Large, size_t len) {
+ return NewImpl<kMaxLargeFlatSize>(len);
+ }
+
// Deletes a CordRepFlat instance created previously through a call to New().
// Flat CordReps are allocated and constructed with raw ::operator new and
// placement new, and must be destructed and deallocated accordingly.
@@ -117,6 +147,17 @@ struct CordRepFlat : public CordRep {
#endif
}
+ // Creates a CordRepFlat containing `data`, with up to `extra` bytes of
+ // additional capacity. Requires that `data.size()` is at most
+ // kMaxFlatLength.
+ static CordRepFlat* Create(absl::string_view data, size_t extra = 0) {
+ assert(data.size() <= kMaxFlatLength);
+ CordRepFlat* flat = New(data.size() + (std::min)(extra, kMaxFlatLength));
+ memcpy(flat->Data(), data.data(), data.size());
+ flat->length = data.size();
+ return flat;
+ }
+
// Returns a pointer to the data inside this flat rep.
char* Data() { return reinterpret_cast<char*>(storage); }
const char* Data() const { return reinterpret_cast<const char*>(storage); }
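
The Large tag gives callers an explicit opt-in to bigger flats; a sketch of
the difference (identifiers are from this header, sizes illustrative):

  CordRepFlat* small = CordRepFlat::New(100 * 1024);
  // Clamped: a plain New() caps the length near kMaxFlatLength (4 KiB class).
  CordRepFlat* large = CordRepFlat::New(CordRepFlat::Large(), 100 * 1024);
  // May allocate up to kMaxLargeFlatSize (256 KiB) minus kFlatOverhead.
  CordRepFlat::Delete(small);
  CordRepFlat::Delete(large);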
diff --git a/absl/strings/internal/cord_rep_ring.cc b/absl/strings/internal/cord_rep_ring.cc
index 07c77eb3..af2fc768 100644
--- a/absl/strings/internal/cord_rep_ring.cc
+++ b/absl/strings/internal/cord_rep_ring.cc
@@ -129,7 +129,9 @@ class CordRepRing::Filler {
index_type pos_;
};
-constexpr size_t CordRepRing::kMaxCapacity; // NOLINT: needed for c++11
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr size_t CordRepRing::kMaxCapacity;
+#endif
bool CordRepRing::IsValid(std::ostream& output) const {
if (capacity_ == 0) {
@@ -277,7 +279,7 @@ CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
// Get current number of entries, and check for max capacity.
size_t entries = rep->entries();
- if (!rep->refcount.IsMutable()) {
+ if (!rep->refcount.IsOne()) {
return Copy(rep, rep->head(), rep->tail(), extra);
} else if (entries + extra > rep->capacity()) {
const size_t min_grow = rep->capacity() + rep->capacity() / 2;
@@ -292,10 +294,10 @@ CordRepRing* CordRepRing::Mutable(CordRepRing* rep, size_t extra) {
}
Span<char> CordRepRing::GetAppendBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
index_type back = retreat(tail_);
CordRep* child = entry_child(back);
- if (child->tag >= FLAT && child->refcount.IsMutable()) {
+ if (child->tag >= FLAT && child->refcount.IsOne()) {
size_t capacity = child->flat()->Capacity();
pos_type end_pos = entry_end_pos(back);
size_t data_offset = entry_data_offset(back);
@@ -312,10 +314,10 @@ Span<char> CordRepRing::GetAppendBuffer(size_t size) {
}
Span<char> CordRepRing::GetPrependBuffer(size_t size) {
- assert(refcount.IsMutable());
+ assert(refcount.IsOne());
CordRep* child = entry_child(head_);
size_t data_offset = entry_data_offset(head_);
- if (data_offset && child->refcount.IsMutable() && child->tag >= FLAT) {
+ if (data_offset && child->refcount.IsOne() && child->tag >= FLAT) {
size_t n = (std::min)(data_offset, size);
this->length += n;
begin_pos_ -= n;
@@ -504,7 +506,7 @@ CordRepRing* CordRepRing::Prepend(CordRepRing* rep, CordRep* child) {
CordRepRing* CordRepRing::Append(CordRepRing* rep, absl::string_view data,
size_t extra) {
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
Span<char> avail = rep->GetAppendBuffer(data.length());
if (!avail.empty()) {
memcpy(avail.data(), data.data(), avail.length());
@@ -538,7 +540,7 @@ CordRepRing* CordRepRing::Append(CordRepRing* rep, absl::string_view data,
CordRepRing* CordRepRing::Prepend(CordRepRing* rep, absl::string_view data,
size_t extra) {
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
Span<char> avail = rep->GetPrependBuffer(data.length());
if (!avail.empty()) {
const char* tail = data.data() + data.length() - avail.length();
@@ -678,7 +680,7 @@ CordRepRing* CordRepRing::SubRing(CordRepRing* rep, size_t offset,
Position tail = rep->FindTail(head.index, offset + len);
const size_t new_entries = rep->entries(head.index, tail.index);
- if (rep->refcount.IsMutable() && extra <= (rep->capacity() - new_entries)) {
+ if (rep->refcount.IsOne() && extra <= (rep->capacity() - new_entries)) {
// We adopt a privately owned rep and no extra entries needed.
if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
@@ -715,7 +717,7 @@ CordRepRing* CordRepRing::RemovePrefix(CordRepRing* rep, size_t len,
}
Position head = rep->Find(len);
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
if (head.index != rep->head_) UnrefEntries(rep, rep->head_, head.index);
rep->head_ = head.index;
} else {
@@ -745,7 +747,7 @@ CordRepRing* CordRepRing::RemoveSuffix(CordRepRing* rep, size_t len,
}
Position tail = rep->FindTail(rep->length - len);
- if (rep->refcount.IsMutable()) {
+ if (rep->refcount.IsOne()) {
// We adopt a privately owned rep, scrub.
if (tail.index != rep->tail_) UnrefEntries(rep, tail.index, rep->tail_);
rep->tail_ = tail.index;
diff --git a/absl/strings/internal/cord_rep_test_util.h b/absl/strings/internal/cord_rep_test_util.h
index ad828af2..18a0a195 100644
--- a/absl/strings/internal/cord_rep_test_util.h
+++ b/absl/strings/internal/cord_rep_test_util.h
@@ -42,18 +42,6 @@ inline cord_internal::CordRepSubstring* MakeSubstring(
return sub;
}
-inline cord_internal::CordRepConcat* MakeConcat(cord_internal::CordRep* left,
- cord_internal::CordRep* right,
- int depth = 0) {
- auto* concat = new cord_internal::CordRepConcat;
- concat->tag = cord_internal::CONCAT;
- concat->length = left->length + right->length;
- concat->left = left;
- concat->right = right;
- concat->set_depth(depth);
- return concat;
-}
-
inline cord_internal::CordRepFlat* MakeFlat(absl::string_view value) {
assert(value.length() <= cord_internal::kMaxFlatLength);
auto* flat = cord_internal::CordRepFlat::New(value.length());
@@ -126,9 +114,6 @@ inline void CordVisitReps(cord_internal::CordRep* rep, Fn&& fn) {
for (cord_internal::CordRep* edge : rep->btree()->Edges()) {
CordVisitReps(edge, fn);
}
- } else if (rep->tag == cord_internal::CONCAT) {
- CordVisitReps(rep->concat()->left, fn);
- CordVisitReps(rep->concat()->right, fn);
}
}
diff --git a/absl/strings/internal/cordz_info.cc b/absl/strings/internal/cordz_info.cc
index 5c18bbc5..dac3fd8b 100644
--- a/absl/strings/internal/cordz_info.cc
+++ b/absl/strings/internal/cordz_info.cc
@@ -20,6 +20,7 @@
#include "absl/debugging/stacktrace.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/cordz_handle.h"
#include "absl/strings/internal/cordz_statistics.h"
@@ -33,7 +34,9 @@ namespace cord_internal {
using ::absl::base_internal::SpinLockHolder;
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr int CordzInfo::kMaxStackDepth;
+#endif
ABSL_CONST_INIT CordzInfo::List CordzInfo::global_list_{absl::kConstInit};
@@ -81,6 +84,14 @@ class CordRepAnalyzer {
size_t refcount = rep->refcount.Get();
RepRef repref{rep, (refcount > 1) ? refcount - 1 : 1};
+ // Process the top level CRC node, if present.
+ if (repref.rep->tag == CRC) {
+ statistics_.node_count++;
+ statistics_.node_counts.crc++;
+ memory_usage_.Add(sizeof(CordRepCrc), repref.refcount);
+ repref = repref.Child(repref.rep->crc()->child);
+ }
+
// Process all top level linear nodes (substrings and flats).
repref = CountLinearReps(repref, memory_usage_);
@@ -89,8 +100,6 @@ class CordRepAnalyzer {
AnalyzeRing(repref);
} else if (repref.rep->tag == BTREE) {
AnalyzeBtree(repref);
- } else if (repref.rep->tag == CONCAT) {
- AnalyzeConcat(repref);
} else {
// We should have either a btree or ring node if not null.
assert(false);
@@ -132,14 +141,6 @@ class CordRepAnalyzer {
}
};
- // Returns `rr` if `rr.rep` is not null and a CONCAT type.
- // Asserts that `rr.rep` is a concat node or null.
- static RepRef AssertConcat(RepRef repref) {
- const CordRep* rep = repref.rep;
- assert(rep == nullptr || rep->tag == CONCAT);
- return (rep != nullptr && rep->tag == CONCAT) ? repref : RepRef{nullptr, 0};
- }
-
// Counts a flat of the provided allocated size
void CountFlat(size_t size) {
statistics_.node_count++;
@@ -192,34 +193,6 @@ class CordRepAnalyzer {
return rep;
}
- // Analyzes the provided concat node in a flattened recursive way.
- void AnalyzeConcat(RepRef rep) {
- absl::InlinedVector<RepRef, 47> pending;
-
- while (rep.rep != nullptr) {
- const CordRepConcat* concat = rep.rep->concat();
- RepRef left = rep.Child(concat->left);
- RepRef right = rep.Child(concat->right);
-
- statistics_.node_count++;
- statistics_.node_counts.concat++;
- memory_usage_.Add(sizeof(CordRepConcat), rep.refcount);
-
- right = AssertConcat(CountLinearReps(right, memory_usage_));
- rep = AssertConcat(CountLinearReps(left, memory_usage_));
- if (rep.rep != nullptr) {
- if (right.rep != nullptr) {
- pending.push_back(right);
- }
- } else if (right.rep != nullptr) {
- rep = right;
- } else if (!pending.empty()) {
- rep = pending.back();
- pending.pop_back();
- }
- }
- }
-
// Analyzes the provided ring.
void AnalyzeRing(RepRef rep) {
statistics_.node_count++;
diff --git a/absl/strings/internal/cordz_info_statistics_test.cc b/absl/strings/internal/cordz_info_statistics_test.cc
index 7430d281..476c38d2 100644
--- a/absl/strings/internal/cordz_info_statistics_test.cc
+++ b/absl/strings/internal/cordz_info_statistics_test.cc
@@ -22,6 +22,7 @@
#include "absl/strings/cord.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
+#include "absl/strings/internal/cord_rep_crc.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/internal/cord_rep_ring.h"
#include "absl/strings/internal/cordz_info.h"
@@ -75,16 +76,6 @@ CordRepSubstring* Substring(CordRep* rep) {
return substring;
}
-// Creates a concat on the provided reps
-CordRepConcat* Concat(CordRep* left, CordRep* right) {
- auto* concat = new CordRepConcat;
- concat->length = left->length + right->length;
- concat->tag = CONCAT;
- concat->left = left;
- concat->right = right;
- return concat;
-}
-
// Reference count helper
struct RefHelper {
std::vector<CordRep*> refs;
@@ -157,10 +148,6 @@ double FairShareImpl(CordRep* rep, size_t ref) {
rep->ring()->ForEach([&](CordRepRing::index_type i) {
self += FairShareImpl(rep->ring()->entry_child(i), 1);
});
- } else if (rep->tag == CONCAT) {
- self = SizeOf(rep->concat());
- children = FairShareImpl(rep->concat()->left, ref) +
- FairShareImpl(rep->concat()->right, ref);
} else {
assert(false);
}
@@ -306,80 +293,6 @@ TEST(CordzInfoStatisticsTest, SharedSubstring) {
EXPECT_THAT(SampleCord(substring), EqStatistics(expected));
}
-TEST(CordzInfoStatisticsTest, Concat) {
- RefHelper ref;
- auto* flat1 = Flat(300);
- auto* flat2 = Flat(2000);
- auto* concat = ref.NeedsUnref(Concat(flat1, flat2));
-
- CordzStatistics expected;
- expected.size = concat->length;
- expected.estimated_memory_usage =
- SizeOf(concat) + SizeOf(flat1) + SizeOf(flat2);
- expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
- expected.node_count = 3;
- expected.node_counts.flat = 2;
- expected.node_counts.flat_512 = 1;
- expected.node_counts.concat = 1;
-
- EXPECT_THAT(SampleCord(concat), EqStatistics(expected));
-}
-
-TEST(CordzInfoStatisticsTest, DeepConcat) {
- RefHelper ref;
- auto* flat1 = Flat(300);
- auto* flat2 = Flat(2000);
- auto* flat3 = Flat(400);
- auto* external = External(3000);
- auto* substring = Substring(external);
- auto* concat1 = Concat(flat1, flat2);
- auto* concat2 = Concat(flat3, substring);
- auto* concat = ref.NeedsUnref(Concat(concat1, concat2));
-
- CordzStatistics expected;
- expected.size = concat->length;
- expected.estimated_memory_usage = SizeOf(concat) * 3 + SizeOf(flat1) +
- SizeOf(flat2) + SizeOf(flat3) +
- SizeOf(external) + SizeOf(substring);
- expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
-
- expected.node_count = 8;
- expected.node_counts.flat = 3;
- expected.node_counts.flat_512 = 2;
- expected.node_counts.external = 1;
- expected.node_counts.concat = 3;
- expected.node_counts.substring = 1;
-
- EXPECT_THAT(SampleCord(concat), EqStatistics(expected));
-}
-
-TEST(CordzInfoStatisticsTest, DeepSharedConcat) {
- RefHelper ref;
- auto* flat1 = Flat(40);
- auto* flat2 = ref.Ref(Flat(2000), 4);
- auto* flat3 = Flat(70);
- auto* external = ref.Ref(External(3000));
- auto* substring = ref.Ref(Substring(external), 3);
- auto* concat1 = Concat(flat1, flat2);
- auto* concat2 = Concat(flat3, substring);
- auto* concat = ref.Ref(ref.NeedsUnref(Concat(concat1, concat2)));
-
- CordzStatistics expected;
- expected.size = concat->length;
- expected.estimated_memory_usage = SizeOf(concat) * 3 + SizeOf(flat1) +
- SizeOf(flat2) + SizeOf(flat3) +
- SizeOf(external) + SizeOf(substring);
- expected.estimated_fair_share_memory_usage = FairShare(concat);
- expected.node_count = 8;
- expected.node_counts.flat = 3;
- expected.node_counts.flat_64 = 1;
- expected.node_counts.flat_128 = 1;
- expected.node_counts.external = 1;
- expected.node_counts.concat = 3;
- expected.node_counts.substring = 1;
-
- EXPECT_THAT(SampleCord(concat), EqStatistics(expected));
-}
TEST(CordzInfoStatisticsTest, Ring) {
RefHelper ref;
@@ -535,6 +448,23 @@ TEST(CordzInfoStatisticsTest, BtreeNodeShared) {
EXPECT_THAT(SampleCord(tree), EqStatistics(expected));
}
+TEST(CordzInfoStatisticsTest, Crc) {
+ RefHelper ref;
+ auto* left = Flat(1000);
+ auto* crc = ref.NeedsUnref(CordRepCrc::New(left, 12345));
+
+ CordzStatistics expected;
+ expected.size = left->length;
+ expected.estimated_memory_usage = SizeOf(crc) + SizeOf(left);
+ expected.estimated_fair_share_memory_usage = expected.estimated_memory_usage;
+ expected.node_count = 2;
+ expected.node_counts.flat = 1;
+ expected.node_counts.flat_1k = 1;
+ expected.node_counts.crc = 1;
+
+ EXPECT_THAT(SampleCord(crc), EqStatistics(expected));
+}
+
TEST(CordzInfoStatisticsTest, ThreadSafety) {
Notification stop;
static constexpr int kNumThreads = 8;
diff --git a/absl/strings/internal/cordz_sample_token_test.cc b/absl/strings/internal/cordz_sample_token_test.cc
index 9f54301d..6be1770d 100644
--- a/absl/strings/internal/cordz_sample_token_test.cc
+++ b/absl/strings/internal/cordz_sample_token_test.cc
@@ -167,7 +167,7 @@ TEST(CordzSampleTokenTest, MultiThreaded) {
if (cord.data.is_profiled()) {
// 1) Untrack
cord.data.cordz_info()->Untrack();
- cord.data.clear_cordz_info();;
+ cord.data.clear_cordz_info();
} else {
// 2) Track
CordzInfo::TrackCord(cord.data, kTrackCordMethod);
diff --git a/absl/strings/internal/cordz_statistics.h b/absl/strings/internal/cordz_statistics.h
index da4c7dbb..57071905 100644
--- a/absl/strings/internal/cordz_statistics.h
+++ b/absl/strings/internal/cordz_statistics.h
@@ -41,6 +41,7 @@ struct CordzStatistics {
size_t concat = 0; // #concat reps
size_t ring = 0; // #ring buffer reps
size_t btree = 0; // #btree reps
+ size_t crc = 0; // #crc reps
};
// The size of the cord in bytes. This matches the result of Cord::size().
diff --git a/absl/strings/internal/cordz_update_tracker.h b/absl/strings/internal/cordz_update_tracker.h
index 1f764486..c5170662 100644
--- a/absl/strings/internal/cordz_update_tracker.h
+++ b/absl/strings/internal/cordz_update_tracker.h
@@ -39,8 +39,8 @@ class CordzUpdateTracker {
// Tracked update methods.
enum MethodIdentifier {
kUnknown,
- kAppendBuffer,
kAppendCord,
+ kAppendCordBuffer,
kAppendExternalMemory,
kAppendString,
kAssignCord,
@@ -50,16 +50,18 @@ class CordzUpdateTracker {
kConstructorString,
kCordReader,
kFlatten,
+ kGetAppendBuffer,
kGetAppendRegion,
kMakeCordFromExternal,
kMoveAppendCord,
kMoveAssignCord,
kMovePrependCord,
- kPrependBuffer,
kPrependCord,
+ kPrependCordBuffer,
kPrependString,
kRemovePrefix,
kRemoveSuffix,
+ kSetExpectedChecksum,
kSubCord,
// kNumMethods defines the number of entries: must be the last entry.
diff --git a/absl/strings/internal/cordz_update_tracker_test.cc b/absl/strings/internal/cordz_update_tracker_test.cc
index 2348a175..9b1f7986 100644
--- a/absl/strings/internal/cordz_update_tracker_test.cc
+++ b/absl/strings/internal/cordz_update_tracker_test.cc
@@ -37,8 +37,8 @@ using Methods = std::array<Method, Method::kNumMethods>;
// Returns an array of all methods defined in `MethodIdentifier`
Methods AllMethods() {
return Methods{Method::kUnknown,
- Method::kAppendBuffer,
Method::kAppendCord,
+ Method::kAppendCordBuffer,
Method::kAppendExternalMemory,
Method::kAppendString,
Method::kAssignCord,
@@ -48,16 +48,18 @@ Methods AllMethods() {
Method::kConstructorString,
Method::kCordReader,
Method::kFlatten,
+ Method::kGetAppendBuffer,
Method::kGetAppendRegion,
Method::kMakeCordFromExternal,
Method::kMoveAppendCord,
Method::kMoveAssignCord,
Method::kMovePrependCord,
- Method::kPrependBuffer,
Method::kPrependCord,
+ Method::kPrependCordBuffer,
Method::kPrependString,
Method::kRemovePrefix,
Method::kRemoveSuffix,
+ Method::kSetExpectedChecksum,
Method::kSubCord};
}
diff --git a/absl/strings/internal/escaping.cc b/absl/strings/internal/escaping.cc
index c5271286..cfea0961 100644
--- a/absl/strings/internal/escaping.cc
+++ b/absl/strings/internal/escaping.cc
@@ -21,7 +21,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
-const char kBase64Chars[] =
+ABSL_CONST_INIT const char kBase64Chars[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
@@ -102,8 +102,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
}
}
// To save time, we didn't update szdest or szsrc in the loop. So do it now.
- szdest = limit_dest - cur_dest;
- szsrc = limit_src - cur_src;
+ szdest = static_cast<size_t>(limit_dest - cur_dest);
+ szsrc = static_cast<size_t>(limit_src - cur_src);
/* now deal with the tail (<=3 bytes) */
switch (szsrc) {
@@ -154,7 +154,8 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
// the loop because the loop above always reads 4 bytes, and the fourth
// byte is past the end of the input.
if (szdest < 4) return 0;
- uint32_t in = (cur_src[0] << 16) + absl::big_endian::Load16(cur_src + 1);
+ uint32_t in =
+ (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
cur_dest[0] = base64[in >> 18];
in &= 0x3FFFF;
cur_dest[1] = base64[in >> 12];
@@ -172,7 +173,7 @@ size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
break;
}
- return (cur_dest - dest);
+ return static_cast<size_t>(cur_dest - dest);
}
} // namespace strings_internal
diff --git a/absl/strings/internal/ostringstream.cc b/absl/strings/internal/ostringstream.cc
index 05324c78..dc6cfe16 100644
--- a/absl/strings/internal/ostringstream.cc
+++ b/absl/strings/internal/ostringstream.cc
@@ -27,7 +27,7 @@ OStringStream::Buf::int_type OStringStream::overflow(int c) {
std::streamsize OStringStream::xsputn(const char* s, std::streamsize n) {
assert(s_);
- s_->append(s, n);
+ s_->append(s, static_cast<size_t>(n));
return n;
}
diff --git a/absl/strings/internal/str_format/arg.cc b/absl/strings/internal/str_format/arg.cc
index e28a29b1..02aeeebe 100644
--- a/absl/strings/internal/str_format/arg.cc
+++ b/absl/strings/internal/str_format/arg.cc
@@ -320,7 +320,7 @@ bool ConvertIntArg(T v, const FormatConversionSpecImpl conv,
return ConvertFloatImpl(static_cast<double>(v), conv, sink);
default:
- ABSL_INTERNAL_ASSUME(false);
+ ABSL_ASSUME(false);
}
if (conv.is_basic()) {
diff --git a/absl/strings/internal/str_format/arg.h b/absl/strings/internal/str_format/arg.h
index 3c91be70..b9dda909 100644
--- a/absl/strings/internal/str_format/arg.h
+++ b/absl/strings/internal/str_format/arg.h
@@ -144,7 +144,7 @@ StringConvertResult FormatConvertImpl(const AbslCord& value,
size_t space_remaining = 0;
int width = conv.width();
- if (width >= 0) space_remaining = width;
+ if (width >= 0) space_remaining = static_cast<size_t>(width);
size_t to_write = value.size();
diff --git a/absl/strings/internal/str_format/bind.h b/absl/strings/internal/str_format/bind.h
index b26cff66..80f29654 100644
--- a/absl/strings/internal/str_format/bind.h
+++ b/absl/strings/internal/str_format/bind.h
@@ -25,6 +25,7 @@
#include "absl/strings/internal/str_format/checker.h"
#include "absl/strings/internal/str_format/parser.h"
#include "absl/types/span.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -87,6 +88,36 @@ class FormatSpecTemplate
: public MakeDependent<UntypedFormatSpec, Args...>::type {
using Base = typename MakeDependent<UntypedFormatSpec, Args...>::type;
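+ // ErrorMaker<res> is a constexpr functor that returns `res` regardless of
+ // its argument. Passing the static_assert operands through parameters with
+ // descriptive names (SpecifierCount, MismatchedArgumentNumber, ...) makes
+ // those names, and the failing argument index, show up in compiler errors.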
+ template <bool res>
+ struct ErrorMaker {
+ constexpr bool operator()(int) const { return res; }
+ };
+
+ template <int i, int j>
+ static constexpr bool CheckArity(ErrorMaker<true> SpecifierCount = {},
+ ErrorMaker<i == j> ParametersPassed = {}) {
+ static_assert(SpecifierCount(i) == ParametersPassed(j),
+ "Number of arguments passed must match the number of "
+ "conversion specifiers.");
+ return true;
+ }
+
+ template <FormatConversionCharSet specified, FormatConversionCharSet passed,
+ int arg>
+ static constexpr bool CheckMatch(
+ ErrorMaker<Contains(specified, passed)> MismatchedArgumentNumber = {}) {
+ static_assert(MismatchedArgumentNumber(arg),
+ "Passed argument must match specified format.");
+ return true;
+ }
+
+ template <FormatConversionCharSet... C, size_t... I>
+ static bool CheckMatches(absl::index_sequence<I...>) {
+ bool res[] = {true, CheckMatch<Args, C, I + 1>()...};
+ (void)res;
+ return true;
+ }
+
public:
#ifdef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
@@ -112,7 +143,8 @@ class FormatSpecTemplate
template <typename T = void>
FormatSpecTemplate(string_view s) // NOLINT
__attribute__((enable_if(str_format_internal::EnsureConstexpr(s),
- "constexpr trap"))) {
+ "constexpr trap")))
+ : Base("to avoid noise in the compiler error") {
static_assert(sizeof(T*) == 0,
"Format specified does not match the arguments passed.");
}
@@ -133,13 +165,12 @@ class FormatSpecTemplate
#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
- template <
- FormatConversionCharSet... C,
- typename = typename std::enable_if<sizeof...(C) == sizeof...(Args)>::type,
- typename = typename std::enable_if<AllOf(Contains(Args,
- C)...)>::type>
+ template <FormatConversionCharSet... C>
FormatSpecTemplate(const ExtendedParsedFormat<C...>& pc) // NOLINT
- : Base(&pc) {}
+ : Base(&pc) {
+ CheckArity<sizeof...(C), sizeof...(Args)>();
+ CheckMatches<C...>(absl::make_index_sequence<sizeof...(C)>{});
+ }
};
class Streamable {
diff --git a/absl/strings/internal/str_format/checker.h b/absl/strings/internal/str_format/checker.h
index 2a2601ec..4fd19d13 100644
--- a/absl/strings/internal/str_format/checker.h
+++ b/absl/strings/internal/str_format/checker.h
@@ -22,9 +22,14 @@
// Compile time check support for entry points.
#ifndef ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
-#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+// We disable the format checker under VS Code IntelliSense compilation.
+// See https://github.com/microsoft/vscode-cpptools/issues/3683 for
+// more details.
+#if ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) && \
+ !defined(__INTELLISENSE__)
#define ABSL_INTERNAL_ENABLE_FORMAT_CHECKER 1
-#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__)
+#endif // ABSL_HAVE_ATTRIBUTE(enable_if) && !defined(__native_client__) &&
+ // !defined(__INTELLISENSE__)
#endif // ABSL_INTERNAL_ENABLE_FORMAT_CHECKER
namespace absl {
diff --git a/absl/strings/internal/str_format/convert_test.cc b/absl/strings/internal/str_format/convert_test.cc
index 91e03609..300612b7 100644
--- a/absl/strings/internal/str_format/convert_test.cc
+++ b/absl/strings/internal/str_format/convert_test.cc
@@ -24,6 +24,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/internal/str_format/bind.h"
#include "absl/strings/match.h"
@@ -124,6 +125,7 @@ void StrAppendV(std::string *dst, const char *format, va_list ap) {
delete[] buf;
}
+void StrAppend(std::string *, const char *, ...) ABSL_PRINTF_ATTRIBUTE(2, 3);
void StrAppend(std::string *out, const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -131,6 +133,7 @@ void StrAppend(std::string *out, const char *format, ...) {
va_end(ap);
}
+std::string StrPrint(const char *, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);
std::string StrPrint(const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -455,25 +458,36 @@ TYPED_TEST_P(TypedFormatConvertTest, AllIntsWithFlags) {
}
TYPED_TEST_P(TypedFormatConvertTest, Char) {
+ // Pass a bunch of values of type TypeParam to both FormatPack and libc's
+ // vsnprintf("%c", ...) (wrapped in StrPrint) to make sure we get the same
+ // value.
typedef TypeParam T;
using remove_volatile_t = typename std::remove_volatile<T>::type;
- static const T kMin = std::numeric_limits<remove_volatile_t>::min();
- static const T kMax = std::numeric_limits<remove_volatile_t>::max();
- T kVals[] = {
- remove_volatile_t(1), remove_volatile_t(2), remove_volatile_t(10),
- remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10),
- remove_volatile_t(0),
- kMin + remove_volatile_t(1), kMin,
- kMax - remove_volatile_t(1), kMax
+ std::vector<remove_volatile_t> vals = {
+ remove_volatile_t(1), remove_volatile_t(2), remove_volatile_t(10), //
+ remove_volatile_t(-1), remove_volatile_t(-2), remove_volatile_t(-10), //
+ remove_volatile_t(0),
};
- for (const T &c : kVals) {
+
+ // We'd like to test values near std::numeric_limits::min() and
+ // std::numeric_limits::max(), too, but vsnprintf("%c", ...) can't handle
+ // anything larger than an int. Add in the most extreme values we can without
+ // exceeding that range.
+ static const T kMin =
+ static_cast<remove_volatile_t>(std::numeric_limits<int>::min());
+ static const T kMax =
+ static_cast<remove_volatile_t>(std::numeric_limits<int>::max());
+ vals.insert(vals.end(), {kMin + 1, kMin, kMax - 1, kMax});
+
+ for (const T c : vals) {
const FormatArgImpl args[] = {FormatArgImpl(c)};
UntypedFormatSpecImpl format("%c");
- EXPECT_EQ(StrPrint("%c", c), FormatPack(format, absl::MakeSpan(args)));
+ EXPECT_EQ(StrPrint("%c", static_cast<int>(c)),
+ FormatPack(format, absl::MakeSpan(args)));
}
}
-REGISTER_TYPED_TEST_CASE_P(TypedFormatConvertTest, AllIntsWithFlags, Char);
+REGISTER_TYPED_TEST_SUITE_P(TypedFormatConvertTest, AllIntsWithFlags, Char);
typedef ::testing::Types<
int, unsigned, volatile int,
@@ -482,8 +496,8 @@ typedef ::testing::Types<
long long, unsigned long long,
signed char, unsigned char, char>
AllIntTypes;
-INSTANTIATE_TYPED_TEST_CASE_P(TypedFormatConvertTestWithAllIntTypes,
- TypedFormatConvertTest, AllIntTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(TypedFormatConvertTestWithAllIntTypes,
+ TypedFormatConvertTest, AllIntTypes);
TEST_F(FormatConvertTest, VectorBool) {
// Make sure vector<bool>'s values behave as bools.
std::vector<bool> v = {true, false};
diff --git a/absl/strings/internal/str_format/extension.cc b/absl/strings/internal/str_format/extension.cc
index 484f6ebf..f93153d5 100644
--- a/absl/strings/internal/str_format/extension.cc
+++ b/absl/strings/internal/str_format/extension.cc
@@ -33,6 +33,8 @@ std::string FlagsToString(Flags v) {
return s;
}
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
#define ABSL_INTERNAL_X_VAL(id) \
constexpr absl::FormatConversionChar FormatConversionCharInternal::id;
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
@@ -45,17 +47,14 @@ constexpr absl::FormatConversionChar FormatConversionCharInternal::kNone;
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
#undef ABSL_INTERNAL_CHAR_SET_CASE
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
-// NOLINTNEXTLINE(readability-redundant-declaration)
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
+#endif // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+
bool FormatSinkImpl::PutPaddedString(string_view value, int width,
int precision, bool left) {
size_t space_remaining = 0;
diff --git a/absl/strings/internal/str_format/extension.h b/absl/strings/internal/str_format/extension.h
index 55cbb56d..55e8ac88 100644
--- a/absl/strings/internal/str_format/extension.h
+++ b/absl/strings/internal/str_format/extension.h
@@ -19,6 +19,7 @@
#include <limits.h>
#include <cstddef>
+#include <cstdint>
#include <cstring>
#include <ostream>
@@ -70,7 +71,7 @@ class FormatSinkImpl {
~FormatSinkImpl() { Flush(); }
void Flush() {
- raw_.Write(string_view(buf_, pos_ - buf_));
+ raw_.Write(string_view(buf_, static_cast<size_t>(pos_ - buf_)));
pos_ = buf_;
}
@@ -120,7 +121,9 @@ class FormatSinkImpl {
}
private:
- size_t Avail() const { return buf_ + sizeof(buf_) - pos_; }
+ size_t Avail() const {
+ return static_cast<size_t>(buf_ + sizeof(buf_) - pos_);
+ }
FormatRawSinkImpl raw_;
size_t size_ = 0;
diff --git a/absl/strings/internal/str_format/output.h b/absl/strings/internal/str_format/output.h
index 8030dae0..15e751ab 100644
--- a/absl/strings/internal/str_format/output.h
+++ b/absl/strings/internal/str_format/output.h
@@ -22,6 +22,7 @@
#define ABSL_STRINGS_INTERNAL_STR_FORMAT_OUTPUT_H_
#include <cstdio>
+#include <ios>
#include <ostream>
#include <string>
@@ -71,7 +72,7 @@ inline void AbslFormatFlush(std::string* out, string_view s) {
out->append(s.data(), s.size());
}
inline void AbslFormatFlush(std::ostream* out, string_view s) {
- out->write(s.data(), s.size());
+ out->write(s.data(), static_cast<std::streamsize>(s.size()));
}
inline void AbslFormatFlush(FILERawSink* sink, string_view v) {
diff --git a/absl/strings/internal/str_format/parser.h b/absl/strings/internal/str_format/parser.h
index ad8646ed..32b91d03 100644
--- a/absl/strings/internal/str_format/parser.h
+++ b/absl/strings/internal/str_format/parser.h
@@ -151,7 +151,8 @@ bool ParseFormatString(string_view src, Consumer consumer) {
const char* p = src.data();
const char* const end = p + src.size();
while (p != end) {
- const char* percent = static_cast<const char*>(memchr(p, '%', end - p));
+ const char* percent =
+ static_cast<const char*>(memchr(p, '%', static_cast<size_t>(end - p)));
if (!percent) {
// We found the last substring.
return consumer.Append(string_view(p, end - p));
@@ -242,7 +243,8 @@ class ParsedFormatBase {
string_view text(base, 0);
for (const auto& item : items_) {
const char* const end = text.data() + text.size();
- text = string_view(end, (base + item.text_end) - end);
+ text =
+ string_view(end, static_cast<size_t>((base + item.text_end) - end));
if (item.is_conversion) {
if (!consumer.ConvertOne(item.conv, text)) return false;
} else {
diff --git a/absl/strings/internal/str_join_internal.h b/absl/strings/internal/str_join_internal.h
index 31dbf672..d97d5033 100644
--- a/absl/strings/internal/str_join_internal.h
+++ b/absl/strings/internal/str_join_internal.h
@@ -229,10 +229,11 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
std::string result;
if (start != end) {
// Sums size
- size_t result_size = start->size();
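+ // Bind the first element once; using `*it` rather than `it->` also works
+ // for iterators whose operator* returns a temporary value.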
+ auto&& start_value = *start;
+ size_t result_size = start_value.size();
for (Iterator it = start; ++it != end;) {
result_size += s.size();
- result_size += it->size();
+ result_size += (*it).size();
}
if (result_size > 0) {
@@ -240,13 +241,15 @@ std::string JoinAlgorithm(Iterator start, Iterator end, absl::string_view s,
// Joins strings
char* result_buf = &*result.begin();
- memcpy(result_buf, start->data(), start->size());
- result_buf += start->size();
+
+ memcpy(result_buf, start_value.data(), start_value.size());
+ result_buf += start_value.size();
for (Iterator it = start; ++it != end;) {
memcpy(result_buf, s.data(), s.size());
result_buf += s.size();
- memcpy(result_buf, it->data(), it->size());
- result_buf += it->size();
+ auto&& value = *it;
+ memcpy(result_buf, value.data(), value.size());
+ result_buf += value.size();
}
}
}
diff --git a/absl/strings/internal/string_constant.h b/absl/strings/internal/string_constant.h
index a11336b7..f68b17d7 100644
--- a/absl/strings/internal/string_constant.h
+++ b/absl/strings/internal/string_constant.h
@@ -35,17 +35,25 @@ namespace strings_internal {
// below.
template <typename T>
struct StringConstant {
+ private:
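+ // The expression below is true for every possible byte value, so the
+ // static_assert can only fail when `view[0]` is not readable during
+ // constant evaluation, i.e. when `view` points to non-constant data.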
+ static constexpr bool TryConstexprEval(absl::string_view view) {
+ return view.empty() || 2 * view[0] != 1;
+ }
+
+ public:
static constexpr absl::string_view value = T{}();
constexpr absl::string_view operator()() const { return value; }
// Check to be sure `view` points to constant data.
// Otherwise, it can't be constant evaluated.
- static_assert(value.empty() || 2 * value[0] != 1,
+ static_assert(TryConstexprEval(value),
"The input string_view must point to constant data.");
};
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T>
-constexpr absl::string_view StringConstant<T>::value; // NOLINT
+constexpr absl::string_view StringConstant<T>::value;
+#endif
// Factory function for `StringConstant` instances.
// It supports callables that have a constexpr default constructor and a
diff --git a/absl/strings/internal/utf8.cc b/absl/strings/internal/utf8.cc
index 8fd8edc1..7ecb93df 100644
--- a/absl/strings/internal/utf8.cc
+++ b/absl/strings/internal/utf8.cc
@@ -25,25 +25,25 @@ size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
*buffer = static_cast<char>(utf8_char);
return 1;
} else if (utf8_char <= 0x7FF) {
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xC0 | utf8_char;
+ buffer[0] = static_cast<char>(0xC0 | utf8_char);
return 2;
} else if (utf8_char <= 0xFFFF) {
- buffer[2] = 0x80 | (utf8_char & 0x3F);
+ buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xE0 | utf8_char;
+ buffer[0] = static_cast<char>(0xE0 | utf8_char);
return 3;
} else {
- buffer[3] = 0x80 | (utf8_char & 0x3F);
+ buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[2] = 0x80 | (utf8_char & 0x3F);
+ buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[1] = 0x80 | (utf8_char & 0x3F);
+ buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
- buffer[0] = 0xF0 | utf8_char;
+ buffer[0] = static_cast<char>(0xF0 | utf8_char);
return 4;
}
}
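
For reference, the width produced above depends only on the code point's
magnitude; a quick illustrative check (not part of this change):

  char buf[4];
  assert(EncodeUTF8Char(buf, 0x00E9) == 2);   // U+00E9 -> C3 A9
  assert(EncodeUTF8Char(buf, 0x20AC) == 3);   // U+20AC -> E2 82 AC
  assert(EncodeUTF8Char(buf, 0x1F600) == 4);  // U+1F600 -> F0 9F 98 80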
diff --git a/absl/strings/numbers.cc b/absl/strings/numbers.cc
index cbd84c91..e798fc69 100644
--- a/absl/strings/numbers.cc
+++ b/absl/strings/numbers.cc
@@ -757,8 +757,8 @@ struct LookupTables {
//
// uint128& operator/=(uint128) is not constexpr, so hardcode the resulting
// array to avoid a static initializer.
-template<>
-const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
+template <>
+ABSL_CONST_INIT const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
0,
0,
MakeUint128(9223372036854775807u, 18446744073709551615u),
@@ -809,8 +809,8 @@ const uint128 LookupTables<uint128>::kVmaxOverBase[] = {
//
// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVmaxOverBase[] = {
+template <>
+ABSL_CONST_INIT const int128 LookupTables<int128>::kVmaxOverBase[] = {
0,
0,
MakeInt128(4611686018427387903, 18446744073709551615u),
@@ -862,8 +862,8 @@ const int128 LookupTables<int128>::kVmaxOverBase[] = {
//
// int128& operator/=(int128) is not constexpr, so hardcode the resulting array
// to avoid a static initializer.
-template<>
-const int128 LookupTables<int128>::kVminOverBase[] = {
+template <>
+ABSL_CONST_INIT const int128 LookupTables<int128>::kVminOverBase[] = {
0,
0,
MakeInt128(-4611686018427387904, 0u),
@@ -904,11 +904,11 @@ const int128 LookupTables<int128>::kVminOverBase[] = {
};
template <typename IntType>
-const IntType LookupTables<IntType>::kVmaxOverBase[] =
+ABSL_CONST_INIT const IntType LookupTables<IntType>::kVmaxOverBase[] =
X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::max());
template <typename IntType>
-const IntType LookupTables<IntType>::kVminOverBase[] =
+ABSL_CONST_INIT const IntType LookupTables<IntType>::kVminOverBase[] =
X_OVER_BASE_INITIALIZER(std::numeric_limits<IntType>::min());
#undef X_OVER_BASE_INITIALIZER
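
`ABSL_CONST_INIT` makes the no-static-initializer requirement enforceable: the compiler must reject the declaration if constant initialization is impossible, which is exactly what the surrounding comments about `operator/=` not being constexpr guard against. The pattern in isolation, as a sketch:

    #include "absl/base/attributes.h"
    #include "absl/numeric/int128.h"

    // Rejected at compile time if the initializer needs a dynamic step.
    ABSL_CONST_INIT const absl::uint128 kHalfMax =
        absl::MakeUint128(9223372036854775807u, 18446744073709551615u);
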
diff --git a/absl/strings/numbers.h b/absl/strings/numbers.h
index 4ae07c2d..86c84ed3 100644
--- a/absl/strings/numbers.h
+++ b/absl/strings/numbers.h
@@ -23,8 +23,12 @@
#ifndef ABSL_STRINGS_NUMBERS_H_
#define ABSL_STRINGS_NUMBERS_H_
-#ifdef __SSE4_2__
-#include <x86intrin.h>
+#ifdef __SSSE3__
+#include <tmmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+#include <intrin.h>
#endif
#include <cstddef>
@@ -36,14 +40,7 @@
#include <type_traits>
#include "absl/base/config.h"
-#ifdef __SSE4_2__
-// TODO(jorg): Remove this when we figure out the right way
-// to swap bytes on SSE 4.2 that works with the compilers
-// we claim to support. Also, add tests for the compiler
-// that doesn't support the Intel _bswap64 intrinsic but
-// does support all the SSE 4.2 intrinsics
#include "absl/base/internal/endian.h"
-#endif
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/numeric/bits.h"
@@ -181,16 +178,19 @@ char* FastIntToBuffer(int_type i, char* buffer) {
// TODO(jorg): This signed-ness check is used because it works correctly
// with enums, and it also serves to check that int_type is not a pointer.
// If one day something like std::is_signed<enum E> works, switch to it.
- if (static_cast<int_type>(1) - 2 < 0) { // Signed
- if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ // These conditions are constexpr bools to suppress MSVC warning C4127.
+ constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+ constexpr bool kUse64Bit = sizeof(i) > 32 / 8;
+ if (kIsSigned) {
+ if (kUse64Bit) {
return FastIntToBuffer(static_cast<int64_t>(i), buffer);
- } else { // 32-bit or less
+ } else {
return FastIntToBuffer(static_cast<int32_t>(i), buffer);
}
- } else { // Unsigned
- if (sizeof(i) > 32 / 8) { // 33-bit to 64-bit
+ } else {
+ if (kUse64Bit) {
return FastIntToBuffer(static_cast<uint64_t>(i), buffer);
- } else { // 32-bit or less
+ } else {
return FastIntToBuffer(static_cast<uint32_t>(i), buffer);
}
}
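
Naming the constant conditions as `constexpr bool`s keeps the branch structure identical while signaling to readers, and to MSVC's C4127 ("conditional expression is constant"), that the constness is deliberate. The same pattern in isolation:

    template <typename int_type>
    const char* WidthClass() {
      // A named constant instead of an inline sizeof test silences C4127.
      constexpr bool kUse64Bit = sizeof(int_type) > 32 / 8;
      if (kUse64Bit) {
        return "33-bit to 64-bit";
      }
      return "32-bit or less";
    }
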
@@ -209,22 +209,25 @@ ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s, int_type* out,
// TODO(jorg): This signed-ness check is used because it works correctly
// with enums, and it also serves to check that int_type is not a pointer.
// If one day something like std::is_signed<enum E> works, switch to it.
- if (static_cast<int_type>(1) - 2 < 0) { // Signed
- if (sizeof(*out) == 64 / 8) { // 64-bit
+ // These conditions are constexpr bools to suppress MSVC warning C4127.
+ constexpr bool kIsSigned = static_cast<int_type>(1) - 2 < 0;
+ constexpr bool kUse64Bit = sizeof(*out) == 64 / 8;
+ if (kIsSigned) {
+ if (kUse64Bit) {
int64_t val;
parsed = numbers_internal::safe_strto64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else { // 32-bit
+ } else {
int32_t val;
parsed = numbers_internal::safe_strto32_base(s, &val, base);
*out = static_cast<int_type>(val);
}
- } else { // Unsigned
- if (sizeof(*out) == 64 / 8) { // 64-bit
+ } else {
+ if (kUse64Bit) {
uint64_t val;
parsed = numbers_internal::safe_strtou64_base(s, &val, base);
*out = static_cast<int_type>(val);
- } else { // 32-bit
+ } else {
uint32_t val;
parsed = numbers_internal::safe_strtou32_base(s, &val, base);
*out = static_cast<int_type>(val);
@@ -240,7 +243,7 @@ ABSL_MUST_USE_RESULT bool safe_strtoi_base(absl::string_view s, int_type* out,
// Returns the number of non-pad digits of the output (it can never be zero
// since 0 has one digit).
inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
-#ifdef __SSE4_2__
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
uint64_t be = absl::big_endian::FromHost64(val);
const auto kNibbleMask = _mm_set1_epi8(0xf);
const auto kHexDigits = _mm_setr_epi8('0', '1', '2', '3', '4', '5', '6', '7',
@@ -259,7 +262,7 @@ inline size_t FastHexToBufferZeroPad16(uint64_t val, char* out) {
}
#endif
// | 0x1 so that even 0 has 1 digit.
- return 16 - countl_zero(val | 0x1) / 4;
+ return 16 - static_cast<size_t>(countl_zero(val | 0x1) / 4);
}
} // namespace numbers_internal
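
The scalar fallback derives the digit count from leading zero bits: each hex digit covers four bits, and `val | 0x1` guarantees at least one significant bit so zero still reports one digit. A worked check of the formula, assuming `absl::countl_zero` from absl/numeric/bits.h is usable in a constant expression:

    #include <cstdint>
    #include "absl/numeric/bits.h"

    // 0xABC uses 12 bits, so countl_zero == 52, 52 / 4 == 13 pad digits,
    // and 16 - 13 == 3 significant digits ("abc") out of the 16 written.
    static_assert(16 - absl::countl_zero(uint64_t{0xABC} | 0x1) / 4 == 3, "");
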
diff --git a/absl/strings/str_cat.h b/absl/strings/str_cat.h
index a8a85c73..a94bc5df 100644
--- a/absl/strings/str_cat.h
+++ b/absl/strings/str_cat.h
@@ -214,23 +214,29 @@ class AlphaNum {
// A bool ctor would also convert incoming pointers (bletch).
AlphaNum(int x) // NOLINT(runtime/explicit)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned int x) // NOLINT(runtime/explicit)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(long long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(unsigned long long x) // NOLINT(*)
- : piece_(digits_,
- numbers_internal::FastIntToBuffer(x, digits_) - &digits_[0]) {}
+ : piece_(digits_, static_cast<size_t>(
+ numbers_internal::FastIntToBuffer(x, digits_) -
+ &digits_[0])) {}
AlphaNum(float f) // NOLINT(runtime/explicit)
: piece_(digits_, numbers_internal::SixDigitsToBuffer(f, digits_)) {}
@@ -245,7 +251,8 @@ class AlphaNum {
const strings_internal::AlphaNumBuffer<size>& buf)
: piece_(&buf.data[0], buf.size) {}
- AlphaNum(const char* c_str) : piece_(c_str) {} // NOLINT(runtime/explicit)
+ AlphaNum(const char* c_str) // NOLINT(runtime/explicit)
+ : piece_(NullSafeStringView(c_str)) {} // NOLINT(runtime/explicit)
AlphaNum(absl::string_view pc) : piece_(pc) {} // NOLINT(runtime/explicit)
template <typename Allocator>
diff --git a/absl/strings/str_cat_test.cc b/absl/strings/str_cat_test.cc
index f3770dc0..69df2502 100644
--- a/absl/strings/str_cat_test.cc
+++ b/absl/strings/str_cat_test.cc
@@ -210,6 +210,11 @@ TEST(StrCat, CornerCases) {
EXPECT_EQ(result, "");
}
+TEST(StrCat, NullConstCharPtr) {
+ const char* null = nullptr;
+ EXPECT_EQ(absl::StrCat("mon", null, "key"), "monkey");
+}
+
// A minimal allocator that uses malloc().
template <typename T>
struct Mallocator {
diff --git a/absl/strings/str_format_test.cc b/absl/strings/str_format_test.cc
index c60027ad..804e6c22 100644
--- a/absl/strings/str_format_test.cc
+++ b/absl/strings/str_format_test.cc
@@ -719,6 +719,7 @@ TEST_F(FormatWrapperTest, ParsedFormat) {
ABSL_NAMESPACE_END
} // namespace absl
+namespace {
using FormatExtensionTest = ::testing::Test;
struct Point {
@@ -750,6 +751,7 @@ TEST_F(FormatExtensionTest, AbslFormatConvertExample) {
// FormatUntyped will return false for bad character.
EXPECT_FALSE(absl::FormatUntyped(&actual, f1, {absl::FormatArg(p)}));
}
+} // namespace
// Some codegen thunks that we can use to easily dump the generated assembly for
// different StrFormat calls.
diff --git a/absl/strings/str_join.h b/absl/strings/str_join.h
index 33534536..ee5ae7ef 100644
--- a/absl/strings/str_join.h
+++ b/absl/strings/str_join.h
@@ -72,21 +72,15 @@ ABSL_NAMESPACE_BEGIN
// functions. You may provide your own Formatter to enable `absl::StrJoin()` to
// work with arbitrary types.
//
-// The following is an example of a custom Formatter that simply uses
-// `std::to_string()` to format an integer as a std::string.
-//
-// struct MyFormatter {
-// void operator()(std::string* out, int i) const {
-// out->append(std::to_string(i));
-// }
-// };
-//
-// You would use the above formatter by passing an instance of it as the final
-// argument to `absl::StrJoin()`:
-//
-// std::vector<int> v = {1, 2, 3, 4};
-// std::string s = absl::StrJoin(v, "-", MyFormatter());
-// EXPECT_EQ("1-2-3-4", s);
+// The following is an example of a custom Formatter that uses
+// `absl::FormatDuration` to join a list of `absl::Duration`s.
+//
+// std::vector<absl::Duration> v = {absl::Seconds(1), absl::Milliseconds(10)};
+// std::string s =
+// absl::StrJoin(v, ", ", [](std::string* out, absl::Duration dur) {
+// absl::StrAppend(out, absl::FormatDuration(dur));
+// });
+// EXPECT_EQ("1s, 10ms", s);
//
// The following standard formatters are provided within this file:
//
diff --git a/absl/strings/str_join_test.cc b/absl/strings/str_join_test.cc
index 2be6256e..c986e863 100644
--- a/absl/strings/str_join_test.cc
+++ b/absl/strings/str_join_test.cc
@@ -21,6 +21,7 @@
#include <cstdio>
#include <functional>
#include <initializer_list>
+#include <iterator>
#include <map>
#include <memory>
#include <ostream>
@@ -33,6 +34,7 @@
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
namespace {
@@ -471,4 +473,136 @@ TEST(StrJoin, Tuple) {
"-", absl::DereferenceFormatter(TestFormatter())));
}
+// A minimal value type for `StrJoin` inputs.
+// Used to ensure we do not require a more specific type, such as a
+// `string_view`.
+//
+// Anything that provides `data()` and `size()` is OK.
+class TestValue {
+ public:
+ TestValue(const char* data, size_t size) : data_(data), size_(size) {}
+ const char* data() const { return data_; }
+ size_t size() const { return size_; }
+
+ private:
+ const char* data_;
+ size_t size_;
+};
+
+// A minimal C++20 forward iterator, used to test that we do not impose
+// excessive requirements on StrJoin inputs.
+//
+// The two main differences between the pre-C++20 LegacyForwardIterator and
+// the C++20 ForwardIterator are:
+// 1. `operator->` is not required in C++20.
+// 2. `operator*` result does not need to be an lvalue (a reference).
+//
+// The `operator->` requirement was removed on page 17 of:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p1037r0.pdf
+//
+// See the `[iterator.requirements]` section of the C++ standard.
+//
+// The value type is a template parameter so that we can test the behavior
+// of `StrJoin` specializations, e.g. the NoFormatter specialization for
+// `string_view`.
+template <typename ValueT>
+class TestIterator {
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = ValueT;
+ using pointer = void;
+ using reference = const value_type&;
+ using difference_type = int;
+
+ // `data` must outlive the result.
+ static TestIterator begin(const std::vector<absl::string_view>& data) {
+ return TestIterator(&data, 0);
+ }
+
+ static TestIterator end(const std::vector<absl::string_view>& data) {
+ return TestIterator(nullptr, data.size());
+ }
+
+ bool operator==(const TestIterator& other) const {
+ return pos_ == other.pos_;
+ }
+ bool operator!=(const TestIterator& other) const {
+ return pos_ != other.pos_;
+ }
+
+ // This deliberately returns a `prvalue`.
+ // The requirement to return a reference was removed in C++20.
+ value_type operator*() const {
+ return ValueT((*data_)[pos_].data(), (*data_)[pos_].size());
+ }
+
+ // `operator->()` is deliberately omitted.
+ // The requirement to provide it was removed in C++20.
+
+ TestIterator& operator++() {
+ ++pos_;
+ return *this;
+ }
+
+ TestIterator operator++(int) {
+ TestIterator result = *this;
+ ++(*this);
+ return result;
+ }
+
+ TestIterator& operator--() {
+ --pos_;
+ return *this;
+ }
+
+ TestIterator operator--(int) {
+ TestIterator result = *this;
+ --(*this);
+ return result;
+ }
+
+ private:
+ TestIterator(const std::vector<absl::string_view>* data, size_t pos)
+ : data_(data), pos_(pos) {}
+
+ const std::vector<absl::string_view>* data_;
+ size_t pos_;
+};
+
+template <typename ValueT>
+class TestIteratorRange {
+ public:
+  // `data` must outlive the result.
+ explicit TestIteratorRange(const std::vector<absl::string_view>& data)
+ : begin_(TestIterator<ValueT>::begin(data)),
+ end_(TestIterator<ValueT>::end(data)) {}
+
+ const TestIterator<ValueT>& begin() const { return begin_; }
+ const TestIterator<ValueT>& end() const { return end_; }
+
+ private:
+ TestIterator<ValueT> begin_;
+ TestIterator<ValueT> end_;
+};
+
+TEST(StrJoin, TestIteratorRequirementsNoFormatter) {
+ const std::vector<absl::string_view> a = {"a", "b", "c"};
+
+ // When the value type is string-like (`std::string` or `string_view`),
+ // the NoFormatter template specialization is used internally.
+ EXPECT_EQ("a-b-c",
+ absl::StrJoin(TestIteratorRange<absl::string_view>(a), "-"));
+}
+
+TEST(StrJoin, TestIteratorRequirementsCustomFormatter) {
+ const std::vector<absl::string_view> a = {"a", "b", "c"};
+ EXPECT_EQ("a-b-c",
+ absl::StrJoin(TestIteratorRange<TestValue>(a), "-",
+ [](std::string* out, const TestValue& value) {
+ absl::StrAppend(
+ out,
+ absl::string_view(value.data(), value.size()));
+ }));
+}
+
} // namespace
diff --git a/absl/strings/str_split.h b/absl/strings/str_split.h
index bfbca422..7bbb68a3 100644
--- a/absl/strings/str_split.h
+++ b/absl/strings/str_split.h
@@ -461,8 +461,7 @@ using EnableSplitIfString =
// first two split strings become the `std::pair` `.first` and `.second`
// members, respectively. The remaining split substrings are discarded. If there
// are less than two split substrings, the empty string is used for the
-// corresponding
-// `std::pair` member.
+// corresponding `std::pair` member.
//
// Example:
//
diff --git a/absl/strings/string_view.cc b/absl/strings/string_view.cc
index d596e08c..adce3be9 100644
--- a/absl/strings/string_view.cc
+++ b/absl/strings/string_view.cc
@@ -207,22 +207,11 @@ string_view::size_type string_view::find_last_not_of(
return npos;
}
-// MSVC has non-standard behavior that implicitly creates definitions for static
-// const members. These implicit definitions conflict with explicit out-of-class
-// member definitions that are required by the C++ standard, resulting in
-// LNK1169 "multiply defined" errors at link time. __declspec(selectany) asks
-// MSVC to choose only one definition for the symbol it decorates. See details
-// at https://msdn.microsoft.com/en-us/library/34h23df8(v=vs.100).aspx
-#ifdef _MSC_VER
-#define ABSL_STRING_VIEW_SELECTANY __declspec(selectany)
-#else
-#define ABSL_STRING_VIEW_SELECTANY
-#endif
-ABSL_STRING_VIEW_SELECTANY
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr string_view::size_type string_view::npos;
-ABSL_STRING_VIEW_SELECTANY
constexpr string_view::size_type string_view::kMaxSize;
+#endif
ABSL_NAMESPACE_END
} // namespace absl
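
`ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL` presumably selects pre-C++17 builds: until C++17 made static constexpr data members implicitly inline, an odr-used member such as `npos` still required an out-of-class definition. The underlying language rule, sketched with illustrative names:

    struct Limits {
      static constexpr int kMax = 10;  // C++17 and later: implicitly inline
    };

    #if __cplusplus < 201703L
    // Pre-C++17: odr-use of Limits::kMax requires this redundant definition.
    constexpr int Limits::kMax;
    #endif
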
diff --git a/absl/strings/string_view.h b/absl/strings/string_view.h
index a4c9a652..e3239f57 100644
--- a/absl/strings/string_view.h
+++ b/absl/strings/string_view.h
@@ -55,8 +55,9 @@ ABSL_NAMESPACE_END
#else // ABSL_USES_STD_STRING_VIEW
-#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
- (defined(__GNUC__) && !defined(__clang__))
+#if ABSL_HAVE_BUILTIN(__builtin_memcmp) || \
+ (defined(__GNUC__) && !defined(__clang__)) || \
+ (defined(_MSC_VER) && _MSC_VER >= 1928)
#define ABSL_INTERNAL_STRING_VIEW_MEMCMP __builtin_memcmp
#else // ABSL_HAVE_BUILTIN(__builtin_memcmp)
#define ABSL_INTERNAL_STRING_VIEW_MEMCMP memcmp
diff --git a/absl/strings/string_view_test.cc b/absl/strings/string_view_test.cc
index 2c13dd1c..9d5463a1 100644
--- a/absl/strings/string_view_test.cc
+++ b/absl/strings/string_view_test.cc
@@ -1189,7 +1189,7 @@ TEST(ComparisonOpsTest, StringCompareNotAmbiguous) {
EXPECT_LT("hello", std::string("world"));
}
-TEST(ComparisonOpsTest, HeterogenousStringViewEquals) {
+TEST(ComparisonOpsTest, HeterogeneousStringViewEquals) {
EXPECT_EQ(absl::string_view("hello"), std::string("hello"));
EXPECT_EQ("hello", absl::string_view("hello"));
}
diff --git a/absl/strings/strip.h b/absl/strings/strip.h
index 111872ca..341e66fc 100644
--- a/absl/strings/strip.h
+++ b/absl/strings/strip.h
@@ -34,8 +34,9 @@ ABSL_NAMESPACE_BEGIN
// ConsumePrefix()
//
-// Strips the `expected` prefix from the start of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` prefix, if found, from the start of `str`.
+// If the operation succeeded, `true` is returned. If not, `false`
+// is returned and `str` is not modified.
//
// Example:
//
@@ -49,8 +50,9 @@ inline bool ConsumePrefix(absl::string_view* str, absl::string_view expected) {
}
// ConsumeSuffix()
//
-// Strips the `expected` suffix from the end of the given string, returning
-// `true` if the strip operation succeeded or false otherwise.
+// Strips the `expected` suffix, if found, from the end of `str`.
+// If the operation succeeded, `true` is returned. If not, `false`
+// is returned and `str` is not modified.
//
// Example:
//
@@ -65,7 +67,7 @@ inline bool ConsumeSuffix(absl::string_view* str, absl::string_view expected) {
// StripPrefix()
//
-// Returns a view into the input string 'str' with the given 'prefix' removed,
+// Returns a view into the input string `str` with the given `prefix` removed,
// but leaving the original string intact. If the prefix does not match at the
// start of the string, returns the original string instead.
ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix(
@@ -76,7 +78,7 @@ ABSL_MUST_USE_RESULT inline absl::string_view StripPrefix(
// StripSuffix()
//
-// Returns a view into the input string 'str' with the given 'suffix' removed,
+// Returns a view into the input string `str` with the given `suffix` removed,
// but leaving the original string intact. If the suffix does not match at the
// end of the string, returns the original string instead.
ABSL_MUST_USE_RESULT inline absl::string_view StripSuffix(
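
A minimal usage sketch of the contracts as now documented: on failure the Consume functions leave the input untouched, and the Strip functions never modify their argument at all:

    #include "absl/strings/strip.h"

    void Demo() {
      absl::string_view s = "abc.def";
      bool ok = absl::ConsumePrefix(&s, "abc.");  // ok == true,  s == "def"
      bool no = absl::ConsumePrefix(&s, "zzz");   // no == false, s == "def"
      absl::string_view base = absl::StripSuffix("file.txt", ".txt");  // "file"
      (void)ok; (void)no; (void)base;
    }
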
diff --git a/absl/strings/substitute.h b/absl/strings/substitute.h
index 151c56f5..6d2b08ab 100644
--- a/absl/strings/substitute.h
+++ b/absl/strings/substitute.h
@@ -159,8 +159,8 @@ class Arg {
Arg(Hex hex); // NOLINT(runtime/explicit)
Arg(Dec dec); // NOLINT(runtime/explicit)
- // vector<bool>::reference and const_reference require special help to
- // convert to `AlphaNum` because it requires two user defined conversions.
+  // vector<bool>::reference and const_reference require special help to
+  // convert to `Arg` because doing so requires two user-defined conversions.
template <typename T,
absl::enable_if_t<
std::is_class<T>::value &&
@@ -174,6 +174,14 @@ class Arg {
// "0x<hex value>". However, in the case of `nullptr`, "NULL" is printed.
Arg(const void* value); // NOLINT(runtime/explicit)
+ // Normal enums are already handled by the integer formatters.
+ // This overload matches only scoped enums.
+ template <typename T,
+ typename = typename std::enable_if<
+ std::is_enum<T>{} && !std::is_convertible<T, int>{}>::type>
+ Arg(T value) // NOLINT(google-explicit-constructor)
+ : Arg(static_cast<typename std::underlying_type<T>::type>(value)) {}
+
Arg(const Arg&) = delete;
Arg& operator=(const Arg&) = delete;
diff --git a/absl/strings/substitute_test.cc b/absl/strings/substitute_test.cc
index 442c9215..9e6b9403 100644
--- a/absl/strings/substitute_test.cc
+++ b/absl/strings/substitute_test.cc
@@ -184,6 +184,54 @@ TEST(SubstituteTest, VectorBoolRef) {
EXPECT_EQ("Logic be like: true false true false", str);
}
+TEST(SubstituteTest, Enums) {
+ enum UnscopedEnum { kEnum0 = 0, kEnum1 = 1 };
+ EXPECT_EQ("0 1", absl::Substitute("$0 $1", UnscopedEnum::kEnum0,
+ UnscopedEnum::kEnum1));
+
+ enum class ScopedEnum { kEnum0 = 0, kEnum1 = 1 };
+ EXPECT_EQ("0 1",
+ absl::Substitute("$0 $1", ScopedEnum::kEnum0, ScopedEnum::kEnum1));
+
+ enum class ScopedEnumInt32 : int32_t { kEnum0 = 989, kEnum1 = INT32_MIN };
+ EXPECT_EQ("989 -2147483648",
+ absl::Substitute("$0 $1", ScopedEnumInt32::kEnum0,
+ ScopedEnumInt32::kEnum1));
+
+ enum class ScopedEnumUInt32 : uint32_t { kEnum0 = 1, kEnum1 = UINT32_MAX };
+ EXPECT_EQ("1 4294967295", absl::Substitute("$0 $1", ScopedEnumUInt32::kEnum0,
+ ScopedEnumUInt32::kEnum1));
+
+ enum class ScopedEnumInt64 : int64_t { kEnum0 = -1, kEnum1 = 42949672950 };
+ EXPECT_EQ("-1 42949672950", absl::Substitute("$0 $1", ScopedEnumInt64::kEnum0,
+ ScopedEnumInt64::kEnum1));
+
+ enum class ScopedEnumUInt64 : uint64_t { kEnum0 = 1, kEnum1 = 42949672950 };
+ EXPECT_EQ("1 42949672950", absl::Substitute("$0 $1", ScopedEnumUInt64::kEnum0,
+ ScopedEnumUInt64::kEnum1));
+
+ enum class ScopedEnumChar : signed char { kEnum0 = -1, kEnum1 = 1 };
+ EXPECT_EQ("-1 1", absl::Substitute("$0 $1", ScopedEnumChar::kEnum0,
+ ScopedEnumChar::kEnum1));
+
+ enum class ScopedEnumUChar : unsigned char {
+ kEnum0 = 0,
+ kEnum1 = 1,
+ kEnumMax = 255
+ };
+ EXPECT_EQ("0 1 255", absl::Substitute("$0 $1 $2", ScopedEnumUChar::kEnum0,
+ ScopedEnumUChar::kEnum1,
+ ScopedEnumUChar::kEnumMax));
+
+ enum class ScopedEnumInt16 : int16_t { kEnum0 = -100, kEnum1 = 10000 };
+ EXPECT_EQ("-100 10000", absl::Substitute("$0 $1", ScopedEnumInt16::kEnum0,
+ ScopedEnumInt16::kEnum1));
+
+ enum class ScopedEnumUInt16 : uint16_t { kEnum0 = 0, kEnum1 = 10000 };
+ EXPECT_EQ("0 10000", absl::Substitute("$0 $1", ScopedEnumUInt16::kEnum0,
+ ScopedEnumUInt16::kEnum1));
+}
+
#ifdef GTEST_HAS_DEATH_TEST
TEST(SubstituteDeathTest, SubstituteDeath) {
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index d7195473..64d3b929 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -34,7 +34,9 @@ cc_library(
hdrs = [
"internal/graphcycles.h",
],
- copts = ABSL_DEFAULT_COPTS,
+ copts = ABSL_DEFAULT_COPTS + select({
+ "//conditions:default": [],
+ }),
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = [
"//absl:__subpackages__",
@@ -95,8 +97,8 @@ cc_library(
deps = [
":graphcycles_internal",
":kernel_timeout_internal",
- "//absl/base",
"//absl/base:atomic_hook",
+ "//absl/base",
"//absl/base:base_internal",
"//absl/base:config",
"//absl/base:core_headers",
@@ -106,7 +108,9 @@ cc_library(
"//absl/debugging:stacktrace",
"//absl/debugging:symbolize",
"//absl/time",
- ],
+ ] + select({
+ "//conditions:default": [],
+ }),
)
cc_test(
@@ -115,6 +119,9 @@ cc_test(
srcs = ["barrier_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":synchronization",
"//absl/time",
@@ -128,6 +135,9 @@ cc_test(
srcs = ["blocking_counter_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":synchronization",
"//absl/time",
@@ -278,6 +288,9 @@ cc_test(
size = "medium",
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = [
+ "no_test_wasm",
+ ],
deps = [
":per_thread_sem_test_common",
":synchronization",
@@ -294,7 +307,10 @@ cc_test(
],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- tags = ["no_test_ios_x86_64"],
+ tags = [
+ "no_test_ios_x86_64",
+ "no_test_wasm",
+ ],
deps = [
":synchronization",
"//absl/base:core_headers",
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index 605efe2d..9335c264 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -14,6 +14,7 @@
# limitations under the License.
#
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
graphcycles_internal
@@ -32,6 +33,7 @@ absl_cc_library(
absl::raw_logging_internal
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
kernel_timeout_internal
@@ -125,6 +127,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
thread_pool
@@ -170,6 +173,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
per_thread_sem_test_common
diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc
index 53a71b34..44e6129b 100644
--- a/absl/synchronization/internal/create_thread_identity.cc
+++ b/absl/synchronization/internal/create_thread_identity.cc
@@ -38,7 +38,7 @@ ABSL_CONST_INIT static base_internal::ThreadIdentity* thread_identity_freelist;
// A per-thread destructor for reclaiming associated ThreadIdentity objects.
// Since we must preserve their storage we cache them for re-use.
-void ReclaimThreadIdentity(void* v) {
+static void ReclaimThreadIdentity(void* v) {
base_internal::ThreadIdentity* identity =
static_cast<base_internal::ThreadIdentity*>(v);
@@ -48,8 +48,6 @@ void ReclaimThreadIdentity(void* v) {
base_internal::LowLevelAlloc::Free(identity->per_thread_synch.all_locks);
}
- PerThreadSem::Destroy(identity);
-
// We must explicitly clear the current thread's identity:
// (a) Subsequent (unrelated) per-thread destructors may require an identity.
  // We must guarantee a new identity is used in this case (this destructor
@@ -71,7 +69,12 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) {
return (addr + align - 1) & ~(align - 1);
}
-static void ResetThreadIdentity(base_internal::ThreadIdentity* identity) {
+void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) {
+ PerThreadSem::Init(identity);
+}
+
+static void ResetThreadIdentityBetweenReuse(
+ base_internal::ThreadIdentity* identity) {
base_internal::PerThreadSynch* pts = &identity->per_thread_synch;
pts->next = nullptr;
pts->skip = nullptr;
@@ -116,8 +119,9 @@ static base_internal::ThreadIdentity* NewThreadIdentity() {
identity = reinterpret_cast<base_internal::ThreadIdentity*>(
RoundUp(reinterpret_cast<intptr_t>(allocation),
base_internal::PerThreadSynch::kAlignment));
+ OneTimeInitThreadIdentity(identity);
}
- ResetThreadIdentity(identity);
+ ResetThreadIdentityBetweenReuse(identity);
return identity;
}
@@ -127,7 +131,6 @@ static base_internal::ThreadIdentity* NewThreadIdentity() {
// REQUIRES: CurrentThreadIdentity(false) == nullptr
base_internal::ThreadIdentity* CreateThreadIdentity() {
base_internal::ThreadIdentity* identity = NewThreadIdentity();
- PerThreadSem::Init(identity);
// Associate the value with the current thread, and attach our destructor.
base_internal::SetCurrentThreadIdentity(identity, ReclaimThreadIdentity);
return identity;
diff --git a/absl/synchronization/internal/create_thread_identity.h b/absl/synchronization/internal/create_thread_identity.h
index e121f683..4cfde091 100644
--- a/absl/synchronization/internal/create_thread_identity.h
+++ b/absl/synchronization/internal/create_thread_identity.h
@@ -36,10 +36,6 @@ namespace synchronization_internal {
// For private use only.
base_internal::ThreadIdentity* CreateThreadIdentity();
-// A per-thread destructor for reclaiming associated ThreadIdentity objects.
-// For private use only.
-void ReclaimThreadIdentity(void* v);
-
// Returns the ThreadIdentity object representing the calling thread; guaranteed
// to be unique for its lifetime. The returned object will remain valid for the
// program's lifetime; although it may be re-assigned to a subsequent thread.
diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc
index a6031787..469e8f32 100644
--- a/absl/synchronization/internal/per_thread_sem.cc
+++ b/absl/synchronization/internal/per_thread_sem.cc
@@ -47,10 +47,6 @@ void PerThreadSem::Init(base_internal::ThreadIdentity *identity) {
identity->is_idle.store(false, std::memory_order_relaxed);
}
-void PerThreadSem::Destroy(base_internal::ThreadIdentity *identity) {
- Waiter::GetWaiter(identity)->~Waiter();
-}
-
void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) {
const int ticker =
identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1;
diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h
index 7beae8ef..90a88809 100644
--- a/absl/synchronization/internal/per_thread_sem.h
+++ b/absl/synchronization/internal/per_thread_sem.h
@@ -66,10 +66,6 @@ class PerThreadSem {
// REQUIRES: May only be called by ThreadIdentity.
static void Init(base_internal::ThreadIdentity* identity);
- // Destroy the PerThreadSem associated with "identity".
- // REQUIRES: May only be called by ThreadIdentity.
- static void Destroy(base_internal::ThreadIdentity* identity);
-
// Increments "identity"'s count.
static inline void Post(base_internal::ThreadIdentity* identity);
@@ -81,8 +77,7 @@ class PerThreadSem {
// Permitted callers.
friend class PerThreadSemTest;
friend class absl::Mutex;
- friend absl::base_internal::ThreadIdentity* CreateThreadIdentity();
- friend void ReclaimThreadIdentity(void* v);
+ friend void OneTimeInitThreadIdentity(absl::base_internal::ThreadIdentity*);
};
} // namespace synchronization_internal
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index db1184e6..24a6b548 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -174,6 +174,15 @@ TEST_F(PerThreadSemTest, Timeouts) {
EXPECT_TRUE(Wait(negative_timeout));
}
+TEST_F(PerThreadSemTest, ThreadIdentityReuse) {
+ // Create a base_internal::ThreadIdentity object and keep reusing it. There
+ // should be no memory or resource leaks.
+ for (int i = 0; i < 10000; i++) {
+ std::thread t([]() { GetOrCreateCurrentThreadIdentity(); });
+ t.join();
+ }
+}
+
} // namespace
} // namespace synchronization_internal
diff --git a/absl/synchronization/internal/waiter.cc b/absl/synchronization/internal/waiter.cc
index 28ef311e..f2051d67 100644
--- a/absl/synchronization/internal/waiter.cc
+++ b/absl/synchronization/internal/waiter.cc
@@ -71,8 +71,6 @@ Waiter::Waiter() {
futex_.store(0, std::memory_order_relaxed);
}
-Waiter::~Waiter() = default;
-
bool Waiter::Wait(KernelTimeout t) {
// Loop until we can atomically decrement futex from a positive
// value, waiting on a futex while we believe it is zero.
@@ -161,18 +159,6 @@ Waiter::Waiter() {
wakeup_count_ = 0;
}
-Waiter::~Waiter() {
- const int err = pthread_mutex_destroy(&mu_);
- if (err != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_mutex_destroy failed: %d", err);
- }
-
- const int err2 = pthread_cond_destroy(&cv_);
- if (err2 != 0) {
- ABSL_RAW_LOG(FATAL, "pthread_cond_destroy failed: %d", err2);
- }
-}
-
bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {
@@ -240,12 +226,6 @@ Waiter::Waiter() {
wakeups_.store(0, std::memory_order_relaxed);
}
-Waiter::~Waiter() {
- if (sem_destroy(&sem_) != 0) {
- ABSL_RAW_LOG(FATAL, "sem_destroy failed with errno %d\n", errno);
- }
-}
-
bool Waiter::Wait(KernelTimeout t) {
struct timespec abs_timeout;
if (t.has_timeout()) {
@@ -363,11 +343,6 @@ Waiter::Waiter() {
wakeup_count_ = 0;
}
-// SRW locks and condition variables do not need to be explicitly destroyed.
-// https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
-// https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
-Waiter::~Waiter() = default;
-
bool Waiter::Wait(KernelTimeout t) {
SRWLOCK *mu = WinHelper::GetLock(this);
CONDITION_VARIABLE *cv = WinHelper::GetCond(this);
diff --git a/absl/synchronization/internal/waiter.h b/absl/synchronization/internal/waiter.h
index be3df180..b8adfeb5 100644
--- a/absl/synchronization/internal/waiter.h
+++ b/absl/synchronization/internal/waiter.h
@@ -71,9 +71,6 @@ class Waiter {
Waiter(const Waiter&) = delete;
Waiter& operator=(const Waiter&) = delete;
- // Destroy any data to track waits.
- ~Waiter();
-
// Blocks the calling thread until a matching call to `Post()` or
// `t` has passed. Returns `true` if woken (`Post()` called),
// `false` on timeout.
@@ -106,6 +103,12 @@ class Waiter {
#endif
private:
+ // The destructor must not be called since Mutex/CondVar
+ // can use PerThreadSem/Waiter after the thread exits.
+ // Waiter objects are embedded in ThreadIdentity objects,
+ // which are reused via a freelist and are never destroyed.
+ ~Waiter() = delete;
+
#if ABSL_WAITER_MODE == ABSL_WAITER_MODE_FUTEX
// Futexes are defined by specification to be 32-bits.
// Thus std::atomic<int32_t> must be just an int32_t with lockfree methods.
@@ -136,8 +139,11 @@ class Waiter {
// REQUIRES: WinHelper::GetLock(this) must be held.
void InternalCondVarPoke();
- // We can't include Windows.h in our headers, so we use aligned charachter
+ // We can't include Windows.h in our headers, so we use aligned character
// buffers to define the storage of SRWLOCK and CONDITION_VARIABLE.
+ // SRW locks and condition variables do not need to be explicitly destroyed.
+ // https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-initializesrwlock
+ // https://stackoverflow.com/questions/28975958/why-does-windows-have-no-deleteconditionvariable-function-to-go-together-with
alignas(void*) unsigned char mu_storage_[sizeof(void*)];
alignas(void*) unsigned char cv_storage_[sizeof(void*)];
int waiter_count_;
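
Deleting the destructor turns the lifetime rule in the new comment into a compile-time guarantee: a `Waiter` can only be created in storage that is never reclaimed. The idiom in isolation, with illustrative names:

    #include <new>

    struct Immortal {
      Immortal() = default;
      ~Immortal() = delete;  // no locals, no `delete`, no static duration
    };

    alignas(Immortal) static unsigned char storage[sizeof(Immortal)];

    Immortal* MakeImmortal() {
      return new (storage) Immortal;  // placement-new only; never destroyed
    }
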
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 76ad41fe..52e2455d 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -109,7 +109,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu,
bool locking, bool trylock,
bool read_lock);
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp)) {
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) {
submit_profile_data.Store(fn);
}
@@ -1744,23 +1744,33 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock);
}
-// The zap_desig_waker bitmask is used to clear the designated waker flag in
-// the mutex if this thread has blocked, and therefore may be the designated
-// waker.
-static const intptr_t zap_desig_waker[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuDesig) // blocked; turn off the designated waker bit
-};
+// Clears the designated waker flag in the mutex if this thread has blocked, and
+// therefore may be the designated waker.
+static intptr_t ClearDesignatedWakerMask(int flag) {
+ assert(flag >= 0);
+ assert(flag <= 1);
+ switch (flag) {
+ case 0: // not blocked
+ return ~static_cast<intptr_t>(0);
+ case 1: // blocked; turn off the designated waker bit
+ return ~static_cast<intptr_t>(kMuDesig);
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+}
-// The ignore_waiting_writers bitmask is used to ignore the existence
-// of waiting writers if a reader that has already blocked once
-// wakes up.
-static const intptr_t ignore_waiting_writers[] = {
- ~static_cast<intptr_t>(0), // not blocked
- ~static_cast<intptr_t>(
- kMuWrWait) // blocked; pretend there are no waiting writers
-};
+// Conditionally ignores the existence of waiting writers if a reader that has
+// already blocked once wakes up.
+static intptr_t IgnoreWaitingWritersMask(int flag) {
+ assert(flag >= 0);
+ assert(flag <= 1);
+ switch (flag) {
+ case 0: // not blocked
+ return ~static_cast<intptr_t>(0);
+ case 1: // blocked; pretend there are no waiting writers
+ return ~static_cast<intptr_t>(kMuWrWait);
+ }
+ ABSL_INTERNAL_UNREACHABLE;
+}
// Internal version of LockWhen(). See LockSlowWithDeadline()
ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond,
@@ -1852,8 +1862,10 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond,
bool unlock = false;
if ((v & how->fast_need_zero) == 0 && // try fast acquire
mu_.compare_exchange_strong(
- v, (how->fast_or | (v & zap_desig_waker[flags & kMuHasBlocked])) +
- how->fast_add,
+ v,
+ (how->fast_or |
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+ how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (cond == nullptr ||
EvalConditionAnnotated(cond, this, true, false, how == kShared)) {
@@ -1927,9 +1939,10 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
CheckForMutexCorruption(v, "Lock");
if ((v & waitp->how->slow_need_zero) == 0) {
if (mu_.compare_exchange_strong(
- v, (waitp->how->fast_or |
- (v & zap_desig_waker[flags & kMuHasBlocked])) +
- waitp->how->fast_add,
+ v,
+ (waitp->how->fast_or |
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked))) +
+ waitp->how->fast_add,
std::memory_order_acquire, std::memory_order_relaxed)) {
if (waitp->cond == nullptr ||
EvalConditionAnnotated(waitp->cond, this, true, false,
@@ -1946,8 +1959,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters
// This thread tries to become the one and only waiter.
PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags);
- intptr_t nv = (v & zap_desig_waker[flags & kMuHasBlocked] & kMuLow) |
- kMuWait;
+ intptr_t nv =
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) |
+ kMuWait;
ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to empty list failed");
if (waitp->how == kExclusive && (v & kMuReader) != 0) {
nv |= kMuWrWait;
@@ -1961,12 +1975,13 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
waitp->thread->waitp = nullptr;
}
} else if ((v & waitp->how->slow_inc_need_zero &
- ignore_waiting_writers[flags & kMuHasBlocked]) == 0) {
+ IgnoreWaitingWritersMask(flags & kMuHasBlocked)) == 0) {
// This is a reader that needs to increment the reader count,
// but the count is currently held in the last waiter.
if (mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuReader,
+ v,
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+ kMuSpin | kMuReader,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v);
h->readers += kMuOne; // inc reader count in waiter
@@ -1987,8 +2002,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) {
}
} else if ((v & kMuSpin) == 0 && // attempt to queue ourselves
mu_.compare_exchange_strong(
- v, (v & zap_desig_waker[flags & kMuHasBlocked]) | kMuSpin |
- kMuWait,
+ v,
+ (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) |
+ kMuSpin | kMuWait,
std::memory_order_acquire, std::memory_order_relaxed)) {
PerThreadSynch *h = GetPerThreadSynch(v);
PerThreadSynch *new_h = Enqueue(h, waitp, v, flags);
@@ -2315,19 +2331,21 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
- int64_t enqueue_timestamp = wake_list->waitp->contention_start_cycles;
- bool cond_waiter = wake_list->cond_waiter;
+ int64_t wait_cycles = 0;
+ int64_t now = base_internal::CycleClock::Now();
do {
+ // Sample lock contention events only if the waiter was trying to acquire
+ // the lock, not waiting on a condition variable or Condition.
+ if (!wake_list->cond_waiter) {
+ wait_cycles += (now - wake_list->waitp->contention_start_cycles);
+ wake_list->waitp->contention_start_cycles = now;
+ }
wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
- if (!cond_waiter) {
- // Sample lock contention events only if the (first) waiter was trying to
- // acquire the lock, not waiting on a condition variable or Condition.
- int64_t wait_cycles =
- base_internal::CycleClock::Now() - enqueue_timestamp;
+ if (wait_cycles > 0) {
mutex_tracer("slow release", this, wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- submit_profile_data(enqueue_timestamp);
+ submit_profile_data(wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
}
}
@@ -2492,9 +2510,9 @@ void CondVar::Remove(PerThreadSynch *s) {
// before calling Mutex::UnlockSlow(), the Mutex code might be re-entered (via
// the logging code, or via a Condition function) and might potentially attempt
// to block this thread. That would be a problem if the thread were already on
-// a the condition variable waiter queue. Thus, we use the waitp->cv_word
-// to tell the unlock code to call CondVarEnqueue() to queue the thread on the
-// condition variable queue just before the mutex is to be unlocked, and (most
+// a condition variable waiter queue. Thus, we use the waitp->cv_word to tell
+// the unlock code to call CondVarEnqueue() to queue the thread on the condition
+// variable queue just before the mutex is to be unlocked, and (most
// importantly) after any call to an external routine that might re-enter the
// mutex code.
static void CondVarEnqueue(SynchWaitParams *waitp) {
@@ -2557,6 +2575,23 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
while (waitp.thread->state.load(std::memory_order_acquire) ==
PerThreadSynch::kQueued) {
if (!Mutex::DecrementSynchSem(mutex, waitp.thread, t)) {
+ // DecrementSynchSem returned due to timeout.
+ // Now we will either (1) remove ourselves from the wait list in Remove
+ // below, in which case Remove will set thread.state = kAvailable and
+ // we will not call DecrementSynchSem again; or (2) Signal/SignalAll
+ // has removed us concurrently and is calling Wakeup, which will set
+ // thread.state = kAvailable and post to the semaphore.
+      // It's important to reset the timeout for case (2) because otherwise
+      // we can live-lock in this loop since DecrementSynchSem will always
+      // return immediately due to timeout, but Signal/SignalAll may not
+      // have set thread.state = kAvailable yet (and is not scheduled
+      // due to thread priorities or other scheduler artifacts).
+ // Note this could also be resolved if Signal/SignalAll would set
+ // thread.state = kAvailable while holding the wait list spin lock.
+ // But this can't be easily done for SignalAll since it grabs the whole
+ // wait list with a single compare-exchange and does not really grab
+ // the spin lock.
+ t = KernelTimeout::Never();
this->Remove(waitp.thread);
rc = true;
}
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index 38338f24..b69b7089 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -174,9 +174,12 @@ class ABSL_LOCKABLE Mutex {
// Mutex::AssertHeld()
//
- // Return immediately if this thread holds the `Mutex` exclusively (in write
- // mode). Otherwise, may report an error (typically by crashing with a
- // diagnostic), or may return immediately.
+ // Require that the mutex be held exclusively (write mode) by this thread.
+ //
+ // If the mutex is not currently held by this thread, this function may report
+ // an error (typically by crashing with a diagnostic) or it may do nothing.
+ // This function is intended only as a tool to assist debugging; it doesn't
+ // guarantee correctness.
void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK();
// ---------------------------------------------------------------------------
@@ -236,9 +239,13 @@ class ABSL_LOCKABLE Mutex {
// Mutex::AssertReaderHeld()
//
- // Returns immediately if this thread holds the `Mutex` in at least shared
- // mode (read mode). Otherwise, may report an error (typically by
- // crashing with a diagnostic), or may return immediately.
+ // Require that the mutex be held at least in shared mode (read mode) by this
+ // thread.
+ //
+ // If the mutex is not currently held by this thread, this function may report
+ // an error (typically by crashing with a diagnostic) or it may do nothing.
+ // This function is intended only as a tool to assist debugging; it doesn't
+ // guarantee correctness.
void AssertReaderHeld() const ABSL_ASSERT_SHARED_LOCK();
// Mutex::WriterLock()
@@ -984,14 +991,15 @@ inline Condition::Condition(const T *object,
// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a mutex is
-// contended. The callback is given the absl/base/cycleclock.h timestamp when
-// waiting began.
+// contended. The callback is given the number of cycles for which waiting
+// happened (as measured by //absl/base/internal/cycleclock.h, which may not
+// be real "cycle" counts).
//
// Calls to this function do not race or block, but there is no ordering
// guaranteed between calls to this function and call to the provided hook.
// In particular, the previously registered hook may still be called for some
// time after this function returns.
-void RegisterMutexProfiler(void (*fn)(int64_t wait_timestamp));
+void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
// Register a hook for Mutex tracing.
//
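
A hedged sketch of a hook under the revised contract: the argument is now a duration in CycleClock units rather than a start timestamp, so the hook can aggregate it directly (`RecordContentionCycles` is a hypothetical sink, not an Abseil API):

    #include <cstdint>
    #include "absl/synchronization/mutex.h"

    void RecordContentionCycles(int64_t cycles);  // hypothetical sink

    void InstallMutexProfiler() {
      absl::RegisterMutexProfiler([](int64_t wait_cycles) {
        RecordContentionCycles(wait_cycles);  // a duration, not a timestamp
      });
    }
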
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 4f403176..99bb0175 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -1704,4 +1704,30 @@ TEST(Mutex, MuTime) {
EXPECT_EQ(RunTest(&TestMuTime, threads, iterations, 1), threads * iterations);
}
+TEST(Mutex, SignalExitedThread) {
+ // The test may expose a race when Mutex::Unlock signals a thread
+ // that has already exited.
+#if defined(__wasm__) || defined(__asmjs__)
+ constexpr int kThreads = 1; // OOMs under WASM
+#else
+ constexpr int kThreads = 100;
+#endif
+ std::vector<std::thread> top;
+ for (unsigned i = 0; i < 2 * std::thread::hardware_concurrency(); i++) {
+ top.emplace_back([&]() {
+ for (int i = 0; i < kThreads; i++) {
+ absl::Mutex mu;
+ std::thread t([&]() {
+ mu.Lock();
+ mu.Unlock();
+ });
+ mu.Lock();
+ mu.Unlock();
+ t.join();
+ }
+ });
+ }
+ for (auto &th : top) th.join();
+}
+
} // namespace
diff --git a/absl/synchronization/notification.h b/absl/synchronization/notification.h
index 9a354ca2..4bec2689 100644
--- a/absl/synchronization/notification.h
+++ b/absl/synchronization/notification.h
@@ -22,7 +22,7 @@
// The `Notification` object maintains a private boolean "notified" state that
// transitions to `true` at most once. The `Notification` class provides the
// following primary member functions:
-// * `HasBeenNotified() `to query its state
+// * `HasBeenNotified()` to query its state
// * `WaitForNotification*()` to have threads wait until the "notified" state
// is `true`.
// * `Notify()` to set the notification's "notified" state to `true` and
@@ -52,6 +52,7 @@
#include <atomic>
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
@@ -74,7 +75,7 @@ class Notification {
// Notification::HasBeenNotified()
//
// Returns the value of the notification's internal "notified" state.
- bool HasBeenNotified() const {
+ ABSL_MUST_USE_RESULT bool HasBeenNotified() const {
return HasBeenNotifiedInternal(&this->notified_yet_);
}
diff --git a/absl/time/BUILD.bazel b/absl/time/BUILD.bazel
index e8c49660..aa07df3d 100644
--- a/absl/time/BUILD.bazel
+++ b/absl/time/BUILD.bazel
@@ -64,9 +64,7 @@ cc_library(
hdrs = ["internal/test_util.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
- visibility = [
- "//absl/time:__pkg__",
- ],
+ visibility = ["//visibility:private"],
deps = [
":time",
"//absl/base:config",
diff --git a/absl/time/CMakeLists.txt b/absl/time/CMakeLists.txt
index f6ff8bd1..debab3ba 100644
--- a/absl/time/CMakeLists.txt
+++ b/absl/time/CMakeLists.txt
@@ -87,6 +87,7 @@ absl_cc_library(
$<$<PLATFORM_ID:Darwin>:${CoreFoundation}>
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
time_internal_test_util
diff --git a/absl/time/clock.cc b/absl/time/clock.cc
index 7b204c4e..a091efdc 100644
--- a/absl/time/clock.cc
+++ b/absl/time/clock.cc
@@ -196,7 +196,7 @@ struct ABSL_CACHELINE_ALIGNED TimeState {
absl::base_internal::SpinLock lock{absl::kConstInit,
base_internal::SCHEDULE_KERNEL_ONLY};
};
-ABSL_CONST_INIT static TimeState time_state{};
+ABSL_CONST_INIT static TimeState time_state;
// Return the time in ns as told by the kernel interface. Place in *cycleclock
// the value of the cycleclock at about the time of the syscall.
diff --git a/absl/time/duration.cc b/absl/time/duration.cc
index 4443109a..2bba62da 100644
--- a/absl/time/duration.cc
+++ b/absl/time/duration.cc
@@ -766,13 +766,14 @@ void AppendNumberUnit(std::string* out, double n, DisplayUnit unit) {
// is non-zero.
// Unlike Go, we format the zero duration as 0, with no unit.
std::string FormatDuration(Duration d) {
- const Duration min_duration = Seconds(kint64min);
- if (d == min_duration) {
+ constexpr Duration kMinDuration = Seconds(kint64min);
+ std::string s;
+ if (d == kMinDuration) {
// Avoid needing to negate kint64min by directly returning what the
// following code should produce in that case.
- return "-2562047788015215h30m8s";
+ s = "-2562047788015215h30m8s";
+ return s;
}
- std::string s;
if (d < ZeroDuration()) {
s.append("-");
d = -d;
diff --git a/absl/time/duration_test.cc b/absl/time/duration_test.cc
index b7209e1c..b7abf4ba 100644
--- a/absl/time/duration_test.cc
+++ b/absl/time/duration_test.cc
@@ -349,6 +349,11 @@ TEST(Duration, ToChrono) {
}
TEST(Duration, FactoryOverloads) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
enum E { kOne = 1 };
#define TEST_FACTORY_OVERLOADS(NAME) \
EXPECT_EQ(1, NAME(kOne) / NAME(kOne)); \
@@ -879,6 +884,11 @@ TEST(Duration, RelationalOperators) {
}
TEST(Duration, Addition) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
#define TEST_ADD_OPS(UNIT) \
do { \
EXPECT_EQ(UNIT(2), UNIT(1) + UNIT(1)); \
@@ -972,6 +982,11 @@ TEST(Duration, Negation) {
}
TEST(Duration, AbsoluteValue) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
EXPECT_EQ(absl::ZeroDuration(), AbsDuration(absl::ZeroDuration()));
EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(1)));
EXPECT_EQ(absl::Seconds(1), AbsDuration(absl::Seconds(-1)));
@@ -989,6 +1004,11 @@ TEST(Duration, AbsoluteValue) {
}
TEST(Duration, Multiplication) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
#define TEST_MUL_OPS(UNIT) \
do { \
EXPECT_EQ(UNIT(5), UNIT(2) * 2.5); \
@@ -1241,6 +1261,11 @@ TEST(Duration, RoundTripUnits) {
}
TEST(Duration, TruncConversions) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
// Tests ToTimespec()/DurationFromTimespec()
const struct {
absl::Duration d;
@@ -1537,6 +1562,11 @@ TEST(Duration, ConversionSaturation) {
}
TEST(Duration, FormatDuration) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
// Example from Go's docs.
EXPECT_EQ("72h3m0.5s",
absl::FormatDuration(absl::Hours(72) + absl::Minutes(3) +
@@ -1671,6 +1701,11 @@ TEST(Duration, FormatDuration) {
}
TEST(Duration, ParseDuration) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
absl::Duration d;
// No specified unit. Should only work for zero and infinity.
diff --git a/absl/time/internal/cctz/BUILD.bazel b/absl/time/internal/cctz/BUILD.bazel
index bdc2e309..7304d40d 100644
--- a/absl/time/internal/cctz/BUILD.bazel
+++ b/absl/time/internal/cctz/BUILD.bazel
@@ -85,7 +85,11 @@ cc_library(
deps = [
":civil_time",
"//absl/base:config",
- ],
+ ] + select(
+ {
+ "//conditions:default": [],
+ },
+ ),
)
### tests
@@ -115,6 +119,7 @@ cc_test(
"no_test_android_arm",
"no_test_android_arm64",
"no_test_android_x86",
+ "no_test_wasm",
],
deps = [
":civil_time",
@@ -134,6 +139,7 @@ cc_test(
"no_test_android_arm",
"no_test_android_arm64",
"no_test_android_x86",
+ "no_test_wasm",
],
deps = [
":civil_time",
diff --git a/absl/time/internal/cctz/include/cctz/civil_time_detail.h b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
index 8aadde57..a5b084e6 100644
--- a/absl/time/internal/cctz/include/cctz/civil_time_detail.h
+++ b/absl/time/internal/cctz/include/cctz/civil_time_detail.h
@@ -84,14 +84,13 @@ CONSTEXPR_F bool is_leap_year(year_t y) noexcept {
return y % 4 == 0 && (y % 100 != 0 || y % 400 == 0);
}
CONSTEXPR_F int year_index(year_t y, month_t m) noexcept {
- return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
+ const int yi = static_cast<int>((y + (m > 2)) % 400);
+ return yi < 0 ? yi + 400 : yi;
}
-CONSTEXPR_F int days_per_century(year_t y, month_t m) noexcept {
- const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_century(int yi) noexcept {
return 36524 + (yi == 0 || yi > 300);
}
-CONSTEXPR_F int days_per_4years(year_t y, month_t m) noexcept {
- const int yi = year_index(y, m);
+CONSTEXPR_F int days_per_4years(int yi) noexcept {
return 1460 + (yi == 0 || yi > 300 || (yi - 1) % 100 < 96);
}
CONSTEXPR_F int days_per_year(year_t y, month_t m) noexcept {
@@ -133,17 +132,22 @@ CONSTEXPR_F fields n_day(year_t y, month_t m, diff_t d, diff_t cd, hour_t hh,
}
}
if (d > 365) {
+ int yi = year_index(ey, m); // Index into Gregorian 400 year cycle.
for (;;) {
- int n = days_per_century(ey, m);
+ int n = days_per_century(yi);
if (d <= n) break;
d -= n;
ey += 100;
+ yi += 100;
+ if (yi >= 400) yi -= 400;
}
for (;;) {
- int n = days_per_4years(ey, m);
+ int n = days_per_4years(yi);
if (d <= n) break;
d -= n;
ey += 4;
+ yi += 4;
+ if (yi >= 400) yi -= 400;
}
for (;;) {
int n = days_per_year(ey, m);
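
Carrying `yi` through the century and 4-year loops avoids recomputing the index, and the single-branch `year_index` is equivalent to the old double-modulo form because `%` in C++ keeps the sign of the dividend, so at most one add of 400 restores the canonical range. A small equivalence check with the two variants spelled out under illustrative names:

    using year_t = long long;

    constexpr int YearIndexOld(year_t y, int m) {
      return (static_cast<int>((y + (m > 2)) % 400) + 400) % 400;
    }

    constexpr int YearIndexNew(year_t y, int m) {  // needs C++14 constexpr
      const int yi = static_cast<int>((y + (m > 2)) % 400);
      return yi < 0 ? yi + 400 : yi;
    }

    static_assert(YearIndexOld(-1, 3) == YearIndexNew(-1, 3), "");
    static_assert(YearIndexOld(1970, 1) == YearIndexNew(1970, 1), "");
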
diff --git a/absl/time/internal/cctz/src/time_zone_format_test.cc b/absl/time/internal/cctz/src/time_zone_format_test.cc
index 6487fa93..f1f79a20 100644
--- a/absl/time/internal/cctz/src/time_zone_format_test.cc
+++ b/absl/time/internal/cctz/src/time_zone_format_test.cc
@@ -18,11 +18,15 @@
#include <sstream>
#include <string>
+#include "absl/base/config.h"
+#include "absl/time/internal/cctz/include/cctz/time_zone.h"
+#if defined(__linux__)
+#include <features.h>
+#endif
+
#include "gmock/gmock.h"
#include "gtest/gtest.h"
-#include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
-#include "absl/time/internal/cctz/include/cctz/time_zone.h"
namespace chrono = std::chrono;
@@ -183,8 +187,10 @@ TEST(Format, PosixConversions) {
TestFormatSpecifier(tp, tz, "%F", "1970-01-01");
TestFormatSpecifier(tp, tz, "%g", "70");
TestFormatSpecifier(tp, tz, "%G", "1970");
+#if defined(__GLIBC__)
TestFormatSpecifier(tp, tz, "%k", " 0");
TestFormatSpecifier(tp, tz, "%l", "12");
+#endif
TestFormatSpecifier(tp, tz, "%n", "\n");
TestFormatSpecifier(tp, tz, "%R", "00:00");
TestFormatSpecifier(tp, tz, "%t", "\t");
@@ -216,7 +222,9 @@ TEST(Format, LocaleSpecific) {
#if defined(__linux__)
// SU/C99/TZ extensions
TestFormatSpecifier(tp, tz, "%h", "Jan"); // Same as %b
+#if defined(__GLIBC__)
TestFormatSpecifier(tp, tz, "%P", "am");
+#endif
TestFormatSpecifier(tp, tz, "%r", "12:00:00 AM");
// Modified conversion specifiers %E_
@@ -1045,9 +1053,11 @@ TEST(Parse, LocaleSpecific) {
EXPECT_TRUE(parse("%h", "Feb", tz, &tp));
EXPECT_EQ(2, convert(tp, tz).month()); // Equivalent to %b
+#if defined(__GLIBC__)
tp = reset;
EXPECT_TRUE(parse("%l %p", "5 PM", tz, &tp));
EXPECT_EQ(17, convert(tp, tz).hour());
+#endif
tp = reset;
EXPECT_TRUE(parse("%r", "03:44:55 PM", tz, &tp));
@@ -1055,6 +1065,7 @@ TEST(Parse, LocaleSpecific) {
EXPECT_EQ(44, convert(tp, tz).minute());
EXPECT_EQ(55, convert(tp, tz).second());
+#if defined(__GLIBC__)
tp = reset;
EXPECT_TRUE(parse("%Ec", "Tue Nov 19 05:06:07 2013", tz, &tp));
EXPECT_EQ(convert(civil_second(2013, 11, 19, 5, 6, 7), tz), tp);
@@ -1126,6 +1137,7 @@ TEST(Parse, LocaleSpecific) {
EXPECT_TRUE(parse("%Oy", "04", tz, &tp));
EXPECT_EQ(2004, convert(tp, tz).year());
#endif
+#endif
}
TEST(Parse, ExtendedSeconds) {
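The guards added in time_zone_format_test.cc restrict %k, %l, %P, and the %E_/%O_ parsing checks to glibc: these are SU/GNU extensions that other Linux libcs (musl, for example) need not implement, and <features.h> is what defines __GLIBC__ on glibc systems. A hedged sketch of the same detection pattern in isolation:

// Sketch of the libc-detection idiom the test now uses: on Linux,
// <features.h> defines __GLIBC__ (and __GLIBC_MINOR__) only under glibc,
// so musl and other libcs skip the GNU-only strftime specifiers.
#if defined(__linux__)
#include <features.h>
#endif

#include <cstdio>
#include <ctime>

int main() {
  std::tm tm = {};
  tm.tm_hour = 17;
  char buf[16];
#if defined(__GLIBC__)
  // "%l" (blank-padded 12-hour clock) is an SU extension, safe on glibc.
  std::strftime(buf, sizeof(buf), "%l", &tm);
  std::printf("glibc: %%l -> '%s'\n", buf);
#else
  std::printf("non-glibc libc: skipping %%l\n");
#endif
}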
diff --git a/absl/time/internal/cctz/src/time_zone_lookup_test.cc b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
index 0226ab71..27a53c5c 100644
--- a/absl/time/internal/cctz/src/time_zone_lookup_test.cc
+++ b/absl/time/internal/cctz/src/time_zone_lookup_test.cc
@@ -21,10 +21,14 @@
#include <thread>
#include <vector>
-#include "gtest/gtest.h"
#include "absl/base/config.h"
-#include "absl/time/internal/cctz/include/cctz/civil_time.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
+#if defined(__linux__)
+#include <features.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/time/internal/cctz/include/cctz/civil_time.h"
namespace chrono = std::chrono;
@@ -1043,7 +1047,7 @@ TEST(MakeTime, LocalTimeLibC) {
// 1) we know how to change the time zone used by localtime()/mktime(),
// 2) cctz and localtime()/mktime() will use similar-enough tzdata, and
// 3) we have some idea about how mktime() behaves during transitions.
-#if defined(__linux__) && !defined(__ANDROID__)
+#if defined(__linux__) && defined(__GLIBC__) && !defined(__ANDROID__)
const char* const ep = getenv("TZ");
std::string tz_name = (ep != nullptr) ? ep : "";
for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
@@ -1182,6 +1186,44 @@ TEST(PrevTransition, AmericaNewYork) {
// We have a transition but we don't know which one.
}
+TEST(NextTransition, Scan) {
+ for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
+ time_zone tz;
+ if (!load_time_zone(*np, &tz)) {
+ continue; // tolerate kTimeZoneNames/zoneinfo skew
+ }
+ SCOPED_TRACE(testing::Message() << "In " << *np);
+
+ auto tp = time_point<absl::time_internal::cctz::seconds>::min();
+ time_zone::civil_transition trans;
+ while (tz.next_transition(tp, &trans)) {
+ time_zone::civil_lookup from_cl = tz.lookup(trans.from);
+ EXPECT_NE(from_cl.kind, time_zone::civil_lookup::REPEATED);
+ time_zone::civil_lookup to_cl = tz.lookup(trans.to);
+ EXPECT_NE(to_cl.kind, time_zone::civil_lookup::SKIPPED);
+
+ auto trans_tp = to_cl.trans;
+ time_zone::absolute_lookup trans_al = tz.lookup(trans_tp);
+ EXPECT_EQ(trans_al.cs, trans.to);
+ auto pre_trans_tp = trans_tp - absl::time_internal::cctz::seconds(1);
+ time_zone::absolute_lookup pre_trans_al = tz.lookup(pre_trans_tp);
+ EXPECT_EQ(pre_trans_al.cs + 1, trans.from);
+
+ auto offset_delta = trans_al.offset - pre_trans_al.offset;
+ EXPECT_EQ(offset_delta, trans.to - trans.from);
+ if (offset_delta == 0) {
+ // This "transition" is only an is_dst or abbr change.
+ EXPECT_EQ(to_cl.kind, time_zone::civil_lookup::UNIQUE);
+ if (trans_al.is_dst == pre_trans_al.is_dst) {
+ EXPECT_STRNE(trans_al.abbr, pre_trans_al.abbr);
+ }
+ }
+
+ tp = trans_tp; // continue scan from transition
+ }
+ }
+}
+
TEST(TimeZoneEdgeCase, AmericaNewYork) {
const time_zone tz = LoadZone("America/New_York");
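The new NextTransition.Scan test walks every transition of every known zone and checks the invariants relating civil_transition to the lookup results: trans.from is never a REPEATED civil time, trans.to is never SKIPPED, and the offset change matches the civil-time delta. A minimal sketch of the same scanning idiom, written against the public absl::TimeZone wrapper rather than the internal cctz types (the checked behavior is the same):

// Sketch: scan the first few transitions of one zone via the public API.
#include <cstdio>
#include "absl/time/civil_time.h"
#include "absl/time/time.h"

int main() {
  absl::TimeZone tz;
  if (!absl::LoadTimeZone("America/New_York", &tz)) return 1;

  absl::Time t = absl::InfinitePast();
  absl::TimeZone::CivilTransition trans;
  for (int i = 0; i < 5 && tz.NextTransition(t, &trans); ++i) {
    // trans.from is the civil time just before the transition instant;
    // trans.to is the civil time at the transition instant.
    std::printf("%s -> %s\n", absl::FormatCivilTime(trans.from).c_str(),
                absl::FormatCivilTime(trans.to).c_str());
    t = absl::FromCivil(trans.to, tz);  // continue the scan from here
  }
}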
diff --git a/absl/time/internal/cctz/testdata/README.zoneinfo b/absl/time/internal/cctz/testdata/README.zoneinfo
index 95fb4a91..a41c7b88 100644
--- a/absl/time/internal/cctz/testdata/README.zoneinfo
+++ b/absl/time/internal/cctz/testdata/README.zoneinfo
@@ -13,7 +13,12 @@ New versions can be generated using the following shell script.
trap "rm -fr ${DESTDIR}" 0 2 15
(
cd ${DESTDIR}
- git clone https://github.com/eggert/tz.git
+ if [ -n "${USE_GLOBAL_TZ}" ]
+ then
+ git clone -b global-tz https://github.com/JodaOrg/global-tz.git tz
+ else
+ git clone https://github.com/eggert/tz.git
+ fi
make --directory=tz \
install DESTDIR=${DESTDIR} \
DATAFORM=vanguard \
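A usage note on the README hunk above: setting USE_GLOBAL_TZ to any non-empty value in the environment before running the script selects JodaOrg's global-tz fork of the IANA data in place of eggert/tz; leaving the variable unset preserves the original behavior. The clone target directory stays "tz" in both branches, so the rest of the script is unchanged.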
diff --git a/absl/time/internal/cctz/testdata/version b/absl/time/internal/cctz/testdata/version
index 8ee898ba..ca002de2 100644
--- a/absl/time/internal/cctz/testdata/version
+++ b/absl/time/internal/cctz/testdata/version
@@ -1 +1 @@
-2021e
+2022a
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas b/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
index 5c9a20b9..c0421040 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Punta_Arenas
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago b/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
index 8d603226..cde8dbbf 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
+++ b/absl/time/internal/cctz/testdata/zoneinfo/America/Santiago
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
index ccc57c9c..effc4df5 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Gaza
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
index 906d8d5c..aa52bd26 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Asia/Hebron
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental b/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
index 8d603226..cde8dbbf 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Chile/Continental
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
index 8f83cefb..4e026859 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Kiev
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
index 88a6f3bd..40d23c02 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Simferopol
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
index a5755685..d4c35914 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Uzhgorod
Binary files differ
diff --git a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
index 4ea8dae4..71819a5a 100644
--- a/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
+++ b/absl/time/internal/cctz/testdata/zoneinfo/Europe/Zaporozhye
Binary files differ
diff --git a/absl/time/internal/test_util.cc b/absl/time/internal/test_util.cc
index 9a485a07..454b33a1 100644
--- a/absl/time/internal/test_util.cc
+++ b/absl/time/internal/test_util.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <cstddef>
#include <cstring>
+#include <memory>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
@@ -67,10 +68,6 @@ const struct ZoneInfo {
{"Invalid/TimeZone", nullptr, 0},
{"", nullptr, 0},
- // Also allow for loading the local time zone under TZ=US/Pacific.
- {"US/Pacific", //
- reinterpret_cast<char*>(America_Los_Angeles), America_Los_Angeles_len},
-
// Allows use of the local time zone from a system-specific location.
#ifdef _MSC_VER
{"localtime", //
@@ -114,7 +111,10 @@ std::unique_ptr<cctz::ZoneInfoSource> TestFactory(
new TestZoneInfoSource(zoneinfo.data, zoneinfo.length));
}
}
- ABSL_RAW_LOG(FATAL, "Unexpected time zone \"%s\" in test", name.c_str());
+
+  // The embedded zoneinfo data does not include the zone, so fall back to
+ // built-in UTC. The tests have been crafted so that this should only
+ // happen when testing absl::LocalTimeZone() with an unconstrained ${TZ}.
return nullptr;
}
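The rewritten TestFactory above no longer aborts on an unknown zone name; returning nullptr from the factory lets cctz fall back to its built-in UTC zone. A hedged sketch of that contract, with simplified stand-in types rather than the real cctz interfaces:

// Sketch of the factory fallback contract; FakeZoneInfoSource is a
// hypothetical stand-in for cctz::ZoneInfoSource.
#include <memory>
#include <string>

struct FakeZoneInfoSource {};

std::unique_ptr<FakeZoneInfoSource> TestFactoryLike(const std::string& name) {
  if (name == "America/Los_Angeles") {
    return std::make_unique<FakeZoneInfoSource>();  // embedded data found
  }
  // Unknown zone: nullptr tells the loader to use built-in UTC instead of
  // failing hard, which is what the updated TestFactory now relies on.
  return nullptr;
}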
diff --git a/absl/time/internal/zoneinfo.inc b/absl/time/internal/zoneinfo.inc
index bfed8299..7d8b3ff2 100644
--- a/absl/time/internal/zoneinfo.inc
+++ b/absl/time/internal/zoneinfo.inc
@@ -88,157 +88,156 @@ unsigned char America_Los_Angeles[] = {
0x00, 0x01, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0xbb, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x04,
- 0x1a, 0xc0, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x48, 0xa0, 0xff, 0xff,
- 0xff, 0xff, 0x9f, 0xbb, 0x15, 0x90, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86,
- 0x2a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xf7, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xcb, 0x89, 0x1a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x23,
- 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x61, 0x26, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xd6, 0xfe, 0x74, 0x5c, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x80,
- 0xad, 0x90, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xc3, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xdb, 0xc0, 0x90, 0x10, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xde,
- 0xa5, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0xac, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xde, 0xbe, 0x87, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x89,
- 0x8e, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x69, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xe1, 0x69, 0x70, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e,
- 0x4b, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x52, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xe4, 0x5e, 0x2d, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe5, 0x29,
- 0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x4a, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xe7, 0x12, 0x51, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x27,
- 0x2c, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xf2, 0x33, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xea, 0x07, 0x0e, 0x10, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd2,
- 0x15, 0x10, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xf0, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xec, 0xb1, 0xf7, 0x10, 0xff, 0xff, 0xff, 0xff, 0xed, 0xc6,
- 0xd2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xee, 0x91, 0xd9, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xef, 0xaf, 0xee, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x71,
- 0xbb, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xd0, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xf2, 0x7f, 0xc1, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x6f,
- 0xb2, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0xa3, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xf5, 0x4f, 0x94, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x3f,
- 0x85, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x76, 0x90, 0xff, 0xff,
- 0xff, 0xff, 0xf8, 0x28, 0xa2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf9, 0x0f,
- 0x58, 0x90, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x84, 0x10, 0xff, 0xff,
- 0xff, 0xff, 0xfa, 0xf8, 0x83, 0x20, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe8,
- 0x66, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x65, 0x20, 0xff, 0xff,
- 0xff, 0xff, 0xfd, 0xc8, 0x48, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb8,
- 0x47, 0x20, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8, 0x2a, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x98, 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x01, 0x88,
- 0x0c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78, 0x0b, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x03, 0x71, 0x28, 0x90, 0x00, 0x00, 0x00, 0x00, 0x04, 0x61,
- 0x27, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x51, 0x0a, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x06, 0x41, 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x07, 0x30,
- 0xec, 0x90, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x43, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x09, 0x10, 0xce, 0x90, 0x00, 0x00, 0x00, 0x00, 0x09, 0xad,
- 0xbf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0xb0, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x0b, 0xe0, 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd9,
- 0xcd, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x91, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x0e, 0xb9, 0xaf, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xa9,
- 0xae, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x91, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x11, 0x89, 0x90, 0x20, 0x00, 0x00, 0x00, 0x00, 0x12, 0x79,
- 0x73, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x72, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x14, 0x59, 0x55, 0x10, 0x00, 0x00, 0x00, 0x00, 0x15, 0x49,
- 0x54, 0x20, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x37, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x17, 0x29, 0x36, 0x20, 0x00, 0x00, 0x00, 0x00, 0x18, 0x22,
- 0x53, 0x90, 0x00, 0x00, 0x00, 0x00, 0x19, 0x09, 0x18, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x1a, 0x02, 0x35, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf2,
- 0x34, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe2, 0x17, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x1c, 0xd2, 0x16, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1,
- 0xf9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xf8, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x1f, 0xa1, 0xdb, 0x90, 0x00, 0x00, 0x00, 0x00, 0x20, 0x76,
- 0x2b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0xbd, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x22, 0x56, 0x0d, 0x20, 0x00, 0x00, 0x00, 0x00, 0x23, 0x6a,
- 0xda, 0x10, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xef, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x25, 0x4a, 0xbc, 0x10, 0x00, 0x00, 0x00, 0x00, 0x26, 0x15,
- 0xd1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x9e, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x27, 0xfe, 0xed, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x29, 0x0a,
- 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xcf, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x2a, 0xea, 0x62, 0x10, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xbe,
- 0xb1, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x7e, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x2d, 0x9e, 0x93, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb3,
- 0x60, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x75, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x30, 0x93, 0x42, 0x90, 0x00, 0x00, 0x00, 0x00, 0x31, 0x67,
- 0x92, 0x20, 0x00, 0x00, 0x00, 0x00, 0x32, 0x73, 0x24, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x33, 0x47, 0x74, 0x20, 0x00, 0x00, 0x00, 0x00, 0x34, 0x53,
- 0x06, 0x90, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x56, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x36, 0x32, 0xe8, 0x90, 0x00, 0x00, 0x00, 0x00, 0x37, 0x07,
- 0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1c, 0x05, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x38, 0xe7, 0x1a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x39, 0xfb,
- 0xe7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xfc, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x3b, 0xdb, 0xc9, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb0,
- 0x18, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0xab, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x3e, 0x8f, 0xfa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9b,
- 0x8d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xdc, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x41, 0x84, 0xa9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x42, 0x4f,
- 0xbe, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x8b, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x44, 0x2f, 0xa0, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x45, 0x44,
- 0x6d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xd3, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x47, 0x2d, 0x8a, 0x10, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd3,
- 0xb5, 0x20, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x6c, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x49, 0xb3, 0x97, 0x20, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xed,
- 0x4e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0xb3, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x4c, 0xd6, 0x6a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x7c,
- 0x95, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x4c, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x4f, 0x5c, 0x77, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x50, 0x96,
- 0x2e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x59, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x52, 0x76, 0x10, 0x90, 0x00, 0x00, 0x00, 0x00, 0x53, 0x1c,
- 0x3b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xf2, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x54, 0xfc, 0x1d, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x56, 0x35,
- 0xd4, 0x90, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x3a, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x58, 0x1e, 0xf1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x58, 0xc5,
- 0x1c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xd3, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x5a, 0xa4, 0xfe, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde,
- 0xb5, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xe0, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x5d, 0xbe, 0x97, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x64,
- 0xc2, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x79, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x60, 0x4d, 0xde, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x61, 0x87,
- 0x95, 0x90, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0xc0, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x63, 0x67, 0x77, 0x90, 0x00, 0x00, 0x00, 0x00, 0x64, 0x0d,
- 0xa2, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x59, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x65, 0xed, 0x84, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x67, 0x27,
- 0x3b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x66, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x69, 0x07, 0x1d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x69, 0xad,
- 0x48, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xff, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x6b, 0x96, 0x65, 0x20, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xd0,
- 0x1c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x47, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x6e, 0xaf, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x56,
- 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xe0, 0x10, 0x00, 0x00,
- 0x00, 0x00, 0x71, 0x36, 0x0b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f,
- 0xc2, 0x10, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xed, 0x20, 0x00, 0x00,
- 0x00, 0x00, 0x74, 0x4f, 0xa4, 0x10, 0x00, 0x00, 0x00, 0x00, 0x74, 0xff,
- 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0xc0, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x76, 0xde, 0xeb, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x78, 0x18,
- 0xa2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xcd, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x79, 0xf8, 0x84, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x9e,
- 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x66, 0x90, 0x00, 0x00,
- 0x00, 0x00, 0x7c, 0x7e, 0x91, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xb8,
- 0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x73, 0xa0, 0x00, 0x00,
- 0x00, 0x00, 0x7f, 0x98, 0x2a, 0x90, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02,
- 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x90, 0x01,
- 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff, 0x9d, 0x90, 0x01,
- 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x50,
- 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57, 0x54, 0x00, 0x50,
- 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x0a, 0x50, 0x53, 0x54, 0x38, 0x50, 0x44, 0x54, 0x2c, 0x4d, 0x33,
- 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, 0x2e, 0x31, 0x2e, 0x30,
- 0x0a
+ 0x00, 0xba, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xff, 0xff,
+ 0xff, 0xff, 0x5e, 0x04, 0x1a, 0xc0, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6,
+ 0x48, 0xa0, 0xff, 0xff, 0xff, 0xff, 0x9f, 0xbb, 0x15, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xa0, 0x86, 0x2a, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a,
+ 0xf7, 0x90, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x89, 0x1a, 0xa0, 0xff, 0xff,
+ 0xff, 0xff, 0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x61,
+ 0x26, 0x10, 0xff, 0xff, 0xff, 0xff, 0xd6, 0xfe, 0x74, 0x5c, 0xff, 0xff,
+ 0xff, 0xff, 0xd8, 0x80, 0xad, 0x90, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe,
+ 0xc3, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x90, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xdc, 0xde, 0xa5, 0x90, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9,
+ 0xac, 0x90, 0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x87, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xdf, 0x89, 0x8e, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e,
+ 0x69, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x70, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xe2, 0x7e, 0x4b, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49,
+ 0x52, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x2d, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xe5, 0x29, 0x34, 0x90, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47,
+ 0x4a, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe7, 0x12, 0x51, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xe8, 0x27, 0x2c, 0x10, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xf2,
+ 0x33, 0x10, 0xff, 0xff, 0xff, 0xff, 0xea, 0x07, 0x0e, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xea, 0xd2, 0x15, 0x10, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6,
+ 0xf0, 0x10, 0xff, 0xff, 0xff, 0xff, 0xec, 0xb1, 0xf7, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xed, 0xc6, 0xd2, 0x10, 0xff, 0xff, 0xff, 0xff, 0xee, 0x91,
+ 0xd9, 0x10, 0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xee, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf0, 0x71, 0xbb, 0x10, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f,
+ 0xd0, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0xc1, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf3, 0x6f, 0xb2, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f,
+ 0xa3, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x94, 0x90, 0xff, 0xff,
+ 0xff, 0xff, 0xf6, 0x3f, 0x85, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f,
+ 0x76, 0x90, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0xa2, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xf9, 0x0f, 0x58, 0x90, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08,
+ 0x84, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x83, 0x20, 0xff, 0xff,
+ 0xff, 0xff, 0xfb, 0xe8, 0x66, 0x10, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8,
+ 0x65, 0x20, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x48, 0x10, 0xff, 0xff,
+ 0xff, 0xff, 0xfe, 0xb8, 0x47, 0x20, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa8,
+ 0x2a, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x29, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x88, 0x0c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x02, 0x78,
+ 0x0b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x71, 0x28, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x04, 0x61, 0x27, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x05, 0x51,
+ 0x0a, 0x90, 0x00, 0x00, 0x00, 0x00, 0x06, 0x41, 0x09, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x07, 0x30, 0xec, 0x90, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d,
+ 0x43, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xce, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x09, 0xad, 0xbf, 0x20, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0,
+ 0xb0, 0x90, 0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0xaf, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x0c, 0xd9, 0xcd, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0,
+ 0x91, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0xaf, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x0f, 0xa9, 0xae, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99,
+ 0x91, 0x10, 0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x90, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x12, 0x79, 0x73, 0x10, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69,
+ 0x72, 0x20, 0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x55, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x15, 0x49, 0x54, 0x20, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39,
+ 0x37, 0x10, 0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x36, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x18, 0x22, 0x53, 0x90, 0x00, 0x00, 0x00, 0x00, 0x19, 0x09,
+ 0x18, 0x20, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x35, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x1a, 0xf2, 0x34, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe2,
+ 0x17, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd2, 0x16, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x1d, 0xc1, 0xf9, 0x90, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1,
+ 0xf8, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xdb, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x20, 0x76, 0x2b, 0x20, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81,
+ 0xbd, 0x90, 0x00, 0x00, 0x00, 0x00, 0x22, 0x56, 0x0d, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x23, 0x6a, 0xda, 0x10, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35,
+ 0xef, 0x20, 0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0xbc, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x26, 0x15, 0xd1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a,
+ 0x9e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xed, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x29, 0x0a, 0x80, 0x10, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde,
+ 0xcf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x62, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x2b, 0xbe, 0xb1, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3,
+ 0x7e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x93, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x2e, 0xb3, 0x60, 0x90, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e,
+ 0x75, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x42, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x31, 0x67, 0x92, 0x20, 0x00, 0x00, 0x00, 0x00, 0x32, 0x73,
+ 0x24, 0x90, 0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x74, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x34, 0x53, 0x06, 0x90, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27,
+ 0x56, 0x20, 0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xe8, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x37, 0x07, 0x38, 0x20, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1c,
+ 0x05, 0x10, 0x00, 0x00, 0x00, 0x00, 0x38, 0xe7, 0x1a, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x39, 0xfb, 0xe7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6,
+ 0xfc, 0x20, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0xc9, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x3c, 0xb0, 0x18, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb,
+ 0xab, 0x10, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xfa, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x3f, 0x9b, 0x8d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f,
+ 0xdc, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0xa9, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x42, 0x4f, 0xbe, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64,
+ 0x8b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0xa0, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x45, 0x44, 0x6d, 0x90, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3,
+ 0xd3, 0x20, 0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x8a, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x47, 0xd3, 0xb5, 0x20, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d,
+ 0x6c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x97, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x4a, 0xed, 0x4e, 0x10, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c,
+ 0xb3, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x6a, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x4d, 0x7c, 0x95, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6,
+ 0x4c, 0x90, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x77, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x50, 0x96, 0x2e, 0x90, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c,
+ 0x59, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x52, 0x76, 0x10, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x53, 0x1c, 0x3b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55,
+ 0xf2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x54, 0xfc, 0x1d, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x56, 0x35, 0xd4, 0x90, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5,
+ 0x3a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xf1, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x58, 0xc5, 0x1c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe,
+ 0xd3, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xfe, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x5b, 0xde, 0xb5, 0x10, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84,
+ 0xe0, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x97, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x5e, 0x64, 0xc2, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e,
+ 0x79, 0x10, 0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xde, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x61, 0x87, 0x95, 0x90, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d,
+ 0xc0, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x77, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x64, 0x0d, 0xa2, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47,
+ 0x59, 0x90, 0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x84, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x67, 0x27, 0x3b, 0x90, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd,
+ 0x66, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x69, 0x07, 0x1d, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x69, 0xad, 0x48, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6,
+ 0xff, 0x90, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x65, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x6c, 0xd0, 0x1c, 0x10, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76,
+ 0x47, 0x20, 0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xfe, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x6f, 0x56, 0x29, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f,
+ 0xe0, 0x10, 0x00, 0x00, 0x00, 0x00, 0x71, 0x36, 0x0b, 0x20, 0x00, 0x00,
+ 0x00, 0x00, 0x72, 0x6f, 0xc2, 0x10, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15,
+ 0xed, 0x20, 0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0xa4, 0x10, 0x00, 0x00,
+ 0x00, 0x00, 0x74, 0xff, 0x09, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38,
+ 0xc0, 0x90, 0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xeb, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x78, 0x18, 0xa2, 0x90, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe,
+ 0xcd, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x84, 0x90, 0x00, 0x00,
+ 0x00, 0x00, 0x7a, 0x9e, 0xaf, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8,
+ 0x66, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x91, 0xa0, 0x00, 0x00,
+ 0x00, 0x00, 0x7d, 0xb8, 0x48, 0x90, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e,
+ 0x73, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x2a, 0x90, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0x91, 0x26, 0x00, 0x00, 0xff, 0xff,
+ 0x9d, 0x90, 0x01, 0x04, 0xff, 0xff, 0x8f, 0x80, 0x00, 0x08, 0xff, 0xff,
+ 0x9d, 0x90, 0x01, 0x0c, 0xff, 0xff, 0x9d, 0x90, 0x01, 0x10, 0x4c, 0x4d,
+ 0x54, 0x00, 0x50, 0x44, 0x54, 0x00, 0x50, 0x53, 0x54, 0x00, 0x50, 0x57,
+ 0x54, 0x00, 0x50, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x00, 0x00, 0x01, 0x0a, 0x50, 0x53, 0x54, 0x38, 0x50, 0x44, 0x54,
+ 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31, 0x2e,
+ 0x31, 0x2e, 0x30, 0x0a
};
-unsigned int America_Los_Angeles_len = 2845;
+unsigned int America_Los_Angeles_len = 2836;
unsigned char America_New_York[] = {
0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
@@ -349,203 +348,202 @@ unsigned char America_New_York[] = {
0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xf8, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x03, 0xf0, 0x90,
- 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x1e, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0x9f, 0xba, 0xeb, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x86, 0x00, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xcd, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xa2, 0x65, 0xe2, 0x70, 0xff, 0xff, 0xff, 0xff, 0xa3, 0x83, 0xe9, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xa4, 0x6a, 0xae, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xa5, 0x35, 0xa7, 0x60, 0xff, 0xff, 0xff, 0xff, 0xa6, 0x53, 0xca, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xa7, 0x15, 0x89, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xa8, 0x33, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xa8, 0xfe, 0xa5, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xaa, 0x13, 0x8e, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xaa, 0xde, 0x87, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xab, 0xf3, 0x70, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xac, 0xbe, 0x69, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xad, 0xd3, 0x52, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xae, 0x9e, 0x4b, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xaf, 0xb3, 0x34, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xb0, 0x7e, 0x2d, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x9c, 0x51, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xb2, 0x67, 0x4a, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xb3, 0x7c, 0x33, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x47, 0x2c, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xb5, 0x5c, 0x15, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xb6, 0x27, 0x0e, 0x60, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x3b, 0xf7, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xb8, 0x06, 0xf0, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xb9, 0x1b, 0xd9, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb9, 0xe6, 0xd2, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xbb, 0x04, 0xf5, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xbb, 0xc6, 0xb4, 0x60, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xe4, 0xd7, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xbd, 0xaf, 0xd0, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xbe, 0xc4, 0xb9, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0xb2, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa4, 0x9b, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xc1, 0x6f, 0x94, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc2, 0x84, 0x7d, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xc3, 0x4f, 0x76, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xc4, 0x64, 0x5f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xc5, 0x2f, 0x58, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xc6, 0x4d, 0x7c, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xc7, 0x0f, 0x3a, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc8, 0x2d, 0x5e, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xc8, 0xf8, 0x57, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xca, 0x0d, 0x40, 0x70, 0xff, 0xff, 0xff, 0xff, 0xca, 0xd8, 0x39, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xcb, 0x88, 0xf0, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xd2, 0x60, 0xfb, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xd3, 0x75, 0xe4, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xd4, 0x40, 0xdd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x55, 0xc6, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xd6, 0x20, 0xbf, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xd7, 0x35, 0xa8, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x00, 0xa1, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xd9, 0x15, 0x8a, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xd9, 0xe0, 0x83, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xda, 0xfe, 0xa7, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x65, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xdc, 0xde, 0x89, 0x70, 0xff, 0xff, 0xff, 0xff, 0xdd, 0xa9, 0x82, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x6b, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xdf, 0x89, 0x64, 0x60, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9e, 0x4d, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x46, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xe2, 0x7e, 0x2f, 0x70, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x49, 0x28, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x11, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xe5, 0x57, 0x2e, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x47, 0x2d, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xe7, 0x37, 0x10, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xe8, 0x27, 0x0f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x16, 0xf2, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xea, 0x06, 0xf1, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xea, 0xf6, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xe6, 0xd3, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xec, 0xd6, 0xb6, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xed, 0xc6, 0xb5, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xee, 0xbf, 0xd3, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xd2, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xf0, 0x9f, 0xb5, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf1, 0x8f, 0xb4, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0x97, 0x60, 0xff, 0xff, 0xff, 0xff,
- 0xf3, 0x6f, 0x96, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf4, 0x5f, 0x79, 0x60,
- 0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x78, 0x70, 0xff, 0xff, 0xff, 0xff,
- 0xf6, 0x3f, 0x5b, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x2f, 0x5a, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0x77, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xf9, 0x0f, 0x3c, 0x70, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x08, 0x59, 0xe0,
- 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x58, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xfb, 0xe8, 0x3b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd8, 0x3a, 0xf0,
- 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x1d, 0xe0, 0xff, 0xff, 0xff, 0xff,
- 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa7, 0xff, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x87, 0xe1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x02, 0x77, 0xe0, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0xfe, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x04, 0x60, 0xfd, 0x70, 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0xe0, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x06, 0x40, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x07, 0x30, 0xc2, 0x60, 0x00, 0x00, 0x00, 0x00, 0x07, 0x8d, 0x19, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xa4, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x09, 0xad, 0x94, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf0, 0x86, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x0c, 0xd9, 0xa2, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xc0, 0x67, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0x84, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x0f, 0xa9, 0x83, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x10, 0x99, 0x66, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x65, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x12, 0x79, 0x48, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x13, 0x69, 0x47, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x2a, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x15, 0x49, 0x29, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x16, 0x39, 0x0c, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x0b, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x18, 0x22, 0x29, 0x60, 0x00, 0x00, 0x00, 0x00, 0x19, 0x08, 0xed, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x0b, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x1a, 0xf2, 0x0a, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0xed, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd1, 0xec, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x1d, 0xc1, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xb1, 0xce, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xb1, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x20, 0x76, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x21, 0x81, 0x93, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x22, 0x55, 0xe2, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x23, 0x6a, 0xaf, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x24, 0x35, 0xc4, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0x91, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x26, 0x15, 0xa6, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x27, 0x2a, 0x73, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xc3, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x29, 0x0a, 0x55, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x29, 0xde, 0xa5, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x37, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x2b, 0xbe, 0x87, 0x70, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd3, 0x54, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x69, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x2e, 0xb3, 0x36, 0x60, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x7e, 0x4b, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x18, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x31, 0x67, 0x67, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0xfa, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x49, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x34, 0x52, 0xdc, 0x60, 0x00, 0x00, 0x00, 0x00, 0x35, 0x27, 0x2b, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xbe, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x37, 0x07, 0x0d, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0xda, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x38, 0xe6, 0xef, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x39, 0xfb, 0xbc, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc6, 0xd1, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0x9e, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x3c, 0xaf, 0xee, 0x70, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbb, 0x80, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xd0, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x3f, 0x9b, 0x62, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x40, 0x6f, 0xb2, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0x7f, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x42, 0x4f, 0x94, 0x70, 0x00, 0x00, 0x00, 0x00, 0x43, 0x64, 0x61, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0x76, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x45, 0x44, 0x43, 0x60, 0x00, 0x00, 0x00, 0x00, 0x45, 0xf3, 0xa8, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x5f, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x47, 0xd3, 0x8a, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x49, 0x0d, 0x41, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x6c, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x4a, 0xed, 0x23, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x9c, 0x89, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x4d, 0x7c, 0x6b, 0x70, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xb6, 0x22, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x4d, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x50, 0x96, 0x04, 0x60, 0x00, 0x00, 0x00, 0x00, 0x51, 0x3c, 0x2f, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x52, 0x75, 0xe6, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x53, 0x1c, 0x11, 0x70, 0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0xc8, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x54, 0xfb, 0xf3, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x56, 0x35, 0xaa, 0x60, 0x00, 0x00, 0x00, 0x00, 0x56, 0xe5, 0x0f, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xc6, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x58, 0xc4, 0xf1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x59, 0xfe, 0xa8, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xd3, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x5b, 0xde, 0x8a, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x84, 0xb5, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x6c, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x5e, 0x64, 0x97, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9e, 0x4e, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xb4, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x61, 0x87, 0x6b, 0x60, 0x00, 0x00, 0x00, 0x00, 0x62, 0x2d, 0x96, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x4d, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x64, 0x0d, 0x78, 0x70, 0x00, 0x00, 0x00, 0x00, 0x65, 0x47, 0x2f, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x5a, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x67, 0x27, 0x11, 0x60, 0x00, 0x00, 0x00, 0x00, 0x67, 0xcd, 0x3c, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x69, 0x06, 0xf3, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x69, 0xad, 0x1e, 0x70, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xe6, 0xd5, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x3a, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x6c, 0xcf, 0xf1, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x76, 0x1c, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xd3, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x6f, 0x55, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x70, 0x8f, 0xb5, 0xe0,
- 0x00, 0x00, 0x00, 0x00, 0x71, 0x35, 0xe0, 0xf0, 0x00, 0x00, 0x00, 0x00,
- 0x72, 0x6f, 0x97, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x73, 0x15, 0xc2, 0xf0,
- 0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0x79, 0xe0, 0x00, 0x00, 0x00, 0x00,
- 0x74, 0xfe, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00, 0x76, 0x38, 0x96, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xc1, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x78, 0x18, 0x78, 0x60, 0x00, 0x00, 0x00, 0x00, 0x78, 0xbe, 0xa3, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x5a, 0x60, 0x00, 0x00, 0x00, 0x00,
- 0x7a, 0x9e, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xd8, 0x3c, 0x60,
- 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x67, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x7d, 0xb8, 0x1e, 0x60, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x5e, 0x49, 0x70,
- 0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x00, 0x60, 0x00, 0x02, 0x01, 0x02,
- 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
- 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
- 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
- 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec,
+ 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0xff, 0xff, 0xff, 0xff,
+ 0x5e, 0x03, 0xf0, 0x90, 0xff, 0xff, 0xff, 0xff, 0x9e, 0xa6, 0x1e, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0x9f, 0xba, 0xeb, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xa0, 0x86, 0x00, 0x70, 0xff, 0xff, 0xff, 0xff, 0xa1, 0x9a, 0xcd, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xa2, 0x65, 0xe2, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xa3, 0x83, 0xe9, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xa4, 0x6a, 0xae, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xa5, 0x35, 0xa7, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xa6, 0x53, 0xca, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xa7, 0x15, 0x89, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xa8, 0x33, 0xac, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xa8, 0xfe, 0xa5, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xaa, 0x13, 0x8e, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xaa, 0xde, 0x87, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xab, 0xf3, 0x70, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xac, 0xbe, 0x69, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xad, 0xd3, 0x52, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xae, 0x9e, 0x4b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xaf, 0xb3, 0x34, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xb0, 0x7e, 0x2d, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xb1, 0x9c, 0x51, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb2, 0x67, 0x4a, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xb3, 0x7c, 0x33, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xb4, 0x47, 0x2c, 0x60, 0xff, 0xff, 0xff, 0xff, 0xb5, 0x5c, 0x15, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xb6, 0x27, 0x0e, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xb7, 0x3b, 0xf7, 0x70, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x06, 0xf0, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xb9, 0x1b, 0xd9, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xb9, 0xe6, 0xd2, 0x60, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x04, 0xf5, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xbb, 0xc6, 0xb4, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xbc, 0xe4, 0xd7, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xaf, 0xd0, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xbe, 0xc4, 0xb9, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0x8f, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc0, 0xa4, 0x9b, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xc1, 0x6f, 0x94, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xc2, 0x84, 0x7d, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x4f, 0x76, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xc4, 0x64, 0x5f, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xc5, 0x2f, 0x58, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xc6, 0x4d, 0x7c, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xc7, 0x0f, 0x3a, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xc8, 0x2d, 0x5e, 0x70, 0xff, 0xff, 0xff, 0xff, 0xc8, 0xf8, 0x57, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xca, 0x0d, 0x40, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xca, 0xd8, 0x39, 0x60, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x88, 0xf0, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xd2, 0x23, 0xf4, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xd2, 0x60, 0xfb, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x75, 0xe4, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xd4, 0x40, 0xdd, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xd5, 0x55, 0xc6, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xd6, 0x20, 0xbf, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xd7, 0x35, 0xa8, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xd8, 0x00, 0xa1, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xd9, 0x15, 0x8a, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xd9, 0xe0, 0x83, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xda, 0xfe, 0xa7, 0x70, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xc0, 0x65, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xdc, 0xde, 0x89, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xdd, 0xa9, 0x82, 0x60, 0xff, 0xff, 0xff, 0xff, 0xde, 0xbe, 0x6b, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xdf, 0x89, 0x64, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xe0, 0x9e, 0x4d, 0x70, 0xff, 0xff, 0xff, 0xff, 0xe1, 0x69, 0x46, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, 0x2f, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xe3, 0x49, 0x28, 0x60, 0xff, 0xff, 0xff, 0xff, 0xe4, 0x5e, 0x11, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xe5, 0x57, 0x2e, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xe6, 0x47, 0x2d, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xe7, 0x37, 0x10, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xe8, 0x27, 0x0f, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xe9, 0x16, 0xf2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xea, 0x06, 0xf1, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xea, 0xf6, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xeb, 0xe6, 0xd3, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd6, 0xb6, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xed, 0xc6, 0xb5, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xee, 0xbf, 0xd3, 0x60, 0xff, 0xff, 0xff, 0xff, 0xef, 0xaf, 0xd2, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x9f, 0xb5, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xf1, 0x8f, 0xb4, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x7f, 0x97, 0x60,
+ 0xff, 0xff, 0xff, 0xff, 0xf3, 0x6f, 0x96, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xf4, 0x5f, 0x79, 0x60, 0xff, 0xff, 0xff, 0xff, 0xf5, 0x4f, 0x78, 0x70,
+ 0xff, 0xff, 0xff, 0xff, 0xf6, 0x3f, 0x5b, 0x60, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0x2f, 0x5a, 0x70, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x28, 0x77, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xf9, 0x0f, 0x3c, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xfa, 0x08, 0x59, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf8, 0x58, 0xf0,
+ 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe8, 0x3b, 0xe0, 0xff, 0xff, 0xff, 0xff,
+ 0xfc, 0xd8, 0x3a, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xc8, 0x1d, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb8, 0x1c, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xa7, 0xff, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xfe, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x87, 0xe1, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x77, 0xe0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0xfe, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x60, 0xfd, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x05, 0x50, 0xe0, 0x60, 0x00, 0x00, 0x00, 0x00, 0x06, 0x40, 0xdf, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x30, 0xc2, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x8d, 0x19, 0x70, 0x00, 0x00, 0x00, 0x00, 0x09, 0x10, 0xa4, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0xad, 0x94, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x0a, 0xf0, 0x86, 0x60, 0x00, 0x00, 0x00, 0x00, 0x0b, 0xe0, 0x85, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd9, 0xa2, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x0d, 0xc0, 0x67, 0x70, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb9, 0x84, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xa9, 0x83, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x10, 0x99, 0x66, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x11, 0x89, 0x65, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x12, 0x79, 0x48, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x13, 0x69, 0x47, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x14, 0x59, 0x2a, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x15, 0x49, 0x29, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x16, 0x39, 0x0c, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x17, 0x29, 0x0b, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x18, 0x22, 0x29, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x19, 0x08, 0xed, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x02, 0x0b, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x1a, 0xf2, 0x0a, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1b, 0xe1, 0xed, 0x60, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xd1, 0xec, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, 0xcf, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x1e, 0xb1, 0xce, 0x70, 0x00, 0x00, 0x00, 0x00, 0x1f, 0xa1, 0xb1, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x20, 0x76, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x21, 0x81, 0x93, 0x60, 0x00, 0x00, 0x00, 0x00, 0x22, 0x55, 0xe2, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x23, 0x6a, 0xaf, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x24, 0x35, 0xc4, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x25, 0x4a, 0x91, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x26, 0x15, 0xa6, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x27, 0x2a, 0x73, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x27, 0xfe, 0xc3, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x29, 0x0a, 0x55, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x29, 0xde, 0xa5, 0x70, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xea, 0x37, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x2b, 0xbe, 0x87, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x2c, 0xd3, 0x54, 0x60, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x9e, 0x69, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb3, 0x36, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x2f, 0x7e, 0x4b, 0x70, 0x00, 0x00, 0x00, 0x00, 0x30, 0x93, 0x18, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x31, 0x67, 0x67, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x32, 0x72, 0xfa, 0x60, 0x00, 0x00, 0x00, 0x00, 0x33, 0x47, 0x49, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x34, 0x52, 0xdc, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x35, 0x27, 0x2b, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x36, 0x32, 0xbe, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x37, 0x07, 0x0d, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x38, 0x1b, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x38, 0xe6, 0xef, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x39, 0xfb, 0xbc, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x3a, 0xc6, 0xd1, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xdb, 0x9e, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0xaf, 0xee, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3d, 0xbb, 0x80, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x8f, 0xd0, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9b, 0x62, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x40, 0x6f, 0xb2, 0x70, 0x00, 0x00, 0x00, 0x00, 0x41, 0x84, 0x7f, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x42, 0x4f, 0x94, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x43, 0x64, 0x61, 0x60, 0x00, 0x00, 0x00, 0x00, 0x44, 0x2f, 0x76, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x45, 0x44, 0x43, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0xf3, 0xa8, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x47, 0x2d, 0x5f, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0xd3, 0x8a, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x49, 0x0d, 0x41, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x49, 0xb3, 0x6c, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x4a, 0xed, 0x23, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x4b, 0x9c, 0x89, 0x70, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xd6, 0x40, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x4d, 0x7c, 0x6b, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x4e, 0xb6, 0x22, 0x60, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x5c, 0x4d, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x50, 0x96, 0x04, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x51, 0x3c, 0x2f, 0x70, 0x00, 0x00, 0x00, 0x00, 0x52, 0x75, 0xe6, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x53, 0x1c, 0x11, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x54, 0x55, 0xc8, 0x60, 0x00, 0x00, 0x00, 0x00, 0x54, 0xfb, 0xf3, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x56, 0x35, 0xaa, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0xe5, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x58, 0x1e, 0xc6, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x58, 0xc4, 0xf1, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x59, 0xfe, 0xa8, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xa4, 0xd3, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde, 0x8a, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x5c, 0x84, 0xb5, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbe, 0x6c, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x5e, 0x64, 0x97, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x5f, 0x9e, 0x4e, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x60, 0x4d, 0xb4, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x61, 0x87, 0x6b, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x62, 0x2d, 0x96, 0x70, 0x00, 0x00, 0x00, 0x00, 0x63, 0x67, 0x4d, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x64, 0x0d, 0x78, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x65, 0x47, 0x2f, 0x60, 0x00, 0x00, 0x00, 0x00, 0x65, 0xed, 0x5a, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x67, 0x27, 0x11, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x67, 0xcd, 0x3c, 0x70, 0x00, 0x00, 0x00, 0x00, 0x69, 0x06, 0xf3, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x69, 0xad, 0x1e, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x6a, 0xe6, 0xd5, 0x60, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x96, 0x3a, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x6c, 0xcf, 0xf1, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x6d, 0x76, 0x1c, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x6e, 0xaf, 0xd3, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x6f, 0x55, 0xfe, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x8f, 0xb5, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x71, 0x35, 0xe0, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x72, 0x6f, 0x97, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x15, 0xc2, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x74, 0x4f, 0x79, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x74, 0xfe, 0xdf, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x76, 0x38, 0x96, 0x60, 0x00, 0x00, 0x00, 0x00, 0x76, 0xde, 0xc1, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x78, 0x18, 0x78, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x78, 0xbe, 0xa3, 0x70, 0x00, 0x00, 0x00, 0x00, 0x79, 0xf8, 0x5a, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x7a, 0x9e, 0x85, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x7b, 0xd8, 0x3c, 0x60, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7e, 0x67, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x7d, 0xb8, 0x1e, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x7e, 0x5e, 0x49, 0x70, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x98, 0x00, 0x60,
0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0xba, 0x9e, 0x00, 0x00, 0xff,
- 0xff, 0xc7, 0xc0, 0x01, 0x04, 0xff, 0xff, 0xb9, 0xb0, 0x00, 0x08, 0xff,
- 0xff, 0xc7, 0xc0, 0x01, 0x0c, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x10, 0x4c,
- 0x4d, 0x54, 0x00, 0x45, 0x44, 0x54, 0x00, 0x45, 0x53, 0x54, 0x00, 0x45,
- 0x57, 0x54, 0x00, 0x45, 0x50, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
- 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x45, 0x53, 0x54, 0x35, 0x45, 0x44,
- 0x54, 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c, 0x4d, 0x31, 0x31,
- 0x2e, 0x31, 0x2e, 0x30, 0x0a
+ 0x02, 0x03, 0x04, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
+ 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0xff, 0xff, 0xba, 0x9e,
+ 0x00, 0x00, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x04, 0xff, 0xff, 0xb9, 0xb0,
+ 0x00, 0x08, 0xff, 0xff, 0xc7, 0xc0, 0x01, 0x0c, 0xff, 0xff, 0xc7, 0xc0,
+ 0x01, 0x10, 0x4c, 0x4d, 0x54, 0x00, 0x45, 0x44, 0x54, 0x00, 0x45, 0x53,
+ 0x54, 0x00, 0x45, 0x57, 0x54, 0x00, 0x45, 0x50, 0x54, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x45, 0x53, 0x54,
+ 0x35, 0x45, 0x44, 0x54, 0x2c, 0x4d, 0x33, 0x2e, 0x32, 0x2e, 0x30, 0x2c,
+ 0x4d, 0x31, 0x31, 0x2e, 0x31, 0x2e, 0x30, 0x0a
};
-unsigned int America_New_York_len = 3545;
+unsigned int America_New_York_len = 3536;
unsigned char Australia_Sydney[] = {
0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e, 0x80, 0x00, 0x00, 0x00,
- 0x9c, 0x4e, 0xa6, 0x9c, 0x9c, 0xbc, 0x20, 0xf0, 0xcb, 0x54, 0xb3, 0x00,
- 0xcb, 0xc7, 0x57, 0x70, 0xcc, 0xb7, 0x56, 0x80, 0xcd, 0xa7, 0x39, 0x70,
- 0xce, 0xa0, 0x73, 0x00, 0xcf, 0x87, 0x1b, 0x70, 0x03, 0x70, 0x39, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e,
+ 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x0e, 0x80, 0x00, 0x00, 0x00,
+ 0x9c, 0x4e, 0xc2, 0x80, 0x9c, 0xbc, 0x2f, 0x00, 0xcb, 0x54, 0xb3, 0x00,
+ 0xcb, 0xc7, 0x65, 0x80, 0xcc, 0xb7, 0x56, 0x80, 0xcd, 0xa7, 0x47, 0x80,
+ 0xce, 0xa0, 0x73, 0x00, 0xcf, 0x87, 0x29, 0x80, 0x03, 0x70, 0x39, 0x80,
0x04, 0x0d, 0x1c, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x05, 0xf6, 0x38, 0x80,
0x07, 0x2f, 0xfd, 0x80, 0x07, 0xd6, 0x1a, 0x80, 0x09, 0x0f, 0xdf, 0x80,
0x09, 0xb5, 0xfc, 0x80, 0x0a, 0xef, 0xc1, 0x80, 0x0b, 0x9f, 0x19, 0x00,
@@ -590,140 +588,137 @@ unsigned char Australia_Sydney[] = {
0x77, 0xe9, 0x8f, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x79, 0xc9, 0x71, 0x00,
0x7a, 0xb9, 0x62, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x7c, 0xa2, 0x7e, 0x80,
0x7d, 0x92, 0x6f, 0x80, 0x7e, 0x82, 0x60, 0x80, 0x7f, 0x72, 0x51, 0x80,
- 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03,
- 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x00, 0x00,
+ 0x03, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x00, 0x00,
0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
- 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00,
- 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, 0x00, 0x41, 0x45, 0x44, 0x54,
- 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00,
+ 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d,
+ 0x54, 0x00, 0x41, 0x45, 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00,
+ 0x00, 0x01, 0x01, 0x00, 0x54, 0x5a, 0x69, 0x66, 0x32, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x8f, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0e,
- 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
- 0x73, 0x16, 0x7f, 0x3c, 0xff, 0xff, 0xff, 0xff, 0x9c, 0x4e, 0xa6, 0x9c,
- 0xff, 0xff, 0xff, 0xff, 0x9c, 0xbc, 0x20, 0xf0, 0xff, 0xff, 0xff, 0xff,
- 0xcb, 0x54, 0xb3, 0x00, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xc7, 0x57, 0x70,
- 0xff, 0xff, 0xff, 0xff, 0xcc, 0xb7, 0x56, 0x80, 0xff, 0xff, 0xff, 0xff,
- 0xcd, 0xa7, 0x39, 0x70, 0xff, 0xff, 0xff, 0xff, 0xce, 0xa0, 0x73, 0x00,
- 0xff, 0xff, 0xff, 0xff, 0xcf, 0x87, 0x1b, 0x70, 0x00, 0x00, 0x00, 0x00,
- 0x03, 0x70, 0x39, 0x80, 0x00, 0x00, 0x00, 0x00, 0x04, 0x0d, 0x1c, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0x1b, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x05, 0xf6, 0x38, 0x80, 0x00, 0x00, 0x00, 0x00, 0x07, 0x2f, 0xfd, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x07, 0xd6, 0x1a, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x09, 0x0f, 0xdf, 0x80, 0x00, 0x00, 0x00, 0x00, 0x09, 0xb5, 0xfc, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x0a, 0xef, 0xc1, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x0b, 0x9f, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xd8, 0xde, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x0d, 0x7e, 0xfb, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x0e, 0xb8, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x5e, 0xdd, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x98, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x11, 0x3e, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x78, 0x84, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x13, 0x1e, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x14, 0x58, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0xfe, 0x83, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x16, 0x38, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x17, 0x0c, 0x89, 0x80, 0x00, 0x00, 0x00, 0x00, 0x18, 0x21, 0x64, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x18, 0xc7, 0x81, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x1a, 0x01, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xa7, 0x63, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0x28, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x1c, 0x87, 0x45, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xc1, 0x0a, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x1e, 0x79, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x1f, 0x97, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x59, 0x7e, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0xce, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x22, 0x42, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x69, 0xeb, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x24, 0x22, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x25, 0x49, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xef, 0xea, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x27, 0x29, 0xaf, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x27, 0xcf, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x09, 0x91, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x29, 0xaf, 0xae, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x2a, 0xe9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x98, 0xca, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd2, 0x8f, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x2d, 0x78, 0xac, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xb2, 0x71, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x2f, 0x58, 0x8e, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x30, 0x92, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00, 0x31, 0x5d, 0x5a, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0x35, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x33, 0x3d, 0x3c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x34, 0x52, 0x17, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x35, 0x1d, 0x1e, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x36, 0x31, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x36, 0xfd, 0x00, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0x16, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x38, 0xdc, 0xe2, 0x80, 0x00, 0x00, 0x00, 0x00, 0x39, 0xa7, 0xe9, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x3a, 0xbc, 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x3b, 0xda, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xa5, 0xe1, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x3d, 0xba, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x3e, 0x85, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x9a, 0x9e, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x40, 0x65, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x41, 0x83, 0xba, 0x80, 0x00, 0x00, 0x00, 0x00, 0x42, 0x45, 0x87, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x43, 0x63, 0x9c, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x44, 0x2e, 0xa3, 0x80, 0x00, 0x00, 0x00, 0x00, 0x45, 0x43, 0x7e, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x46, 0x05, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x47, 0x23, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x47, 0xf7, 0xa2, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x48, 0xe7, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x49, 0xd7, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0x75, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x4b, 0xb7, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x4c, 0xa7, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x97, 0x48, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x4e, 0x87, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x4f, 0x77, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x70, 0x55, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x51, 0x60, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x52, 0x50, 0x37, 0x80, 0x00, 0x00, 0x00, 0x00, 0x53, 0x40, 0x28, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x54, 0x30, 0x19, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x55, 0x20, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x56, 0x0f, 0xfb, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x56, 0xff, 0xec, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x57, 0xef, 0xdd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x58, 0xdf, 0xce, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x59, 0xcf, 0xbf, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x5a, 0xbf, 0xb0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xb8, 0xdc, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa8, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x5d, 0x98, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x88, 0xaf, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x5f, 0x78, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x60, 0x68, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x58, 0x82, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x62, 0x48, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x63, 0x38, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x28, 0x55, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x65, 0x18, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x66, 0x11, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00, 0x67, 0x01, 0x62, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x67, 0xf1, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x68, 0xe1, 0x44, 0x80, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd1, 0x35, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x6a, 0xc1, 0x26, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x6b, 0xb1, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xa1, 0x08, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x6d, 0x90, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x6e, 0x80, 0xea, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x70, 0xdb, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x70, 0x6a, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x71, 0x59, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x49, 0xe9, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x73, 0x39, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x74, 0x29, 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x19, 0xbc, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x76, 0x09, 0xad, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x76, 0xf9, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xe9, 0x8f, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x78, 0xd9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x79, 0xc9, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0xb9, 0x62, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x7b, 0xb2, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x7c, 0xa2, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x92, 0x6f, 0x80,
- 0x00, 0x00, 0x00, 0x00, 0x7e, 0x82, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00,
- 0x7f, 0x72, 0x51, 0x80, 0x00, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02,
- 0x01, 0x02, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04, 0x03, 0x04,
- 0x03, 0x04, 0x03, 0x00, 0x00, 0x8d, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x9a,
- 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x00, 0x00, 0x9a,
- 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54,
- 0x00, 0x41, 0x45, 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00,
- 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x41, 0x45,
- 0x53, 0x54, 0x2d, 0x31, 0x30, 0x41, 0x45, 0x44, 0x54, 0x2c, 0x4d, 0x31,
- 0x30, 0x2e, 0x31, 0x2e, 0x30, 0x2c, 0x4d, 0x34, 0x2e, 0x31, 0x2e, 0x30,
- 0x2f, 0x33, 0x0a
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x8e, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x0e,
+ 0xff, 0xff, 0xff, 0xff, 0x73, 0x16, 0x7f, 0x3c, 0xff, 0xff, 0xff, 0xff,
+ 0x9c, 0x4e, 0xc2, 0x80, 0xff, 0xff, 0xff, 0xff, 0x9c, 0xbc, 0x2f, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xcb, 0x54, 0xb3, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xcb, 0xc7, 0x65, 0x80, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xb7, 0x56, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xcd, 0xa7, 0x47, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xce, 0xa0, 0x73, 0x00, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x87, 0x29, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x70, 0x39, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x0d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x50, 0x1b, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0xf6, 0x38, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x2f, 0xfd, 0x80, 0x00, 0x00, 0x00, 0x00, 0x07, 0xd6, 0x1a, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x09, 0x0f, 0xdf, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x09, 0xb5, 0xfc, 0x80, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xef, 0xc1, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x0b, 0x9f, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0xd8, 0xde, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x7e, 0xfb, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0xb8, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x5e, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x98, 0xa2, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x11, 0x3e, 0xbf, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x12, 0x78, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x1e, 0xa1, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x14, 0x58, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x14, 0xfe, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x38, 0x48, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x17, 0x0c, 0x89, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x18, 0x21, 0x64, 0x80, 0x00, 0x00, 0x00, 0x00, 0x18, 0xc7, 0x81, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x1a, 0x01, 0x46, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1a, 0xa7, 0x63, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1b, 0xe1, 0x28, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x87, 0x45, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1d, 0xc1, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x79, 0x9c, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x97, 0xb2, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x20, 0x59, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x21, 0x80, 0xce, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x22, 0x42, 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x23, 0x69, 0xeb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x22, 0x7d, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x25, 0x49, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x25, 0xef, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x29, 0xaf, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x27, 0xcf, 0xcc, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x29, 0x09, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0xaf, 0xae, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x2a, 0xe9, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2b, 0x98, 0xca, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2c, 0xd2, 0x8f, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x2d, 0x78, 0xac, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x2e, 0xb2, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x58, 0x8e, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x30, 0x92, 0x53, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x31, 0x5d, 0x5a, 0x80, 0x00, 0x00, 0x00, 0x00, 0x32, 0x72, 0x35, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x33, 0x3d, 0x3c, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x34, 0x52, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00, 0x35, 0x1d, 0x1e, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x36, 0x31, 0xf9, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x36, 0xfd, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x1b, 0x16, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0xdc, 0xe2, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x39, 0xa7, 0xe9, 0x80, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xbc, 0xc4, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x3b, 0xda, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3c, 0xa5, 0xe1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xba, 0xbc, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x85, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0x9a, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x65, 0xa5, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x41, 0x83, 0xba, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x42, 0x45, 0x87, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x63, 0x9c, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x44, 0x2e, 0xa3, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x45, 0x43, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00, 0x46, 0x05, 0x4b, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x47, 0x23, 0x60, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x47, 0xf7, 0xa2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0xe7, 0x93, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x49, 0xd7, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4a, 0xc7, 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0xb7, 0x66, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4c, 0xa7, 0x57, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x4d, 0x97, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x87, 0x39, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x4f, 0x77, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x70, 0x55, 0x80, 0x00, 0x00, 0x00, 0x00, 0x51, 0x60, 0x46, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x50, 0x37, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x53, 0x40, 0x28, 0x80, 0x00, 0x00, 0x00, 0x00, 0x54, 0x30, 0x19, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x55, 0x20, 0x0a, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x56, 0x0f, 0xfb, 0x80, 0x00, 0x00, 0x00, 0x00, 0x56, 0xff, 0xec, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x57, 0xef, 0xdd, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x58, 0xdf, 0xce, 0x80, 0x00, 0x00, 0x00, 0x00, 0x59, 0xcf, 0xbf, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x5a, 0xbf, 0xb0, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x5b, 0xb8, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa8, 0xcd, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x5d, 0x98, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x5e, 0x88, 0xaf, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x78, 0xa0, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x60, 0x68, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x61, 0x58, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x48, 0x73, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x63, 0x38, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x64, 0x28, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0x18, 0x46, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x66, 0x11, 0x71, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x67, 0x01, 0x62, 0x80, 0x00, 0x00, 0x00, 0x00, 0x67, 0xf1, 0x53, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x68, 0xe1, 0x44, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x69, 0xd1, 0x35, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xc1, 0x26, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x6b, 0xb1, 0x17, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x6c, 0xa1, 0x08, 0x80, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x90, 0xf9, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x6e, 0x80, 0xea, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x6f, 0x70, 0xdb, 0x80, 0x00, 0x00, 0x00, 0x00, 0x70, 0x6a, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x71, 0x59, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x72, 0x49, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x39, 0xda, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x74, 0x29, 0xcb, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x75, 0x19, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x09, 0xad, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x76, 0xf9, 0x9e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x77, 0xe9, 0x8f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0xd9, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x79, 0xc9, 0x71, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7a, 0xb9, 0x62, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xb2, 0x8d, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x7c, 0xa2, 0x7e, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x7d, 0x92, 0x6f, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x82, 0x60, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0x72, 0x51, 0x80, 0x03, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01,
+ 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x00, 0x00, 0x8d, 0xc4, 0x00, 0x00,
+ 0x00, 0x00, 0x9a, 0xb0, 0x01, 0x04, 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09,
+ 0x00, 0x00, 0x8c, 0xa0, 0x00, 0x09, 0x4c, 0x4d, 0x54, 0x00, 0x41, 0x45,
+ 0x44, 0x54, 0x00, 0x41, 0x45, 0x53, 0x54, 0x00, 0x00, 0x01, 0x01, 0x00,
+ 0x0a, 0x41, 0x45, 0x53, 0x54, 0x2d, 0x31, 0x30, 0x41, 0x45, 0x44, 0x54,
+ 0x2c, 0x4d, 0x31, 0x30, 0x2e, 0x31, 0x2e, 0x30, 0x2c, 0x4d, 0x34, 0x2e,
+ 0x31, 0x2e, 0x30, 0x2f, 0x33, 0x0a
};
-unsigned int Australia_Sydney_len = 2223;
+unsigned int Australia_Sydney_len = 2190;
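
Both arrays above are TZif-format zoneinfo blobs (RFC 8536) regenerated from a newer tzdata release, and the companion _len constants must track the new sizes; the updated Australia_Sydney header, for example, declares four time types instead of five. A minimal sketch of reading the six big-endian counts out of such a blob, assuming only the RFC 8536 header layout (4-byte "TZif" magic, 1 version byte, 15 reserved bytes, then isutcnt, isstdcnt, leapcnt, timecnt, typecnt, charcnt):

    #include <cstdint>
    #include <cstdio>

    // Reads a 32-bit big-endian count from a TZif header.
    static uint32_t ReadBE32(const unsigned char* p) {
      return (uint32_t{p[0]} << 24) | (uint32_t{p[1]} << 16) |
             (uint32_t{p[2]} << 8) | uint32_t{p[3]};
    }

    // Dumps the six header counts of a blob such as Australia_Sydney.
    // Offsets follow RFC 8536: magic (4) + version (1) + reserved (15) = 20.
    void DumpTzifHeader(const unsigned char* data) {
      static const char* const kNames[] = {"isutcnt", "isstdcnt", "leapcnt",
                                           "timecnt", "typecnt", "charcnt"};
      for (int i = 0; i < 6; ++i) {
        std::printf("%s = %u\n", kNames[i],
                    static_cast<unsigned>(ReadBE32(data + 20 + 4 * i)));
      }
    }
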
diff --git a/absl/time/time.h b/absl/time/time.h
index 5abd815a..bd01867e 100644
--- a/absl/time/time.h
+++ b/absl/time/time.h
@@ -120,7 +120,7 @@ using EnableIfFloat =
// Duration
//
-// The `absl::Duration` class represents a signed, fixed-length span of time.
+// The `absl::Duration` class represents a signed, fixed-length amount of time.
// A `Duration` is generated using a unit-specific factory function, or is
// the result of subtracting one `absl::Time` from another. Durations behave
// like unit-safe integers and they support all the natural integer-like
@@ -162,7 +162,7 @@ class Duration {
constexpr Duration() : rep_hi_(0), rep_lo_(0) {} // zero-length duration
// Copyable.
-#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1910
+#if !defined(__clang__) && defined(_MSC_VER) && _MSC_VER < 1930
// Explicitly defining the constexpr copy constructor avoids an MSVC bug.
constexpr Duration(const Duration& d)
: rep_hi_(d.rep_hi_), rep_lo_(d.rep_lo_) {}
@@ -579,7 +579,7 @@ bool ParseDuration(absl::string_view dur_string, Duration* d);
// AbslParseFlag()
//
-// Parses a command-line flag string representation `text` into a a Duration
+// Parses a command-line flag string representation `text` into a Duration
// value. Duration flags must be specified in a format that is valid input for
// `absl::ParseDuration()`.
bool AbslParseFlag(absl::string_view text, Duration* dst, std::string* error);
@@ -750,23 +750,24 @@ constexpr Time UnixEpoch() { return Time(); }
constexpr Time UniversalEpoch() {
// 719162 is the number of days from 0001-01-01 to 1970-01-01,
// assuming the Gregorian calendar.
- return Time(time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, 0U));
+ return Time(
+ time_internal::MakeDuration(-24 * 719162 * int64_t{3600}, uint32_t{0}));
}
// InfiniteFuture()
//
// Returns an `absl::Time` that is infinitely far in the future.
constexpr Time InfiniteFuture() {
- return Time(
- time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U));
+ return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
+ ~uint32_t{0}));
}
// InfinitePast()
//
// Returns an `absl::Time` that is infinitely far in the past.
constexpr Time InfinitePast() {
- return Time(
- time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U));
+ return Time(time_internal::MakeDuration((std::numeric_limits<int64_t>::min)(),
+ ~uint32_t{0}));
}
// FromUnixNanos()
@@ -1422,14 +1423,17 @@ constexpr int64_t GetRepHi(Duration d) { return d.rep_hi_; }
constexpr uint32_t GetRepLo(Duration d) { return d.rep_lo_; }
// Returns true iff d is positive or negative infinity.
-constexpr bool IsInfiniteDuration(Duration d) { return GetRepLo(d) == ~0U; }
+constexpr bool IsInfiniteDuration(Duration d) {
+ return GetRepLo(d) == ~uint32_t{0};
+}
// Returns an infinite Duration with the opposite sign.
// REQUIRES: IsInfiniteDuration(d)
constexpr Duration OppositeInfinity(Duration d) {
return GetRepHi(d) < 0
- ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~0U)
- : MakeDuration((std::numeric_limits<int64_t>::min)(), ~0U);
+ ? MakeDuration((std::numeric_limits<int64_t>::max)(), ~uint32_t{0})
+ : MakeDuration((std::numeric_limits<int64_t>::min)(),
+ ~uint32_t{0});
}
// Returns (-n)-1 (equivalently -(n+1)) without avoidable overflow.
@@ -1568,7 +1572,7 @@ constexpr Duration operator-(Duration d) {
constexpr Duration InfiniteDuration() {
return time_internal::MakeDuration((std::numeric_limits<int64_t>::max)(),
- ~0U);
+ ~uint32_t{0});
}
constexpr Duration FromChrono(const std::chrono::nanoseconds& d) {
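
The time.h hunks above are behavior-preserving: replacing `~0U` with `~uint32_t{0}` spells out the width of the all-ones low word that marks an infinite Duration, rather than leaving it to the platform's `unsigned int`. A minimal illustration, assuming a 32-bit unsigned int as on Abseil's supported targets:

    #include <cstdint>
    #include <limits>

    // Both spellings yield the same all-ones value for the 32-bit low word,
    // but only the second states the intended width explicitly.
    static_assert(~0U == std::numeric_limits<uint32_t>::max(),
                  "holds where unsigned int is 32 bits wide");
    static_assert(~uint32_t{0} == std::numeric_limits<uint32_t>::max(),
                  "width-explicit spelling used by the patch");
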
diff --git a/absl/time/time_test.cc b/absl/time/time_test.cc
index cde9423f..d235e9ad 100644
--- a/absl/time/time_test.cc
+++ b/absl/time/time_test.cc
@@ -377,6 +377,11 @@ TEST(Time, FloorConversion) {
}
TEST(Time, RoundtripConversion) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
#define TEST_CONVERSION_ROUND_TRIP(SOURCE, FROM, TO, MATCHER) \
EXPECT_THAT(TO(FROM(SOURCE)), MATCHER(SOURCE))
@@ -558,6 +563,11 @@ TEST(Time, FromChrono) {
}
TEST(Time, ToChronoTime) {
+#if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
+ ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
+ GTEST_SKIP();
+#endif
+
EXPECT_EQ(std::chrono::system_clock::from_time_t(-1),
absl::ToChronoTime(absl::FromTimeT(-1)));
EXPECT_EQ(std::chrono::system_clock::from_time_t(0),
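
GTEST_SKIP() aborts the running test and reports it as skipped rather than passed, so the two guards above disable the round-trip and chrono conversions only when the build opts in. The macro is consumed here but never defined by the patch; a sketch of how a build might drive it (the -D flag is an assumption, not something the patch adds):

    #include "gtest/gtest.h"

    // Build with e.g. -DABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT=1 to skip.
    TEST(Example, SkippedWhenRequested) {
    #if defined(ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT) && \
        ABSL_SKIP_TIME_TESTS_BROKEN_ON_MSVC_OPT
      GTEST_SKIP() << "known-bad under MSVC optimized builds";
    #endif
      EXPECT_EQ(1, 1);  // Runs only when the macro is absent or zero.
    }
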
diff --git a/absl/types/BUILD.bazel b/absl/types/BUILD.bazel
index 38ed2286..bb801012 100644
--- a/absl/types/BUILD.bazel
+++ b/absl/types/BUILD.bazel
@@ -314,6 +314,7 @@ cc_library(
name = "compare",
hdrs = ["compare.h"],
copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:core_headers",
"//absl/meta:type_traits",
diff --git a/absl/types/CMakeLists.txt b/absl/types/CMakeLists.txt
index d7e8614e..830953ae 100644
--- a/absl/types/CMakeLists.txt
+++ b/absl/types/CMakeLists.txt
@@ -43,6 +43,7 @@ absl_cc_library(
PUBLIC
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
bad_any_cast_impl
@@ -239,6 +240,7 @@ absl_cc_test(
GTest::gmock_main
)
+# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
conformance_testing
diff --git a/absl/types/any.h b/absl/types/any.h
index fc5a0746..204da26d 100644
--- a/absl/types/any.h
+++ b/absl/types/any.h
@@ -81,18 +81,9 @@ ABSL_NAMESPACE_END
#include <utility>
#include "absl/base/internal/fast_type_id.h"
-#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
#include "absl/types/bad_any_cast.h"
-// NOTE: This macro is an implementation detail that is undefined at the bottom
-// of the file. It is not intended for expansion directly from user code.
-#ifdef ABSL_ANY_DETAIL_HAS_RTTI
-#error ABSL_ANY_DETAIL_HAS_RTTI cannot be directly set
-#elif !defined(__GNUC__) || defined(__GXX_RTTI)
-#define ABSL_ANY_DETAIL_HAS_RTTI 1
-#endif // !defined(__GNUC__) || defined(__GXX_RTTI)
-
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -348,7 +339,7 @@ class any {
// returns `false`.
bool has_value() const noexcept { return obj_ != nullptr; }
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
// Returns: typeid(T) if *this has a contained object of type T, otherwise
// typeid(void).
const std::type_info& type() const noexcept {
@@ -358,7 +349,7 @@ class any {
return typeid(void);
}
-#endif // ABSL_ANY_DETAIL_HAS_RTTI
+#endif // ABSL_INTERNAL_HAS_RTTI
private:
// Tagged type-erased abstraction for holding a cloneable object.
@@ -367,9 +358,9 @@ class any {
virtual ~ObjInterface() = default;
virtual std::unique_ptr<ObjInterface> Clone() const = 0;
virtual const void* ObjTypeId() const noexcept = 0;
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
virtual const std::type_info& Type() const noexcept = 0;
-#endif // ABSL_ANY_DETAIL_HAS_RTTI
+#endif // ABSL_INTERNAL_HAS_RTTI
};
// Hold a value of some queryable type, with an ability to Clone it.
@@ -386,9 +377,9 @@ class any {
const void* ObjTypeId() const noexcept final { return IdForType<T>(); }
-#if ABSL_ANY_DETAIL_HAS_RTTI
+#ifdef ABSL_INTERNAL_HAS_RTTI
const std::type_info& Type() const noexcept final { return typeid(T); }
-#endif // ABSL_ANY_DETAIL_HAS_RTTI
+#endif // ABSL_INTERNAL_HAS_RTTI
T value;
};
@@ -521,8 +512,6 @@ T* any_cast(any* operand) noexcept {
ABSL_NAMESPACE_END
} // namespace absl
-#undef ABSL_ANY_DETAIL_HAS_RTTI
-
#endif // ABSL_USES_STD_ANY
#endif // ABSL_TYPES_ANY_H_
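
The any.h change retires the file-local ABSL_ANY_DETAIL_HAS_RTTI (and its trailing #undef) in favor of the centralized ABSL_INTERNAL_HAS_RTTI, which also explains the switch from #if to #ifdef: the central macro is defined only when RTTI is available. Its real definition lives in absl/base/config.h; the following is only a hypothetical approximation using the usual compiler signals:

    // Hypothetical stand-in for ABSL_INTERNAL_HAS_RTTI; the real definition
    // is in absl/base/config.h and may differ in detail.
    #if defined(__cpp_rtti) || defined(__GXX_RTTI) || defined(_CPPRTTI)
    #define EXAMPLE_HAS_RTTI 1
    #endif

    #ifdef EXAMPLE_HAS_RTTI
    #include <typeinfo>
    const char* DynamicName(const std::type_info& info) { return info.name(); }
    #endif
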
diff --git a/absl/types/any_test.cc b/absl/types/any_test.cc
index 70e4ba22..d382b927 100644
--- a/absl/types/any_test.cc
+++ b/absl/types/any_test.cc
@@ -754,26 +754,23 @@ TEST(AnyTest, FailedCopy) {
// Test the guarantees regarding exceptions in emplace.
TEST(AnyTest, FailedEmplace) {
- {
- BadCopyable bad;
- absl::any target;
- ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
- }
+ BadCopyable bad;
+ absl::any target;
+ ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
+}
- {
- BadCopyable bad;
- absl::any target(absl::in_place_type<int>);
- ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
-#if defined(ABSL_USES_STD_ANY) && defined(__GLIBCXX__)
- // libstdc++ std::any::emplace() implementation (as of 7.2) has a bug: if an
- // exception is thrown, *this contains a value.
-#define ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG 1
-#endif
-#if defined(ABSL_HAVE_EXCEPTIONS) && \
- !defined(ABSL_GLIBCXX_ANY_EMPLACE_EXCEPTION_BUG)
- EXPECT_FALSE(target.has_value());
+// GCC and Clang have a bug here.
+// In some cases, the exception seems to be thrown at the wrong time, and
+// target may contain a value.
+#ifdef __GNUC__
+TEST(AnyTest, DISABLED_FailedEmplaceInPlace) {
+#else
+TEST(AnyTest, FailedEmplaceInPlace) {
#endif
- }
+ BadCopyable bad;
+ absl::any target(absl::in_place_type<int>);
+ ABSL_ANY_TEST_EXPECT_BAD_COPY(target.emplace<BadCopyable>(bad));
+ EXPECT_FALSE(target.has_value());
}
} // namespace
diff --git a/absl/types/internal/conformance_profile.h b/absl/types/internal/conformance_profile.h
index cf64ff4f..37b017db 100644
--- a/absl/types/internal/conformance_profile.h
+++ b/absl/types/internal/conformance_profile.h
@@ -719,6 +719,7 @@ struct SyntacticConformanceProfileOf {
type##_support); \
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL(bool, is_##type)
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(default_constructible);
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(move_constructible);
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(copy_constructible);
@@ -733,6 +734,7 @@ ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_equal_comparable);
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(greater_than_comparable);
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(swappable);
ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF(hashable);
+#endif
#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF
#undef ABSL_INTERNAL_CONFORMANCE_TESTING_DATA_MEMBER_DEF_IMPL
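
The new ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL guard reflects a C++17 rule: static constexpr data members are implicitly inline, so the namespace-scope definitions these macros emit are redundant (and deprecated) from C++17 on, and only required on older dialects. A minimal illustration of the underlying rule, with hypothetical names:

    struct Widget {
      static constexpr int kLimit = 42;  // in C++17, also a definition
    };

    #if __cplusplus < 201703L
    // Pre-C++17: required whenever kLimit is odr-used.
    constexpr int Widget::kLimit;
    #endif

    const int& limit_ref = Widget::kLimit;  // odr-use (binds a reference)
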
diff --git a/absl/types/internal/optional.h b/absl/types/internal/optional.h
index 92932b60..6ed0c669 100644
--- a/absl/types/internal/optional.h
+++ b/absl/types/internal/optional.h
@@ -91,7 +91,15 @@ class optional_data_dtor_base {
void destruct() noexcept {
if (engaged_) {
+ // `data_` must be initialized if `engaged_` is true.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
data_.~T();
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
engaged_ = false;
}
}
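
The pragma pair scopes a -Wmaybe-uninitialized suppression to the single destructor call that GCC 12 misdiagnoses, keyed on ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION from absl/base/config.h. A generic sketch of the same pattern with a hypothetical version-check macro (the real one may be defined differently):

    // Hypothetical stand-in for ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION.
    #if defined(__GNUC__) && !defined(__clang__)
    #define EXAMPLE_MIN_GNUC(major, minor) \
      (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
    #else
    #define EXAMPLE_MIN_GNUC(major, minor) 0
    #endif

    void Destroy(int*& p) {
    #if EXAMPLE_MIN_GNUC(12, 0)
    #pragma GCC diagnostic push
    #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
    #endif
      delete p;  // stand-in for the misdiagnosed destructor call
    #if EXAMPLE_MIN_GNUC(12, 0)
    #pragma GCC diagnostic pop
    #endif
      p = nullptr;
    }
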
diff --git a/absl/types/internal/variant.h b/absl/types/internal/variant.h
index 772008c7..7c402ece 100644
--- a/absl/types/internal/variant.h
+++ b/absl/types/internal/variant.h
@@ -16,8 +16,8 @@
// separate file to avoid cluttering the top of the API header with
// implementation details.
-#ifndef ABSL_TYPES_variant_internal_H_
-#define ABSL_TYPES_variant_internal_H_
+#ifndef ABSL_TYPES_VARIANT_INTERNAL_H_
+#define ABSL_TYPES_VARIANT_INTERNAL_H_
#include <cassert>
#include <cstddef>
@@ -1643,4 +1643,4 @@ ABSL_NAMESPACE_END
} // namespace absl
#endif // !defined(ABSL_USES_STD_VARIANT)
-#endif // ABSL_TYPES_variant_internal_H_
+#endif // ABSL_TYPES_VARIANT_INTERNAL_H_
diff --git a/absl/types/optional.h b/absl/types/optional.h
index 61540cfd..134b2aff 100644
--- a/absl/types/optional.h
+++ b/absl/types/optional.h
@@ -282,15 +282,16 @@ class optional : private optional_internal::optional_data<T>,
optional& operator=(optional&& src) = default;
// Value assignment operators
- template <
- typename U = T,
- typename = typename std::enable_if<absl::conjunction<
- absl::negation<
- std::is_same<optional<T>, typename std::decay<U>::type>>,
- absl::negation<
- absl::conjunction<std::is_scalar<T>,
- std::is_same<T, typename std::decay<U>::type>>>,
- std::is_constructible<T, U>, std::is_assignable<T&, U>>::value>::type>
+ template <typename U = T,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
+ typename = typename std::enable_if<absl::conjunction<
+ absl::negation<
+ std::is_same<optional<T>, typename std::decay<U>::type> >,
+ absl::negation<absl::conjunction<
+ std::is_scalar<T>,
+ std::is_same<T, typename std::decay<U>::type> > >,
+ std::is_constructible<T, U>,
+ std::is_assignable<T&, U> >::value>::type>
optional& operator=(U&& v) {
this->assign(std::forward<U>(v));
return *this;
@@ -298,13 +299,14 @@ class optional : private optional_internal::optional_data<T>,
template <
typename U,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
typename = typename std::enable_if<absl::conjunction<
- absl::negation<std::is_same<T, U>>,
+ absl::negation<std::is_same<T, U> >,
std::is_constructible<T, const U&>, std::is_assignable<T&, const U&>,
absl::negation<
optional_internal::
is_constructible_convertible_assignable_from_optional<
- T, U>>>::value>::type>
+ T, U> > >::value>::type>
optional& operator=(const optional<U>& rhs) {
if (rhs) {
this->assign(*rhs);
@@ -315,13 +317,14 @@ class optional : private optional_internal::optional_data<T>,
}
template <typename U,
+ int&..., // Work around an internal compiler error in GCC 5 to 10.
typename = typename std::enable_if<absl::conjunction<
- absl::negation<std::is_same<T, U>>, std::is_constructible<T, U>,
- std::is_assignable<T&, U>,
+ absl::negation<std::is_same<T, U> >,
+ std::is_constructible<T, U>, std::is_assignable<T&, U>,
absl::negation<
optional_internal::
is_constructible_convertible_assignable_from_optional<
- T, U>>>::value>::type>
+ T, U> > >::value>::type>
optional& operator=(optional<U>&& rhs) {
if (rhs) {
this->assign(std::move(*rhs));
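
The unnamed `int&...` pack inserted into each assignment operator is an explicit-argument barrier: a pack of references to int can never be deduced from the call or supplied explicitly, so everything after it stays deduced or defaulted, and as a side effect it dodges the GCC 5-10 internal compiler error named in the comments. A small self-contained demonstration of the barrier, not Abseil's code:

    #include <utility>

    // Callers can pin T explicitly but can never reach past the barrier to
    // tamper with the defaulted parameter U.
    template <typename T, int&..., typename U = T>
    U Identity(T&& value) {
      return std::forward<T>(value);
    }

    int main() {
      int a = Identity(41) + 1;        // OK: everything deduced
      int b = Identity<int>(41) + 1;   // OK: T pinned, barrier left empty
      // Identity<int, double>(41);    // ill-formed: cannot cross the barrier
      return a - b;
    }
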
diff --git a/absl/types/optional_test.cc b/absl/types/optional_test.cc
index 7ef142cb..9477c150 100644
--- a/absl/types/optional_test.cc
+++ b/absl/types/optional_test.cc
@@ -27,6 +27,37 @@
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
+#if defined(__cplusplus) && __cplusplus >= 202002L
+// In C++20, volatile-qualified return types are deprecated.
+#define ABSL_VOLATILE_RETURN_TYPES_DEPRECATED 1
+#endif
+
+// The following types help test an internal compiler error in GCC5 through
+// GCC10. The case OptionalTest.InternalCompilerErrorInGcc5ToGcc10 crashes the
+// compiler without a workaround. This test case should remain at the beginning
+// of the file as the internal compiler error is sensitive to other constructs
+// in this file.
+template <class T, class...>
+using GccIceHelper1 = T;
+template <typename T>
+struct GccIceHelper2 {};
+template <typename T>
+class GccIce {
+ template <typename U,
+ typename SecondTemplateArgHasToExistForSomeReason = void,
+ typename DependentType = void,
+ typename = std::is_assignable<GccIceHelper1<T, DependentType>&, U>>
+ GccIce& operator=(GccIceHelper2<U> const&) {}
+};
+
+TEST(OptionalTest, InternalCompilerErrorInGcc5ToGcc10) {
+ GccIce<int> instantiate_ice_with_same_type_as_optional;
+ static_cast<void>(instantiate_ice_with_same_type_as_optional);
+ absl::optional<int> val1;
+ absl::optional<int> val2;
+ val1 = val2;
+}
+
struct Hashable {};
namespace std {
@@ -205,6 +236,7 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_TRUE(opt42_copy);
EXPECT_EQ(42, *opt42_copy);
}
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
absl::optional<volatile int> empty, opt42 = 42;
absl::optional<volatile int> empty_copy(empty);
@@ -213,6 +245,7 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_TRUE(opt42_copy);
EXPECT_EQ(42, *opt42_copy);
}
+#endif
// test copyability
EXPECT_TRUE(std::is_copy_constructible<absl::optional<int>>::value);
EXPECT_TRUE(std::is_copy_constructible<absl::optional<Copyable>>::value);
@@ -224,18 +257,11 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_FALSE(
absl::is_trivially_copy_constructible<absl::optional<Copyable>>::value);
-#if defined(ABSL_USES_STD_OPTIONAL) && defined(__GLIBCXX__)
- // libstdc++ std::optional implementation (as of 7.2) has a bug: when T is
- // trivially copyable, optional<T> is not trivially copyable (due to one of
- // its base class is unconditionally nontrivial).
-#define ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG 1
-#endif
-#ifndef ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG
EXPECT_TRUE(
absl::is_trivially_copy_constructible<absl::optional<int>>::value);
EXPECT_TRUE(
absl::is_trivially_copy_constructible<absl::optional<const int>>::value);
-#ifndef _MSC_VER
+#if !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
// See defect report "Trivial copy/move constructor for class with volatile
// member" at
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#2094
@@ -244,8 +270,7 @@ TEST(optionalTest, CopyConstructor) {
// Also a cv-qualified scalar type should be trivially copyable.
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<volatile int>>::value);
-#endif // _MSC_VER
-#endif // ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG
+#endif // !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
// constexpr copy constructor for trivially copyable types
{
@@ -275,17 +300,10 @@ TEST(optionalTest, CopyConstructor) {
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<const TrivialCopyable>>::value);
#endif
- // When testing with VS 2017 15.3, there seems to be a bug in MSVC
- // std::optional when T is volatile-qualified. So skipping this test.
- // Bug report:
- // https://connect.microsoft.com/VisualStudio/feedback/details/3142534
-#if defined(ABSL_USES_STD_OPTIONAL) && defined(_MSC_VER) && _MSC_VER >= 1911
-#define ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG 1
-#endif
-#ifndef ABSL_MSVC_OPTIONAL_VOLATILE_COPY_BUG
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
EXPECT_FALSE(std::is_copy_constructible<
absl::optional<volatile TrivialCopyable>>::value);
-#endif
+#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
}
}
@@ -305,11 +323,9 @@ TEST(optionalTest, MoveConstructor) {
EXPECT_FALSE(std::is_move_constructible<absl::optional<NonMovable>>::value);
// test noexcept
EXPECT_TRUE(std::is_nothrow_move_constructible<absl::optional<int>>::value);
-#ifndef ABSL_USES_STD_OPTIONAL
EXPECT_EQ(
absl::default_allocator_is_nothrow::value,
std::is_nothrow_move_constructible<absl::optional<MoveableThrow>>::value);
-#endif
EXPECT_TRUE(std::is_nothrow_move_constructible<
absl::optional<MoveableNoThrow>>::value);
}
@@ -638,8 +654,7 @@ TEST(optionalTest, CopyAssignment) {
EXPECT_TRUE(absl::is_copy_assignable<NonTrivial>::value);
EXPECT_FALSE(absl::is_trivially_copy_assignable<NonTrivial>::value);
- // std::optional doesn't support volatile nontrivial types.
-#ifndef ABSL_USES_STD_OPTIONAL
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
StructorListener listener;
Listenable::listener = &listener;
@@ -658,7 +673,7 @@ TEST(optionalTest, CopyAssignment) {
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.volatile_copy_assign);
}
-#endif // ABSL_USES_STD_OPTIONAL
+#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
}
TEST(optionalTest, MoveAssignment) {
@@ -681,8 +696,7 @@ TEST(optionalTest, MoveAssignment) {
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.move_assign);
}
- // std::optional doesn't support volatile nontrivial types.
-#ifndef ABSL_USES_STD_OPTIONAL
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
StructorListener listener;
Listenable::listener = &listener;
@@ -702,7 +716,7 @@ TEST(optionalTest, MoveAssignment) {
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.volatile_move_assign);
}
-#endif // ABSL_USES_STD_OPTIONAL
+#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
EXPECT_FALSE(absl::is_move_assignable<absl::optional<const int>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::optional<Copyable>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::optional<MoveableThrow>>::value);
@@ -1038,6 +1052,7 @@ TEST(optionalTest, Value) {
#endif
EXPECT_EQ("c&&", TypeQuals(OC(absl::in_place, "xvalue_c").value()));
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
// test on volatile type
using OV = absl::optional<volatile int>;
OV lvalue_v(absl::in_place, 42);
@@ -1045,6 +1060,7 @@ TEST(optionalTest, Value) {
EXPECT_EQ(42, OV(42).value());
EXPECT_TRUE((std::is_same<volatile int&, decltype(lvalue_v.value())>::value));
EXPECT_TRUE((std::is_same<volatile int&&, decltype(OV(42).value())>::value));
+#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
// test exception throw on value()
absl::optional<int> empty;
@@ -1087,6 +1103,7 @@ TEST(optionalTest, DerefOperator) {
#endif
EXPECT_EQ("c&&", TypeQuals(*OC(absl::in_place, "xvalue_c")));
+#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
// test on volatile type
using OV = absl::optional<volatile int>;
OV lvalue_v(absl::in_place, 42);
@@ -1094,6 +1111,7 @@ TEST(optionalTest, DerefOperator) {
EXPECT_EQ(42, *OV(42));
EXPECT_TRUE((std::is_same<volatile int&, decltype(*lvalue_v)>::value));
EXPECT_TRUE((std::is_same<volatile int&&, decltype(*OV(42))>::value));
+#endif // !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
constexpr absl::optional<int> opt1(1);
static_assert(*opt1 == 1, "");
@@ -1558,12 +1576,10 @@ TEST(optionalTest, NoExcept) {
static_assert(
std::is_nothrow_move_constructible<absl::optional<MoveMeNoThrow>>::value,
"");
-#ifndef ABSL_USES_STD_OPTIONAL
static_assert(absl::default_allocator_is_nothrow::value ==
std::is_nothrow_move_constructible<
absl::optional<MoveMeThrow>>::value,
"");
-#endif
std::vector<absl::optional<MoveMeNoThrow>> v;
for (int i = 0; i < 10; ++i) v.emplace_back();
}
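
The ABSL_VOLATILE_RETURN_TYPES_DEPRECATED guards threaded through this test stem from C++20's deprecation of most volatile uses (P1152), detected above via the __cplusplus >= 202002L check: the volatile-qualified optional cases would now draw deprecation warnings, so they are compiled out. A small illustration of the language change itself:

    // Deprecated in C++20 (-Wvolatile on GCC/Clang): a volatile-qualified
    // return type, and compound assignment through a volatile lvalue.
    volatile int DeprecatedReturn();

    void Touch(volatile int& v) {
      v = 1;     // plain assignment: still fine
      // v += 1; // deprecated in C++20
    }
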
diff --git a/absl/types/span.h b/absl/types/span.h
index 6272bb7a..fdfbd77c 100644
--- a/absl/types/span.h
+++ b/absl/types/span.h
@@ -664,7 +664,8 @@ constexpr Span<T> MakeSpan(T* ptr, size_t size) noexcept {
template <int&... ExplicitArgumentBarrier, typename T>
Span<T> MakeSpan(T* begin, T* end) noexcept {
- return ABSL_HARDENING_ASSERT(begin <= end), Span<T>(begin, end - begin);
+ return ABSL_HARDENING_ASSERT(begin <= end),
+ Span<T>(begin, static_cast<size_t>(end - begin));
}
template <int&... ExplicitArgumentBarrier, typename C>
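
In MakeSpan the pointer difference `end - begin` has the signed type std::ptrdiff_t, so using it as the span's size_t length is an implicit sign conversion that warns under -Wsign-conversion style flags; the new static_cast makes the conversion (guaranteed non-negative by the preceding assert) explicit. A minimal reproduction of the pattern:

    #include <cstddef>

    std::size_t Length(int* begin, int* end) {
      // return end - begin;  // implicit ptrdiff_t -> size_t, warns
      return static_cast<std::size_t>(end - begin);  // explicit, quiet
    }
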
diff --git a/ci/cmake_common.sh b/ci/cmake_common.sh
index 51f31069..3c08255b 100644
--- a/ci/cmake_common.sh
+++ b/ci/cmake_common.sh
@@ -14,7 +14,7 @@
# The commit of GoogleTest to be used in the CMake tests in this directory.
# Keep this in sync with the commit in the WORKSPACE file.
-readonly ABSL_GOOGLETEST_COMMIT="8d51ffdfab10b3fba636ae69bc03da4b54f8c235"
+readonly ABSL_GOOGLETEST_COMMIT="15460959cbbfa20e66ef0b5ab497367e47fc0a04" # release-1.12.0
# Avoid depending on GitHub by looking for a cached copy of the commit first.
if [[ -r "${KOKORO_GFILE_DIR:-}/distdir/${ABSL_GOOGLETEST_COMMIT}.zip" ]]; then
diff --git a/ci/linux_clang-latest_libcxx_asan_bazel.sh b/ci/linux_clang-latest_libcxx_asan_bazel.sh
index 5245933a..196e5c1d 100755
--- a/ci/linux_clang-latest_libcxx_asan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_asan_bazel.sh
@@ -70,13 +70,14 @@ for std in ${STD}; do
--rm \
-e CC="/opt/llvm/clang/bin/clang" \
-e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
- -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \
- -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \
+ -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \
+ -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \
${DOCKER_EXTRA_ARGS:-} \
${DOCKER_CONTAINER} \
/usr/local/bin/bazel test ... \
--compilation_mode="${compilation_mode}" \
--copt="${exceptions_mode}" \
+ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
--copt="-fsanitize=address" \
--copt="-fsanitize=float-divide-by-zero" \
--copt="-fsanitize=nullability" \
diff --git a/ci/linux_clang-latest_libcxx_bazel.sh b/ci/linux_clang-latest_libcxx_bazel.sh
index e0fe653d..39aecf58 100755
--- a/ci/linux_clang-latest_libcxx_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_bazel.sh
@@ -71,8 +71,8 @@ for std in ${STD}; do
--rm \
-e CC="/opt/llvm/clang/bin/clang" \
-e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
- -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib" \
- -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/c++/v1" \
+ -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx/lib/x86_64-unknown-linux-gnu" \
+ -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx/include/c++/v1" \
${DOCKER_EXTRA_ARGS:-} \
${DOCKER_CONTAINER} \
/bin/sh -c "
@@ -83,6 +83,7 @@ for std in ${STD}; do
/usr/local/bin/bazel test ... \
--compilation_mode=\"${compilation_mode}\" \
--copt=\"${exceptions_mode}\" \
+ --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
--copt=-Werror \
--define=\"absl=1\" \
--distdir=\"/bazel-distdir\" \
diff --git a/ci/linux_clang-latest_libcxx_tsan_bazel.sh b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
index 555f6b1c..b0a1abff 100755
--- a/ci/linux_clang-latest_libcxx_tsan_bazel.sh
+++ b/ci/linux_clang-latest_libcxx_tsan_bazel.sh
@@ -70,14 +70,15 @@ for std in ${STD}; do
--rm \
-e CC="/opt/llvm/clang/bin/clang" \
-e BAZEL_CXXOPTS="-std=${std}:-nostdinc++" \
- -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib" \
- -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/c++/v1" \
+ -e BAZEL_LINKOPTS="-L/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu:-lc++:-lc++abi:-lm:-Wl,-rpath=/opt/llvm/libcxx-tsan/lib/x86_64-unknown-linux-gnu" \
+ -e CPLUS_INCLUDE_PATH="/opt/llvm/libcxx-tsan/include/x86_64-unknown-linux-gnu/c++/v1:/opt/llvm/libcxx-tsan/include/c++/v1" \
${DOCKER_EXTRA_ARGS:-} \
${DOCKER_CONTAINER} \
/usr/local/bin/bazel test ... \
--build_tag_filters="-notsan" \
--compilation_mode="${compilation_mode}" \
--copt="${exceptions_mode}" \
+ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
--copt="-fsanitize=thread" \
--copt="-fno-sanitize-blacklist" \
--copt=-Werror \
diff --git a/ci/linux_clang-latest_libstdcxx_bazel.sh b/ci/linux_clang-latest_libstdcxx_bazel.sh
index 36fdf82c..8550481b 100755
--- a/ci/linux_clang-latest_libstdcxx_bazel.sh
+++ b/ci/linux_clang-latest_libstdcxx_bazel.sh
@@ -75,6 +75,7 @@ for std in ${STD}; do
/usr/local/bin/bazel test ... \
--compilation_mode="${compilation_mode}" \
--copt="--gcc-toolchain=/usr/local" \
+ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
--copt="${exceptions_mode}" \
--copt=-Werror \
--define="absl=1" \
diff --git a/ci/linux_docker_containers.sh b/ci/linux_docker_containers.sh
index 32865b83..f55e153b 100644
--- a/ci/linux_docker_containers.sh
+++ b/ci/linux_docker_containers.sh
@@ -16,6 +16,6 @@
# Test scripts should source this file to get the identifiers.
readonly LINUX_ALPINE_CONTAINER="gcr.io/google.com/absl-177019/alpine:20201026"
-readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210617"
-readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20210617"
-readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20210617"
+readonly LINUX_CLANG_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217"
+readonly LINUX_GCC_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217"
+readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20220621"
diff --git a/ci/linux_gcc-floor_libstdcxx_bazel.sh b/ci/linux_gcc-floor_libstdcxx_bazel.sh
index 54ab68a3..8b41b5d7 100755
--- a/ci/linux_gcc-floor_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-floor_libstdcxx_bazel.sh
@@ -75,6 +75,7 @@ for std in ${STD}; do
/usr/local/bin/bazel test ... \
--compilation_mode="${compilation_mode}" \
--copt="${exceptions_mode}" \
+ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
--copt=-Werror \
--define="absl=1" \
--distdir="/bazel-distdir" \
diff --git a/ci/linux_gcc-latest_libstdcxx_bazel.sh b/ci/linux_gcc-latest_libstdcxx_bazel.sh
index 0555eced..3f7c71af 100755
--- a/ci/linux_gcc-latest_libstdcxx_bazel.sh
+++ b/ci/linux_gcc-latest_libstdcxx_bazel.sh
@@ -81,6 +81,7 @@ for std in ${STD}; do
/usr/local/bin/bazel test ... \
--compilation_mode=\"${compilation_mode}\" \
--copt=\"${exceptions_mode}\" \
+ --copt=\"-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1\" \
--copt=-Werror \
--define=\"absl=1\" \
--distdir=\"/bazel-distdir\" \
diff --git a/ci/linux_gcc-latest_libstdcxx_cmake.sh b/ci/linux_gcc-latest_libstdcxx_cmake.sh
index ab06aa05..eccb3818 100755
--- a/ci/linux_gcc-latest_libstdcxx_cmake.sh
+++ b/ci/linux_gcc-latest_libstdcxx_cmake.sh
@@ -54,7 +54,7 @@ for std in ${ABSL_CMAKE_CXX_STANDARDS}; do
cmake /abseil-cpp \
-DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
-DBUILD_SHARED_LIBS=${build_shared} \
- -DBUILD_TESTING=ON \
+ -DABSL_BUILD_TESTING=ON \
-DCMAKE_BUILD_TYPE=${compilation_mode} \
-DCMAKE_CXX_STANDARD=${std} \
-DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
diff --git a/ci/linux_gcc_alpine_cmake.sh b/ci/linux_gcc_alpine_cmake.sh
index bce27d29..bf2e1239 100755
--- a/ci/linux_gcc_alpine_cmake.sh
+++ b/ci/linux_gcc_alpine_cmake.sh
@@ -53,7 +53,7 @@ for std in ${ABSL_CMAKE_CXX_STANDARDS}; do
/bin/sh -c "
cmake /abseil-cpp \
-DABSL_GOOGLETEST_DOWNLOAD_URL=${ABSL_GOOGLETEST_DOWNLOAD_URL} \
- -DBUILD_TESTING=ON \
+ -DABSL_BUILD_TESTING=ON \
-DCMAKE_BUILD_TYPE=${compilation_mode} \
-DCMAKE_CXX_STANDARD=${std} \
-DCMAKE_MODULE_LINKER_FLAGS=\"-Wl,--no-undefined\" && \
diff --git a/ci/macos_xcode_bazel.sh b/ci/macos_xcode_bazel.sh
index 9e14e660..8d69f1de 100755
--- a/ci/macos_xcode_bazel.sh
+++ b/ci/macos_xcode_bazel.sh
@@ -24,7 +24,7 @@ if [[ -z ${ABSEIL_ROOT:-} ]]; then
fi
# If we are running on Kokoro, check for a versioned Bazel binary.
-KOKORO_GFILE_BAZEL_BIN="bazel-3.7.0-darwin-x86_64"
+KOKORO_GFILE_BAZEL_BIN="bazel-5.1.1-darwin-x86_64"
if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then
BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}"
chmod +x ${BAZEL_BIN}
@@ -32,6 +32,13 @@ else
BAZEL_BIN="bazel"
fi
+# Avoid depending on external sites like GitHub by checking --distdir for
+# external dependencies first.
+# https://docs.bazel.build/versions/master/guide.html#distdir
+if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -d "${KOKORO_GFILE_DIR}/distdir" ]]; then
+ BAZEL_EXTRA_ARGS="--distdir=${KOKORO_GFILE_DIR}/distdir ${BAZEL_EXTRA_ARGS:-}"
+fi
+
# Print the compiler and Bazel versions.
echo "---------------"
gcc -v
@@ -46,9 +53,11 @@ if [[ -n "${ALTERNATE_OPTIONS:-}" ]]; then
fi
${BAZEL_BIN} test ... \
+ --copt="-DGTEST_REMOVE_LEGACY_TEST_CASEAPI_=1" \
--copt=-Werror \
--keep_going \
--show_timestamps \
--test_env="TZDIR=${ABSEIL_ROOT}/absl/time/internal/cctz/testdata/zoneinfo" \
--test_output=errors \
- --test_tag_filters=-benchmark
+ --test_tag_filters=-benchmark \
+ ${BAZEL_EXTRA_ARGS:-}
diff --git a/ci/macos_xcode_cmake.sh b/ci/macos_xcode_cmake.sh
index 2a870cf4..71ea2534 100755
--- a/ci/macos_xcode_cmake.sh
+++ b/ci/macos_xcode_cmake.sh
@@ -45,7 +45,7 @@ for compilation_mode in ${ABSL_CMAKE_BUILD_TYPES}; do
time cmake ${ABSEIL_ROOT} \
-GXcode \
-DBUILD_SHARED_LIBS=${build_shared} \
- -DBUILD_TESTING=ON \
+ -DABSL_BUILD_TESTING=ON \
-DCMAKE_BUILD_TYPE=${compilation_mode} \
-DCMAKE_CXX_STANDARD=11 \
-DCMAKE_MODULE_LINKER_FLAGS="-Wl,--no-undefined" \