author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-15 01:31:49 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-15 01:31:49 +0000
commit     ae8623c8eb14cfab3305afb1487c4a11ce832680 (patch)
tree       61f88d9a095a390cab484c109bd55ce293b117b0
parent     6e9b9330d4c6a1279ce715150fca8a6b7c8377c6 (diff)
parent     362ba1a486881856074e7fbfc651041a87cd0fd6 (diff)
download   google-benchmark-android12-mainline-statsd-release.tar.gz
Snap for 7550844 from 362ba1a486881856074e7fbfc651041a87cd0fd6 to mainline-os-statsd-release
Tags: android-mainline-12.0.0_r84, android-mainline-12.0.0_r58, android12-mainline-statsd-release
Change-Id: Ia2ca9bf43aa6e472e77f6fede2b88d67110b838c
-rw-r--r--  .github/ISSUE_TEMPLATE/bug_report.md | 32
-rw-r--r--  .github/ISSUE_TEMPLATE/feature_request.md | 20
-rw-r--r--  .github/workflows/bazel.yml | 33
-rw-r--r--  .github/workflows/build-and-test.yml | 38
-rw-r--r--  .github/workflows/pylint.yml | 26
-rw-r--r--  .github/workflows/test_bindings.yml | 24
-rw-r--r--  .gitignore | 4
-rw-r--r--  .travis.yml | 10
-rw-r--r--  AUTHORS | 2
-rw-r--r--  Android.bp | 40
-rw-r--r--  BUILD.bazel | 4
-rw-r--r--  CMakeLists.txt | 14
-rw-r--r--  CONTRIBUTORS | 5
-rw-r--r--  METADATA | 7
l---------  NOTICE | 1
-rw-r--r--  README.md | 99
-rw-r--r--  WORKSPACE | 36
-rw-r--r--  bindings/python/BUILD | 3
-rw-r--r--  bindings/python/build_defs.bzl | 25
-rw-r--r--  bindings/python/google_benchmark/BUILD | 38
-rw-r--r--  bindings/python/google_benchmark/__init__.py | 158
-rw-r--r--  bindings/python/google_benchmark/benchmark.cc | 181
-rw-r--r--  bindings/python/google_benchmark/example.py | 136
-rw-r--r--  bindings/python/pybind11.BUILD | 20
-rw-r--r--  bindings/python/python_headers.BUILD | 6
-rw-r--r--  bindings/python/requirements.txt | 2
-rw-r--r--  cmake/CXXFeatureCheck.cmake | 5
-rw-r--r--  cmake/GoogleTest.cmake.in | 2
-rw-r--r--  cmake/benchmark.pc.in | 4
-rw-r--r--  docs/releasing.md (renamed from releasing.md) | 0
-rw-r--r--  docs/tools.md | 6
-rw-r--r--  include/benchmark/benchmark.h | 44
-rw-r--r--  mingw.py | 320
-rw-r--r--  requirements.txt | 2
-rw-r--r--  setup.py | 140
-rw-r--r--  src/CMakeLists.txt | 2
-rw-r--r--  src/benchmark.cc | 4
-rw-r--r--  src/benchmark_register.cc | 43
-rw-r--r--  src/benchmark_register.h | 3
-rw-r--r--  src/benchmark_runner.cc | 7
-rw-r--r--  src/commandlineflags.cc | 2
-rw-r--r--  src/cycleclock.h | 59
-rw-r--r--  src/internal_macros.h | 10
-rw-r--r--  src/json_reporter.cc | 6
-rw-r--r--  src/reporter.cc | 2
-rw-r--r--  src/sleep.cc | 16
-rw-r--r--  src/string_util.cc | 3
-rw-r--r--  src/sysinfo.cc | 28
-rw-r--r--  src/timers.cc | 72
-rw-r--r--  test/CMakeLists.txt | 3
-rw-r--r--  test/args_product_test.cc | 77
-rw-r--r--  test/benchmark_gtest.cc | 6
-rw-r--r--  test/commandlineflags_gtest.cc | 144
-rw-r--r--  test/fixture_test.cc | 14
-rw-r--r--  test/options_test.cc | 1
-rw-r--r--  test/output_test_helper.cc | 9
-rw-r--r--  test/reporter_output_test.cc | 94
-rw-r--r--  test/skip_with_error_test.cc | 6
-rw-r--r--  test/statistics_gtest.cc | 4
-rw-r--r--  tools/BUILD.bazel | 19
-rwxr-xr-x  tools/compare.py | 29
-rw-r--r--  tools/gbench/report.py | 620
-rw-r--r--  tools/gbench/util.py | 5
-rw-r--r--  tools/requirements.txt | 1
64 files changed, 2085 insertions, 691 deletions
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..6c2ced9
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[BUG]"
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**System**
+Which OS, compiler, and compiler version are you using:
+ - OS:
+ - Compiler and version:
+
+**To reproduce**
+Steps to reproduce the behavior:
+1. sync to commit ...
+2. cmake/bazel...
+3. make ...
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000..9e8ab6a
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[FR]"
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml
new file mode 100644
index 0000000..d6bbe62
--- /dev/null
+++ b/.github/workflows/bazel.yml
@@ -0,0 +1,33 @@
+name: bazel
+
+on:
+ push: {}
+ pull_request: {}
+
+jobs:
+ build-and-test:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v1
+
+ - name: mount bazel cache
+ uses: actions/cache@v1
+ with:
+ path: "/home/runner/.cache/bazel"
+ key: bazel
+
+ - name: install bazelisk
+ run: |
+ curl -LO "https://github.com/bazelbuild/bazelisk/releases/download/v1.1.0/bazelisk-linux-amd64"
+ mkdir -p "${GITHUB_WORKSPACE}/bin/"
+ mv bazelisk-linux-amd64 "${GITHUB_WORKSPACE}/bin/bazel"
+ chmod +x "${GITHUB_WORKSPACE}/bin/bazel"
+
+ - name: build
+ run: |
+ "${GITHUB_WORKSPACE}/bin/bazel" build //...
+
+ - name: test
+ run: |
+ "${GITHUB_WORKSPACE}/bin/bazel" test //test/...
diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml
new file mode 100644
index 0000000..f0f0626
--- /dev/null
+++ b/.github/workflows/build-and-test.yml
@@ -0,0 +1,38 @@
+name: build-and-test
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ job:
+ # TODO(dominic): Extend this to include compiler and set through env: CC/CXX.
+ name: ${{ matrix.os }}.${{ matrix.build_type }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, ubuntu-16.04, ubuntu-20.04, macos-latest, windows-latest]
+ build_type: ['Release', 'Debug']
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: create build environment
+ run: cmake -E make_directory ${{ runner.workspace }}/_build
+
+ - name: configure cmake
+ shell: bash
+ working-directory: ${{ runner.workspace }}/_build
+ run: cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+
+ - name: build
+ shell: bash
+ working-directory: ${{ runner.workspace }}/_build
+ run: cmake --build . --config ${{ matrix.build_type }}
+
+ - name: test
+ shell: bash
+ working-directory: ${{ runner.workspace }}/_build
+ run: ctest -C ${{ matrix.build_type }}
diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
new file mode 100644
index 0000000..c869674
--- /dev/null
+++ b/.github/workflows/pylint.yml
@@ -0,0 +1,26 @@
+name: pylint
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+jobs:
+ pylint:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v1
+ with:
+ python-version: 3.8
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install pylint pylint-exit conan
+ - name: Run pylint
+ run: |
+ pylint `find . -name '*.py'|xargs` || pylint-exit $?
diff --git a/.github/workflows/test_bindings.yml b/.github/workflows/test_bindings.yml
new file mode 100644
index 0000000..273d7f9
--- /dev/null
+++ b/.github/workflows/test_bindings.yml
@@ -0,0 +1,24 @@
+name: test-bindings
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
+
+jobs:
+ python_bindings:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python
+ uses: actions/setup-python@v1
+ with:
+ python-version: 3.8
+ - name: Install benchmark
+ run:
+ python setup.py install
+ - name: Run example bindings
+ run:
+ python bindings/python/google_benchmark/example.py
diff --git a/.gitignore b/.gitignore
index a7716e3..be55d77 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,3 +60,7 @@ CMakeSettings.json
# Visual Studio Code cache/options directory
.vscode/
+
+# Python build stuff
+dist/
+*.egg-info*
diff --git a/.travis.yml b/.travis.yml
index 6b6cfc7..36e343d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,10 +2,6 @@ sudo: required
dist: trusty
language: cpp
-env:
- global:
- - /usr/local/bin:$PATH
-
matrix:
include:
- compiler: gcc
@@ -169,7 +165,7 @@ matrix:
- BUILD_32_BITS=ON
- EXTRA_FLAGS="-m32"
- os: osx
- osx_image: xcode8.3
+ osx_image: xcode9.4
compiler: gcc
env:
- COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug
@@ -215,11 +211,11 @@ install:
- if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
sudo apt-get update -qq;
sudo apt-get install -qq unzip cmake3;
- wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
+ wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh;
travis_wait sudo bash bazel-installer.sh;
fi
- if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
- curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
+ curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh;
travis_wait sudo bash bazel-installer.sh;
fi
diff --git a/AUTHORS b/AUTHORS
index 89205a1..3068b2e 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -13,6 +13,7 @@ Alex Steele <steeleal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
+Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
@@ -54,3 +55,4 @@ Stripe, Inc.
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
+Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/Android.bp b/Android.bp
index e886e1c..1f1a2d6 100644
--- a/Android.bp
+++ b/Android.bp
@@ -14,8 +14,24 @@
// limitations under the License.
//
-cc_library_static {
- name: "libgoogle-benchmark",
+package {
+ default_applicable_licenses: ["external_google-benchmark_license"],
+}
+
+// Added automatically by a large-scale-change
+license {
+ name: "external_google-benchmark_license",
+ visibility: [":__subpackages__"],
+ license_kinds: [
+ "SPDX-license-identifier-Apache-2.0",
+ ],
+ license_text: [
+ "LICENSE",
+ ],
+}
+
+cc_defaults {
+ name: "libgoogle-benchmark-defaults",
host_supported: true,
local_include_dirs: ["include"],
vendor_available: true,
@@ -25,16 +41,28 @@ cc_library_static {
"-Werror",
"-Wno-deprecated-declarations",
],
-
- exclude_srcs: [
- "src/benchmark_main.cc",
- ],
srcs: [
"src/*.cc",
],
export_include_dirs: ["include"],
}
+// For benchmarks that define their own main().
+cc_library_static {
+ name: "libgoogle-benchmark",
+ defaults: ["libgoogle-benchmark-defaults"],
+ exclude_srcs: [
+ "src/benchmark_main.cc",
+ ],
+}
+
+// For benchmarks that want to use the default main().
+// Make sure this dependency is in the whole_static_libs attribute.
+cc_library_static {
+ name: "libgoogle-benchmark-main",
+ defaults: ["libgoogle-benchmark-defaults"],
+}
+
cc_test {
name: "google-benchmark-test",
srcs: ["test/basic_test.cc"],
diff --git a/BUILD.bazel b/BUILD.bazel
index d97a019..eb35b62 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,3 +1,5 @@
+load("@rules_cc//cc:defs.bzl", "cc_library")
+
licenses(["notice"])
config_setting(
@@ -8,8 +10,6 @@ config_setting(
visibility = [":__subpackages__"],
)
-load("@rules_cc//cc:defs.bzl", "cc_library")
-
cc_library(
name = "benchmark",
srcs = glob(
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 67c0b70..1007254 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -194,6 +194,7 @@ else()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
+ add_cxx_compiler_flag(-Wno-lto-type-mismatch)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
@@ -245,11 +246,17 @@ if (BENCHMARK_USE_LIBCXX)
endif()
endif(BENCHMARK_USE_LIBCXX)
+set(EXTRA_CXX_FLAGS "")
+if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+ # Clang on Windows fails to compile the regex feature check under C++11
+ set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14")
+endif()
+
# C++ feature checks
# Determine the correct regular expression engine to use
-cxx_feature_check(STD_REGEX)
-cxx_feature_check(GNU_POSIX_REGEX)
-cxx_feature_check(POSIX_REGEX)
+cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS})
+cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS})
+cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS})
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
@@ -257,6 +264,7 @@ if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
+
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
set(THREADS_PREFER_PTHREAD_FLAG ON)
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index 88f7eee..b5e1aa4 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -22,12 +22,14 @@
#
# Please keep the list sorted.
+Abhina Sreeskantharajan <abhina.sreeskantharajan@ibm.com>
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steelal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
+Christian Wassermann <christian_wassermann@web.de>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
@@ -40,6 +42,7 @@ Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
+Fanbo Meng <fanbo.meng@ibm.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
@@ -71,8 +74,10 @@ Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
+Steven Wan <wan.yu@ibm.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>
+Min-Yih Hsu <yihshyng223@gmail.com>
diff --git a/METADATA b/METADATA
index 7405160..0584c04 100644
--- a/METADATA
+++ b/METADATA
@@ -9,10 +9,11 @@ third_party {
type: GIT
value: "https://github.com/google/benchmark.git"
}
- version: "8982e1ee6aef31e48170400b7d1dc9969b156e5e"
+ version: "ea5a5bbff491fd625c6e3458f6edd680b8bd5452"
+ license_type: NOTICE
last_upgrade_date {
- year: 2020
+ year: 2021
month: 2
- day: 7
+ day: 12
}
}
diff --git a/NOTICE b/NOTICE
deleted file mode 120000
index 7a694c9..0000000
--- a/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-LICENSE \ No newline at end of file
diff --git a/README.md b/README.md
index 560464e..6c09b9d 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,13 @@
# Benchmark
+[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
+[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
+[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
+
[![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
[![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
-[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
+
A library to benchmark code snippets, similar to unit tests. Example:
@@ -70,13 +74,13 @@ $ git clone https://github.com/google/googletest.git benchmark/googletest
# Go to the library root directory
$ cd benchmark
# Make a build directory to place the build output.
-$ mkdir build && cd build
-# Generate a Makefile with cmake.
-# Use cmake -G <generator> to generate a different file type.
-$ cmake ../
+$ cmake -E make_directory "build"
+# Generate build system files with cmake.
+$ cmake -E chdir "build" cmake -DCMAKE_BUILD_TYPE=Release ../
+# or, starting with CMake 3.13, use a simpler form:
+# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
# Build the library.
-# Use make -j<number_of_parallel_jobs> to speed up the build process, e.g. make -j8 .
-$ make
+$ cmake --build "build" --config Release
```
This builds the `benchmark` and `benchmark_main` libraries and tests.
On a unix system, the build directory should now look something like this:
@@ -94,13 +98,13 @@ On a unix system, the build directory should now look something like this:
Next, you can run the tests to check the build.
```bash
-$ make test
+$ cmake -E chdir "build" ctest --build-config Release
```
If you want to install the library globally, also run:
```
-sudo make install
+sudo cmake --build "build" --config Release --target install
```
Note that Google Benchmark requires Google Test to build and run the tests. This
@@ -117,17 +121,14 @@ to `CMAKE_ARGS`.
### Debug vs Release
By default, benchmark builds as a debug library. You will see a warning in the
-output when this is the case. To build it as a release library instead, use:
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release
-```
+output when this is the case. To build it as a release library instead, add
+`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
+above. The use of `--config Release` in build commands is needed to properly
+support multi-configuration tools (like Visual Studio for example) and can be
+skipped for other build systems (like Makefile).
-To enable link-time optimisation, use
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
-```
+To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
+generating the build system files.
If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
cache variables, if autodetection fails.
@@ -135,7 +136,6 @@ cache variables, if autodetection fails.
If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
### Stable and Experimental Library Versions
The main branch contains the latest stable version of the benchmarking library;
@@ -552,6 +552,29 @@ pair.
BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
```
+Some benchmarks may require specific argument values that cannot be expressed
+with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a
+benchmark input for each combination in the product of the supplied vectors.
+
+```c++
+BENCHMARK(BM_SetInsert)
+ ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}})
+// would generate the same benchmark arguments as
+BENCHMARK(BM_SetInsert)
+ ->Args({1<<10, 20})
+ ->Args({3<<10, 20})
+ ->Args({8<<10, 20})
+ ->Args({3<<10, 40})
+ ->Args({8<<10, 40})
+ ->Args({1<<10, 40})
+ ->Args({1<<10, 60})
+ ->Args({3<<10, 60})
+ ->Args({8<<10, 60})
+ ->Args({1<<10, 80})
+ ->Args({3<<10, 80})
+ ->Args({8<<10, 80});
+```
+
For more complex patterns of inputs, passing a custom function to `Apply` allows
programmatic specification of an arbitrary set of arguments on which to run the
benchmark. The following example enumerates a dense range on one parameter,
@@ -622,7 +645,7 @@ that might be used to customize high-order term calculation.
```c++
BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
- ->Range(1<<10, 1<<18)->Complexity([](int64_t n)->double{return n; });
+ ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; });
```
<a name="templated-benchmarks" />
@@ -1184,7 +1207,9 @@ Users must explicitly exit the loop, otherwise all iterations will be performed.
Users may explicitly return to exit the benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop.
+including before and after the benchmark loop. Moreover, if `SkipWithError(...)`
+has been used, it is not required to reach the benchmark loop and one may return
+from the benchmark function early.
For example:
@@ -1192,24 +1217,32 @@ For example:
static void BM_test(benchmark::State& state) {
auto resource = GetResource();
if (!resource.good()) {
- state.SkipWithError("Resource is not good!");
- // KeepRunning() loop will not be entered.
+ state.SkipWithError("Resource is not good!");
+ // KeepRunning() loop will not be entered.
}
while (state.KeepRunning()) {
- auto data = resource.read_data();
- if (!resource.good()) {
- state.SkipWithError("Failed to read data!");
- break; // Needed to skip the rest of the iteration.
- }
- do_stuff(data);
+ auto data = resource.read_data();
+ if (!resource.good()) {
+ state.SkipWithError("Failed to read data!");
+ break; // Needed to skip the rest of the iteration.
+ }
+ do_stuff(data);
}
}
static void BM_test_ranged_fo(benchmark::State & state) {
- state.SkipWithError("test will not be entered");
+ auto resource = GetResource();
+ if (!resource.good()) {
+ state.SkipWithError("Resource is not good!");
+ return; // Early return is allowed when SkipWithError() has been used.
+ }
for (auto _ : state) {
- state.SkipWithError("Failed!");
- break; // REQUIRED to prevent all further iterations.
+ auto data = resource.read_data();
+ if (!resource.good()) {
+ state.SkipWithError("Failed to read data!");
+ break; // REQUIRED to prevent all further iterations.
+ }
+ do_stuff(data);
}
}
```
diff --git a/WORKSPACE b/WORKSPACE
index 8df248a..631f3ba 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -6,10 +6,46 @@ http_archive(
name = "rules_cc",
strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
+ sha256 = "d7dc12c1d5bc1a87474de8e3d17b7731a4dcebcfb8aa3990fe8ac7734ef12f2f",
+)
+
+http_archive(
+ name = "com_google_absl",
+ sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111",
+ strip_prefix = "abseil-cpp-20200225.2",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"],
)
http_archive(
name = "com_google_googletest",
strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
+ sha256 = "8f827dd550db8b4fdf73904690df0be9fccc161017c9038a724bc9a0617a1bc8",
+)
+
+http_archive(
+ name = "pybind11",
+ build_file = "@//bindings/python:pybind11.BUILD",
+ sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
+ strip_prefix = "pybind11-2.4.3",
+ urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
+)
+
+new_local_repository(
+ name = "python_headers",
+ build_file = "@//bindings/python:python_headers.BUILD",
+ path = "/usr/include/python3.6", # May be overwritten by setup.py.
+)
+
+http_archive(
+ name = "rules_python",
+ url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz",
+ sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
+)
+
+load("@rules_python//python:pip.bzl", pip3_install="pip_install")
+
+pip3_install(
+ name = "py_deps",
+ requirements = "//:requirements.txt",
)
diff --git a/bindings/python/BUILD b/bindings/python/BUILD
new file mode 100644
index 0000000..9559a76
--- /dev/null
+++ b/bindings/python/BUILD
@@ -0,0 +1,3 @@
+exports_files(glob(["*.BUILD"]))
+exports_files(["build_defs.bzl"])
+
diff --git a/bindings/python/build_defs.bzl b/bindings/python/build_defs.bzl
new file mode 100644
index 0000000..45907aa
--- /dev/null
+++ b/bindings/python/build_defs.bzl
@@ -0,0 +1,25 @@
+_SHARED_LIB_SUFFIX = {
+ "//conditions:default": ".so",
+ "//:windows": ".dll",
+}
+
+def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []):
+ for shared_lib_suffix in _SHARED_LIB_SUFFIX.values():
+ shared_lib_name = name + shared_lib_suffix
+ native.cc_binary(
+ name = shared_lib_name,
+ linkshared = 1,
+ linkstatic = 1,
+ srcs = srcs + hdrs,
+ copts = copts,
+ features = features,
+ deps = deps,
+ )
+
+ return native.py_library(
+ name = name,
+ data = select({
+ platform: [name + shared_lib_suffix]
+ for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items()
+ }),
+ )
diff --git a/bindings/python/google_benchmark/BUILD b/bindings/python/google_benchmark/BUILD
new file mode 100644
index 0000000..3c1561f
--- /dev/null
+++ b/bindings/python/google_benchmark/BUILD
@@ -0,0 +1,38 @@
+load("//bindings/python:build_defs.bzl", "py_extension")
+
+py_library(
+ name = "google_benchmark",
+ srcs = ["__init__.py"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":_benchmark",
+ # pip; absl:app
+ ],
+)
+
+py_extension(
+ name = "_benchmark",
+ srcs = ["benchmark.cc"],
+ copts = [
+ "-fexceptions",
+ "-fno-strict-aliasing",
+ ],
+ features = ["-use_header_modules"],
+ deps = [
+ "//:benchmark",
+ "@pybind11",
+ "@python_headers",
+ ],
+)
+
+py_test(
+ name = "example",
+ srcs = ["example.py"],
+ python_version = "PY3",
+ srcs_version = "PY3",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":google_benchmark",
+ ],
+)
+
diff --git a/bindings/python/google_benchmark/__init__.py b/bindings/python/google_benchmark/__init__.py
new file mode 100644
index 0000000..f31285e
--- /dev/null
+++ b/bindings/python/google_benchmark/__init__.py
@@ -0,0 +1,158 @@
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Python benchmarking utilities.
+
+Example usage:
+ import google_benchmark as benchmark
+
+ @benchmark.register
+ def my_benchmark(state):
+ ... # Code executed outside `while` loop is not timed.
+
+ while state:
+ ... # Code executed within `while` loop is timed.
+
+ if __name__ == '__main__':
+ benchmark.main()
+"""
+
+from absl import app
+from google_benchmark import _benchmark
+from google_benchmark._benchmark import (
+ Counter,
+ kNanosecond,
+ kMicrosecond,
+ kMillisecond,
+ kSecond,
+ oNone,
+ o1,
+ oN,
+ oNSquared,
+ oNCubed,
+ oLogN,
+ oNLogN,
+ oAuto,
+ oLambda,
+)
+
+
+__all__ = [
+ "register",
+ "main",
+ "Counter",
+ "kNanosecond",
+ "kMicrosecond",
+ "kMillisecond",
+ "kSecond",
+ "oNone",
+ "o1",
+ "oN",
+ "oNSquared",
+ "oNCubed",
+ "oLogN",
+ "oNLogN",
+ "oAuto",
+ "oLambda",
+]
+
+__version__ = "0.2.0"
+
+
+class __OptionMaker:
+ """A stateless class to collect benchmark options.
+
+ Collect all decorator calls like @option.range(start=0, limit=1<<5).
+ """
+
+ class Options:
+        """Pure data class to store option calls, along with the benchmarked function."""
+
+ def __init__(self, func):
+ self.func = func
+ self.builder_calls = []
+
+ @classmethod
+ def make(cls, func_or_options):
+ """Make Options from Options or the benchmarked function."""
+ if isinstance(func_or_options, cls.Options):
+ return func_or_options
+ return cls.Options(func_or_options)
+
+ def __getattr__(self, builder_name):
+ """Append option call in the Options."""
+
+        # The function that gets returned on @option.range(start=0, limit=1<<5).
+ def __builder_method(*args, **kwargs):
+
+            # The decorator that gets called, either with the benchmarked function
+ # or the previous Options
+ def __decorator(func_or_options):
+ options = self.make(func_or_options)
+ options.builder_calls.append((builder_name, args, kwargs))
+ # The decorator returns Options so it is not technically a decorator
+                # and needs a final call to @register
+ return options
+
+ return __decorator
+
+ return __builder_method
+
+
+# Alias for nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+ """Register function for benchmarking."""
+ if undefined is None:
+        # Decorator is called without parentheses so we return a decorator
+ return lambda f: register(f, name=name)
+
+ # We have either the function to benchmark (simple case) or an instance of Options
+ # (@option._ case).
+ options = __OptionMaker.make(undefined)
+
+ if name is None:
+ name = options.func.__name__
+
+ # We register the benchmark and reproduce all the @option._ calls onto the
+ # benchmark builder pattern
+ benchmark = _benchmark.RegisterBenchmark(name, options.func)
+ for name, args, kwargs in options.builder_calls[::-1]:
+ getattr(benchmark, name)(*args, **kwargs)
+
+ # return the benchmarked function because the decorator does not modify it
+ return options.func
+
+
+def _flags_parser(argv):
+ argv = _benchmark.Initialize(argv)
+ return app.parse_flags_with_usage(argv)
+
+
+def _run_benchmarks(argv):
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ return _benchmark.RunSpecifiedBenchmarks()
+
+
+def main(argv=None):
+ return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
+
+
+# Methods for use with custom main function.
+initialize = _benchmark.Initialize
+run_benchmarks = _benchmark.RunSpecifiedBenchmarks
diff --git a/bindings/python/google_benchmark/benchmark.cc b/bindings/python/google_benchmark/benchmark.cc
new file mode 100644
index 0000000..d80816e
--- /dev/null
+++ b/bindings/python/google_benchmark/benchmark.cc
@@ -0,0 +1,181 @@
+// Benchmark for Python.
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "pybind11/operators.h"
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+#include "pybind11/stl_bind.h"
+
+#include "benchmark/benchmark.h"
+
+PYBIND11_MAKE_OPAQUE(benchmark::UserCounters);
+
+namespace {
+namespace py = ::pybind11;
+
+std::vector<std::string> Initialize(const std::vector<std::string>& argv) {
+ // The `argv` pointers here become invalid when this function returns, but
+ // benchmark holds the pointer to `argv[0]`. We create a static copy of it
+ // so it persists, and replace the pointer below.
+ static std::string executable_name(argv[0]);
+ std::vector<char*> ptrs;
+ ptrs.reserve(argv.size());
+ for (auto& arg : argv) {
+ ptrs.push_back(const_cast<char*>(arg.c_str()));
+ }
+ ptrs[0] = const_cast<char*>(executable_name.c_str());
+ int argc = static_cast<int>(argv.size());
+ benchmark::Initialize(&argc, ptrs.data());
+ std::vector<std::string> remaining_argv;
+ remaining_argv.reserve(argc);
+ for (int i = 0; i < argc; ++i) {
+ remaining_argv.emplace_back(ptrs[i]);
+ }
+ return remaining_argv;
+}
+
+benchmark::internal::Benchmark* RegisterBenchmark(const char* name,
+ py::function f) {
+ return benchmark::RegisterBenchmark(
+ name, [f](benchmark::State& state) { f(&state); });
+}
+
+PYBIND11_MODULE(_benchmark, m) {
+ using benchmark::TimeUnit;
+ py::enum_<TimeUnit>(m, "TimeUnit")
+ .value("kNanosecond", TimeUnit::kNanosecond)
+ .value("kMicrosecond", TimeUnit::kMicrosecond)
+ .value("kMillisecond", TimeUnit::kMillisecond)
+ .value("kSecond", TimeUnit::kSecond)
+ .export_values();
+
+ using benchmark::BigO;
+ py::enum_<BigO>(m, "BigO")
+ .value("oNone", BigO::oNone)
+ .value("o1", BigO::o1)
+ .value("oN", BigO::oN)
+ .value("oNSquared", BigO::oNSquared)
+ .value("oNCubed", BigO::oNCubed)
+ .value("oLogN", BigO::oLogN)
+      .value("oNLogN", BigO::oNLogN)
+ .value("oAuto", BigO::oAuto)
+ .value("oLambda", BigO::oLambda)
+ .export_values();
+
+ using benchmark::internal::Benchmark;
+ py::class_<Benchmark>(m, "Benchmark")
+      // For methods returning a pointer to the current object, reference
+      // return policy is used to ask pybind not to take ownership of the
+      // returned object and avoid calling delete on it.
+ // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies
+ //
+ // For methods taking a const std::vector<...>&, a copy is created
+      // because it is bound to a Python list.
+ // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html
+ .def("unit", &Benchmark::Unit, py::return_value_policy::reference)
+ .def("arg", &Benchmark::Arg, py::return_value_policy::reference)
+ .def("args", &Benchmark::Args, py::return_value_policy::reference)
+ .def("range", &Benchmark::Range, py::return_value_policy::reference,
+ py::arg("start"), py::arg("limit"))
+ .def("dense_range", &Benchmark::DenseRange,
+ py::return_value_policy::reference, py::arg("start"),
+ py::arg("limit"), py::arg("step") = 1)
+ .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference)
+ .def("args_product", &Benchmark::ArgsProduct,
+ py::return_value_policy::reference)
+ .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference)
+ .def("arg_names", &Benchmark::ArgNames,
+ py::return_value_policy::reference)
+ .def("range_pair", &Benchmark::RangePair,
+ py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"),
+ py::arg("lo2"), py::arg("hi2"))
+ .def("range_multiplier", &Benchmark::RangeMultiplier,
+ py::return_value_policy::reference)
+ .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference)
+ .def("iterations", &Benchmark::Iterations,
+ py::return_value_policy::reference)
+ .def("repetitions", &Benchmark::Repetitions,
+ py::return_value_policy::reference)
+ .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly,
+ py::return_value_policy::reference, py::arg("value") = true)
+ .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime,
+ py::return_value_policy::reference)
+ .def("use_real_time", &Benchmark::UseRealTime,
+ py::return_value_policy::reference)
+ .def("use_manual_time", &Benchmark::UseManualTime,
+ py::return_value_policy::reference)
+ .def(
+ "complexity",
+ (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity,
+ py::return_value_policy::reference,
+ py::arg("complexity") = benchmark::oAuto);
+
+ using benchmark::Counter;
+ py::class_<Counter> py_counter(m, "Counter");
+
+ py::enum_<Counter::Flags>(py_counter, "Flags")
+ .value("kDefaults", Counter::Flags::kDefaults)
+ .value("kIsRate", Counter::Flags::kIsRate)
+ .value("kAvgThreads", Counter::Flags::kAvgThreads)
+ .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate)
+ .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant)
+ .value("kIsIterationInvariantRate",
+ Counter::Flags::kIsIterationInvariantRate)
+ .value("kAvgIterations", Counter::Flags::kAvgIterations)
+ .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate)
+ .value("kInvert", Counter::Flags::kInvert)
+ .export_values()
+ .def(py::self | py::self);
+
+ py::enum_<Counter::OneK>(py_counter, "OneK")
+ .value("kIs1000", Counter::OneK::kIs1000)
+ .value("kIs1024", Counter::OneK::kIs1024)
+ .export_values();
+
+ py_counter
+ .def(py::init<double, Counter::Flags, Counter::OneK>(),
+ py::arg("value") = 0., py::arg("flags") = Counter::kDefaults,
+ py::arg("k") = Counter::kIs1000)
+ .def(py::init([](double value) { return Counter(value); }))
+ .def_readwrite("value", &Counter::value)
+ .def_readwrite("flags", &Counter::flags)
+ .def_readwrite("oneK", &Counter::oneK);
+ py::implicitly_convertible<py::float_, Counter>();
+ py::implicitly_convertible<py::int_, Counter>();
+
+ py::bind_map<benchmark::UserCounters>(m, "UserCounters");
+
+ using benchmark::State;
+ py::class_<State>(m, "State")
+ .def("__bool__", &State::KeepRunning)
+ .def_property_readonly("keep_running", &State::KeepRunning)
+ .def("pause_timing", &State::PauseTiming)
+ .def("resume_timing", &State::ResumeTiming)
+ .def("skip_with_error", &State::SkipWithError)
+ .def_property_readonly("error_occured", &State::error_occurred)
+ .def("set_iteration_time", &State::SetIterationTime)
+ .def_property("bytes_processed", &State::bytes_processed,
+ &State::SetBytesProcessed)
+ .def_property("complexity_n", &State::complexity_length_n,
+ &State::SetComplexityN)
+ .def_property("items_processed", &State::items_processed,
+ &State::SetItemsProcessed)
+ .def("set_label", (void (State::*)(const char*)) & State::SetLabel)
+ .def("range", &State::range, py::arg("pos") = 0)
+ .def_property_readonly("iterations", &State::iterations)
+ .def_readwrite("counters", &State::counters)
+ .def_readonly("thread_index", &State::thread_index)
+ .def_readonly("threads", &State::threads);
+
+ m.def("Initialize", Initialize);
+ m.def("RegisterBenchmark", RegisterBenchmark,
+ py::return_value_policy::reference);
+ m.def("RunSpecifiedBenchmarks",
+ []() { benchmark::RunSpecifiedBenchmarks(); });
+};
+} // namespace
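
The module above is a thin pybind11 wrapper over the library's C++ entry points: `benchmark::Initialize`, `benchmark::RegisterBenchmark`, and `benchmark::RunSpecifiedBenchmarks`. For reference, a minimal C++ sketch using those same entry points directly; the benchmark name and body are illustrative and not part of this change:

```c++
#include <string>

#include "benchmark/benchmark.h"

// Hypothetical benchmark registered at run time, mirroring what
// RegisterBenchmark() in the module above does for Python callables.
static void BM_StringCopy(benchmark::State& state) {
  std::string src(static_cast<size_t>(state.range(0)), 'x');
  for (auto _ : state) {
    std::string copy(src);
    benchmark::DoNotOptimize(copy);
  }
}

int main(int argc, char** argv) {
  // Run-time registration; the returned Benchmark* supports the same
  // builder calls the bindings expose (arg, unit, ranges, ...).
  benchmark::RegisterBenchmark("BM_StringCopy", BM_StringCopy)->Arg(64);
  // Consumes the --benchmark_* flags and leaves the rest in argv.
  benchmark::Initialize(&argc, argv);
  benchmark::RunSpecifiedBenchmarks();
  return 0;
}
```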
diff --git a/bindings/python/google_benchmark/example.py b/bindings/python/google_benchmark/example.py
new file mode 100644
index 0000000..9134e8c
--- /dev/null
+++ b/bindings/python/google_benchmark/example.py
@@ -0,0 +1,136 @@
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Example of using the C++ benchmark framework from Python.
+
+To run this example, you must first install the `google_benchmark` Python package.
+
+To install using `setup.py`, download and extract the `google_benchmark` source.
+In the extracted directory, execute:
+ python setup.py install
+"""
+
+import random
+import time
+
+import google_benchmark as benchmark
+from google_benchmark import Counter
+
+
+@benchmark.register
+def empty(state):
+ while state:
+ pass
+
+
+@benchmark.register
+def sum_million(state):
+ while state:
+ sum(range(1_000_000))
+
+@benchmark.register
+def pause_timing(state):
+ """Pause timing every iteration."""
+ while state:
+ # Construct a list of random ints every iteration without timing it
+ state.pause_timing()
+ random_list = [random.randint(0, 100) for _ in range(100)]
+ state.resume_timing()
+ # Time the in place sorting algorithm
+ random_list.sort()
+
+
+@benchmark.register
+def skipped(state):
+ if True: # Test some predicate here.
+ state.skip_with_error("some error")
+ return # NOTE: You must explicitly return, or benchmark will continue.
+
+ ... # Benchmark code would be here.
+
+
+@benchmark.register
+def manual_timing(state):
+ while state:
+ # Manually count Python CPU time
+ start = time.perf_counter() # perf_counter_ns() in Python 3.7+
+ # Something to benchmark
+ time.sleep(0.01)
+ end = time.perf_counter()
+ state.set_iteration_time(end - start)
+
+
+@benchmark.register
+def custom_counters(state):
+    """Collect custom metrics using benchmark.Counter."""
+ num_foo = 0.0
+ while state:
+ # Benchmark some code here
+ pass
+ # Collect some custom metric named foo
+ num_foo += 0.13
+
+ # Automatic Counter from numbers.
+ state.counters["foo"] = num_foo
+ # Set a counter as a rate.
+ state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
+ # Set a counter as an inverse of rate.
+ state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert)
+ # Set a counter as a thread-average quantity.
+ state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
+ # There's also a combined flag:
+ state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)
+
+
+@benchmark.register
+@benchmark.option.measure_process_cpu_time()
+@benchmark.option.use_real_time()
+def with_options(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register(name="sum_million_microseconds")
+@benchmark.option.unit(benchmark.kMicrosecond)
+def with_options(state):
+ while state:
+ sum(range(1_000_000))
+
+
+@benchmark.register
+@benchmark.option.arg(100)
+@benchmark.option.arg(1000)
+def passing_argument(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range(8, limit=8 << 10)
+def using_range(state):
+ while state:
+ sum(range(state.range(0)))
+
+
+@benchmark.register
+@benchmark.option.range_multiplier(2)
+@benchmark.option.range(1 << 10, 1 << 18)
+@benchmark.option.complexity(benchmark.oN)
+def computing_complexity(state):
+ while state:
+ sum(range(state.range(0)))
+ state.complexity_n = state.range(0)
+
+
+if __name__ == "__main__":
+ benchmark.main()
diff --git a/bindings/python/pybind11.BUILD b/bindings/python/pybind11.BUILD
new file mode 100644
index 0000000..bc83350
--- /dev/null
+++ b/bindings/python/pybind11.BUILD
@@ -0,0 +1,20 @@
+cc_library(
+ name = "pybind11",
+ hdrs = glob(
+ include = [
+ "include/pybind11/*.h",
+ "include/pybind11/detail/*.h",
+ ],
+ exclude = [
+ "include/pybind11/common.h",
+ "include/pybind11/eigen.h",
+ ],
+ ),
+ copts = [
+ "-fexceptions",
+ "-Wno-undefined-inline",
+ "-Wno-pragma-once-outside-header",
+ ],
+ includes = ["include"],
+ visibility = ["//visibility:public"],
+)
diff --git a/bindings/python/python_headers.BUILD b/bindings/python/python_headers.BUILD
new file mode 100644
index 0000000..9c34cf6
--- /dev/null
+++ b/bindings/python/python_headers.BUILD
@@ -0,0 +1,6 @@
+cc_library(
+ name = "python_headers",
+ hdrs = glob(["**/*.h"]),
+ includes = ["."],
+ visibility = ["//visibility:public"],
+)
diff --git a/bindings/python/requirements.txt b/bindings/python/requirements.txt
new file mode 100644
index 0000000..f5bbe7e
--- /dev/null
+++ b/bindings/python/requirements.txt
@@ -0,0 +1,2 @@
+absl-py>=0.7.1
+
diff --git a/cmake/CXXFeatureCheck.cmake b/cmake/CXXFeatureCheck.cmake
index 059d510..62e6741 100644
--- a/cmake/CXXFeatureCheck.cmake
+++ b/cmake/CXXFeatureCheck.cmake
@@ -27,6 +27,11 @@ function(cxx_feature_check FILE)
return()
endif()
+ if (ARGC GREATER 1)
+ message(STATUS "Enabling additional flags: ${ARGV1}")
+ list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1})
+ endif()
+
if (NOT DEFINED COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
diff --git a/cmake/GoogleTest.cmake.in b/cmake/GoogleTest.cmake.in
index 28818ee..fd957ff 100644
--- a/cmake/GoogleTest.cmake.in
+++ b/cmake/GoogleTest.cmake.in
@@ -31,7 +31,7 @@ if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}"
)
else()
if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
- message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable ALLOW_DOWNLOADING_GOOGLETEST, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
+ message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
else()
message(WARNING "Did not find Google Test sources! Fetching from web...")
ExternalProject_Add(
diff --git a/cmake/benchmark.pc.in b/cmake/benchmark.pc.in
index 43ca8f9..34beb01 100644
--- a/cmake/benchmark.pc.in
+++ b/cmake/benchmark.pc.in
@@ -1,7 +1,7 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
-libdir=${prefix}/lib
-includedir=${prefix}/include
+libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
+includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: @PROJECT_NAME@
Description: Google microbenchmark framework
diff --git a/releasing.md b/docs/releasing.md
index f0cd701..f0cd701 100644
--- a/releasing.md
+++ b/docs/releasing.md
diff --git a/docs/tools.md b/docs/tools.md
index 4a3b2e9..f2d0c49 100644
--- a/docs/tools.md
+++ b/docs/tools.md
@@ -4,7 +4,11 @@
The `compare.py` can be used to compare the result of benchmarks.
-**NOTE**: the utility relies on the scipy package which can be installed using [these instructions](https://www.scipy.org/install.html).
+### Dependencies
+The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip:
+```bash
+pip3 install -r requirements.txt
+```
### Displaying aggregates only
diff --git a/include/benchmark/benchmark.h b/include/benchmark/benchmark.h
index 144e212..f57e3e7 100644
--- a/include/benchmark/benchmark.h
+++ b/include/benchmark/benchmark.h
@@ -176,6 +176,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
#include <map>
#include <set>
#include <string>
+#include <utility>
#include <vector>
#if defined(BENCHMARK_HAS_CXX11)
@@ -406,7 +407,7 @@ typedef std::map<std::string, Counter> UserCounters;
// TimeUnit is passed to a benchmark in order to specify the order of magnitude
// for the measured time.
-enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond };
+enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond };
// BigO is passed to a benchmark in order to specify the asymptotic
// computational
@@ -541,6 +542,9 @@ class State {
// responsibility to exit the scope as needed.
void SkipWithError(const char* msg);
+ // Returns true if an error has been reported with 'SkipWithError(...)'.
+ bool error_occurred() const { return error_occurred_; }
+
// REQUIRES: called exactly once per iteration of the benchmarking loop.
// Set the manually measured time for this benchmark iteration, which
// is used instead of automatically measured time if UseManualTime() was
@@ -824,6 +828,11 @@ class Benchmark {
// REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
Benchmark* Ranges(const std::vector<std::pair<int64_t, int64_t> >& ranges);
+ // Run this benchmark once for each combination of values in the (cartesian)
+ // product of the supplied argument lists.
+ // REQUIRES: The function passed to the constructor must accept arg1, arg2 ...
+ Benchmark* ArgsProduct(const std::vector<std::vector<int64_t> >& arglists);
+
// Equivalent to ArgNames({name})
Benchmark* ArgName(const std::string& name);
@@ -1097,6 +1106,9 @@ class Fixture : public internal::Benchmark {
BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n)
#define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c)
#define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c
+// Helper for concatenation with macro name expansion
+#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \
+ BaseClass##_##Method##_Benchmark
#define BENCHMARK_PRIVATE_DECLARE(n) \
static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \
@@ -1217,27 +1229,27 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_DEFINE_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a)
#endif
#define BENCHMARK_REGISTER_F(BaseClass, Method) \
- BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark)
+ BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method))
#define BENCHMARK_PRIVATE_REGISTER_F(TestName) \
BENCHMARK_PRIVATE_DECLARE(TestName) = \
@@ -1247,23 +1259,23 @@ class Fixture : public internal::Benchmark {
#define BENCHMARK_F(BaseClass, Method) \
BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \
BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#ifdef BENCHMARK_HAS_CXX11
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \
BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \
BENCHMARK_REGISTER_F(BaseClass, Method); \
- void BaseClass##_##Method##_Benchmark::BenchmarkCase
+ void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase
#else
#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \
BENCHMARK_TEMPLATE1_F(BaseClass, Method, a)
@@ -1291,10 +1303,16 @@ struct CPUInfo {
int num_sharing;
};
+ enum Scaling {
+ UNKNOWN,
+ ENABLED,
+ DISABLED
+ };
+
int num_cpus;
double cycles_per_second;
std::vector<CacheInfo> caches;
- bool scaling_enabled;
+ Scaling scaling;
std::vector<double> load_avg;
static const CPUInfo& Get();
@@ -1559,6 +1577,8 @@ class MemoryManager {
inline const char* GetTimeUnitString(TimeUnit unit) {
switch (unit) {
+ case kSecond:
+ return "s";
case kMillisecond:
return "ms";
case kMicrosecond:
@@ -1571,6 +1591,8 @@ inline const char* GetTimeUnitString(TimeUnit unit) {
inline double GetTimeUnitMultiplier(TimeUnit unit) {
switch (unit) {
+ case kSecond:
+ return 1;
case kMillisecond:
return 1e3;
case kMicrosecond:
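
The fixture macros touched above (`BENCHMARK_F`, `BENCHMARK_DEFINE_F`, `BENCHMARK_REGISTER_F`, and their template variants) now build the generated class name through `BENCHMARK_PRIVATE_CONCAT_NAME`, so macro arguments are expanded before concatenation. Their usage is unchanged; a minimal sketch with an illustrative fixture (the names here are examples, not part of this change):

```c++
#include <vector>

#include "benchmark/benchmark.h"

// Hypothetical fixture; BENCHMARK_F below generates and registers a class
// named MyFixture_PushBack_Benchmark via BENCHMARK_PRIVATE_CONCAT_NAME.
class MyFixture : public benchmark::Fixture {
 public:
  void SetUp(const benchmark::State&) override { data.reserve(1024); }
  void TearDown(const benchmark::State&) override { data.clear(); }
  std::vector<int> data;
};

// Define and register in one step.
BENCHMARK_F(MyFixture, PushBack)(benchmark::State& state) {
  for (auto _ : state) {
    data.push_back(42);
  }
}

// Or define first, then register with extra builder options.
BENCHMARK_DEFINE_F(MyFixture, PushBackMicro)(benchmark::State& state) {
  for (auto _ : state) {
    data.push_back(42);
  }
}
BENCHMARK_REGISTER_F(MyFixture, PushBackMicro)->Unit(benchmark::kMicrosecond);

BENCHMARK_MAIN();
```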
diff --git a/mingw.py b/mingw.py
deleted file mode 100644
index 65cf4b8..0000000
--- a/mingw.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#! /usr/bin/env python
-# encoding: utf-8
-
-import argparse
-import errno
-import logging
-import os
-import platform
-import re
-import sys
-import subprocess
-import tempfile
-
-try:
- import winreg
-except ImportError:
- import _winreg as winreg
-try:
- import urllib.request as request
-except ImportError:
- import urllib as request
-try:
- import urllib.parse as parse
-except ImportError:
- import urlparse as parse
-
-class EmptyLogger(object):
- '''
- Provides an implementation that performs no logging
- '''
- def debug(self, *k, **kw):
- pass
- def info(self, *k, **kw):
- pass
- def warn(self, *k, **kw):
- pass
- def error(self, *k, **kw):
- pass
- def critical(self, *k, **kw):
- pass
- def setLevel(self, *k, **kw):
- pass
-
-urls = (
- 'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
- 'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
- 'repository.txt',
- 'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
- 'repository.txt'
-)
-'''
-A list of mingw-build repositories
-'''
-
-def repository(urls = urls, log = EmptyLogger()):
- '''
- Downloads and parse mingw-build repository files and parses them
- '''
- log.info('getting mingw-builds repository')
- versions = {}
- re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
- re_sub = r'http://downloads.sourceforge.net/project/\1'
- for url in urls:
- log.debug(' - requesting: %s', url)
- socket = request.urlopen(url)
- repo = socket.read()
- if not isinstance(repo, str):
- repo = repo.decode();
- socket.close()
- for entry in repo.split('\n')[:-1]:
- value = entry.split('|')
- version = tuple([int(n) for n in value[0].strip().split('.')])
- version = versions.setdefault(version, {})
- arch = value[1].strip()
- if arch == 'x32':
- arch = 'i686'
- elif arch == 'x64':
- arch = 'x86_64'
- arch = version.setdefault(arch, {})
- threading = arch.setdefault(value[2].strip(), {})
- exceptions = threading.setdefault(value[3].strip(), {})
- revision = exceptions.setdefault(int(value[4].strip()[3:]),
- re_sourceforge.sub(re_sub, value[5].strip()))
- return versions
-
-def find_in_path(file, path=None):
- '''
- Attempts to find an executable in the path
- '''
- if platform.system() == 'Windows':
- file += '.exe'
- if path is None:
- path = os.environ.get('PATH', '')
- if type(path) is type(''):
- path = path.split(os.pathsep)
- return list(filter(os.path.exists,
- map(lambda dir, file=file: os.path.join(dir, file), path)))
-
-def find_7zip(log = EmptyLogger()):
- '''
- Attempts to find 7zip for unpacking the mingw-build archives
- '''
- log.info('finding 7zip')
- path = find_in_path('7z')
- if not path:
- key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
- path, _ = winreg.QueryValueEx(key, 'Path')
- path = [os.path.join(path, '7z.exe')]
- log.debug('found \'%s\'', path[0])
- return path[0]
-
-find_7zip()
-
-def unpack(archive, location, log = EmptyLogger()):
- '''
- Unpacks a mingw-builds archive
- '''
- sevenzip = find_7zip(log)
- log.info('unpacking %s', os.path.basename(archive))
- cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
- log.debug(' - %r', cmd)
- with open(os.devnull, 'w') as devnull:
- subprocess.check_call(cmd, stdout = devnull)
-
-def download(url, location, log = EmptyLogger()):
- '''
- Downloads and unpacks a mingw-builds archive
- '''
- log.info('downloading MinGW')
- log.debug(' - url: %s', url)
- log.debug(' - location: %s', location)
-
- re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
-
- stream = request.urlopen(url)
- try:
- content = stream.getheader('Content-Disposition') or ''
- except AttributeError:
- content = stream.headers.getheader('Content-Disposition') or ''
- matches = re_content.match(content)
- if matches:
- filename = matches.group(2)
- else:
- parsed = parse.urlparse(stream.geturl())
- filename = os.path.basename(parsed.path)
-
- try:
- os.makedirs(location)
- except OSError as e:
- if e.errno == errno.EEXIST and os.path.isdir(location):
- pass
- else:
- raise
-
- archive = os.path.join(location, filename)
- with open(archive, 'wb') as out:
- while True:
- buf = stream.read(1024)
- if not buf:
- break
- out.write(buf)
- unpack(archive, location, log = log)
- os.remove(archive)
-
- possible = os.path.join(location, 'mingw64')
- if not os.path.exists(possible):
- possible = os.path.join(location, 'mingw32')
- if not os.path.exists(possible):
- raise ValueError('Failed to find unpacked MinGW: ' + possible)
- return possible
-
-def root(location = None, arch = None, version = None, threading = None,
- exceptions = None, revision = None, log = EmptyLogger()):
- '''
- Returns the root folder of a specific version of the mingw-builds variant
- of gcc. Will download the compiler if needed
- '''
-
- # Get the repository if we don't have all the information
- if not (arch and version and threading and exceptions and revision):
- versions = repository(log = log)
-
- # Determine some defaults
- version = version or max(versions.keys())
- if not arch:
- arch = platform.machine().lower()
- if arch == 'x86':
- arch = 'i686'
- elif arch == 'amd64':
- arch = 'x86_64'
- if not threading:
- keys = versions[version][arch].keys()
- if 'posix' in keys:
- threading = 'posix'
- elif 'win32' in keys:
- threading = 'win32'
- else:
- threading = keys[0]
- if not exceptions:
- keys = versions[version][arch][threading].keys()
- if 'seh' in keys:
- exceptions = 'seh'
- elif 'sjlj' in keys:
- exceptions = 'sjlj'
- else:
- exceptions = keys[0]
- if revision is None:
- revision = max(versions[version][arch][threading][exceptions].keys())
- if not location:
- location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
-
- # Get the download url
- url = versions[version][arch][threading][exceptions][revision]
-
- # Tell the user whatzzup
- log.info('finding MinGW %s', '.'.join(str(v) for v in version))
- log.debug(' - arch: %s', arch)
- log.debug(' - threading: %s', threading)
- log.debug(' - exceptions: %s', exceptions)
- log.debug(' - revision: %s', revision)
- log.debug(' - url: %s', url)
-
- # Store each specific revision differently
- slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
- slug = slug.format(
- version = '.'.join(str(v) for v in version),
- arch = arch,
- threading = threading,
- exceptions = exceptions,
- revision = revision
- )
- if arch == 'x86_64':
- root_dir = os.path.join(location, slug, 'mingw64')
- elif arch == 'i686':
- root_dir = os.path.join(location, slug, 'mingw32')
- else:
- raise ValueError('Unknown MinGW arch: ' + arch)
-
- # Download if needed
- if not os.path.exists(root_dir):
- downloaded = download(url, os.path.join(location, slug), log = log)
- if downloaded != root_dir:
- raise ValueError('The location of mingw did not match\n%s\n%s'
- % (downloaded, root_dir))
-
- return root_dir
-
-def str2ver(string):
- '''
- Converts a version string into a tuple
- '''
- try:
- version = tuple(int(v) for v in string.split('.'))
- if len(version) is not 3:
- raise ValueError()
- except ValueError:
- raise argparse.ArgumentTypeError(
- 'please provide a three digit version string')
- return version
-
-def main():
- '''
- Invoked when the script is run directly by the python interpreter
- '''
- parser = argparse.ArgumentParser(
- description = 'Downloads a specific version of MinGW',
- formatter_class = argparse.ArgumentDefaultsHelpFormatter
- )
- parser.add_argument('--location',
- help = 'the location to download the compiler to',
- default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
- parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
- help = 'the target MinGW architecture string')
- parser.add_argument('--version', type = str2ver,
- help = 'the version of GCC to download')
- parser.add_argument('--threading', choices = ['posix', 'win32'],
- help = 'the threading type of the compiler')
- parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
- help = 'the method to throw exceptions')
- parser.add_argument('--revision', type=int,
- help = 'the revision of the MinGW release')
- group = parser.add_mutually_exclusive_group()
- group.add_argument('-v', '--verbose', action='store_true',
- help='increase the script output verbosity')
- group.add_argument('-q', '--quiet', action='store_true',
- help='only print errors and warning')
- args = parser.parse_args()
-
- # Create the logger
- logger = logging.getLogger('mingw')
- handler = logging.StreamHandler()
- formatter = logging.Formatter('%(message)s')
- handler.setFormatter(formatter)
- logger.addHandler(handler)
- logger.setLevel(logging.INFO)
- if args.quiet:
- logger.setLevel(logging.WARN)
- if args.verbose:
- logger.setLevel(logging.DEBUG)
-
- # Get MinGW
- root_dir = root(location = args.location, arch = args.arch,
- version = args.version, threading = args.threading,
- exceptions = args.exceptions, revision = args.revision,
- log = logger)
-
- sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
-
-if __name__ == '__main__':
- try:
- main()
- except IOError as e:
- sys.stderr.write('IO error: %s\n' % e)
- sys.exit(1)
- except OSError as e:
- sys.stderr.write('OS error: %s\n' % e)
- sys.exit(1)
- except KeyboardInterrupt as e:
- sys.stderr.write('Killed\n')
- sys.exit(1)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..85e8986
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+numpy == 1.19.4
+scipy == 1.5.4
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..5cdab10
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,140 @@
+import os
+import posixpath
+import re
+import shutil
+import sys
+
+from distutils import sysconfig
+import setuptools
+from setuptools.command import build_ext
+
+
+HERE = os.path.dirname(os.path.abspath(__file__))
+
+
+IS_WINDOWS = sys.platform.startswith("win")
+
+
+def _get_version():
+ """Parse the version string from __init__.py."""
+ with open(
+ os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py")
+ ) as init_file:
+ try:
+ version_line = next(
+ line for line in init_file if line.startswith("__version__")
+ )
+ except StopIteration:
+ raise ValueError("__version__ not defined in __init__.py")
+ else:
+ namespace = {}
+ exec(version_line, namespace) # pylint: disable=exec-used
+ return namespace["__version__"]
+
+
+def _parse_requirements(path):
+ with open(os.path.join(HERE, path)) as requirements:
+ return [
+ line.rstrip()
+ for line in requirements
+ if not (line.isspace() or line.startswith("#"))
+ ]
+
+
+class BazelExtension(setuptools.Extension):
+ """A C/C++ extension that is defined as a Bazel BUILD target."""
+
+ def __init__(self, name, bazel_target):
+ self.bazel_target = bazel_target
+ self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split(
+ ":"
+ )
+ setuptools.Extension.__init__(self, name, sources=[])
+
+
+class BuildBazelExtension(build_ext.build_ext):
+ """A command that runs Bazel to build a C/C++ extension."""
+
+ def run(self):
+ for ext in self.extensions:
+ self.bazel_build(ext)
+ build_ext.build_ext.run(self)
+
+ def bazel_build(self, ext):
+ """Runs the bazel build to create the package."""
+ with open("WORKSPACE", "r") as workspace:
+ workspace_contents = workspace.read()
+
+ with open("WORKSPACE", "w") as workspace:
+ workspace.write(
+ re.sub(
+ r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
+ sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
+ workspace_contents,
+ )
+ )
+
+ if not os.path.exists(self.build_temp):
+ os.makedirs(self.build_temp)
+
+ bazel_argv = [
+ "bazel",
+ "build",
+ ext.bazel_target,
+ "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"),
+ "--compilation_mode=" + ("dbg" if self.debug else "opt"),
+ ]
+
+ if IS_WINDOWS:
+ # Link with python*.lib.
+ for library_dir in self.library_dirs:
+ bazel_argv.append("--linkopt=/LIBPATH:" + library_dir)
+
+ self.spawn(bazel_argv)
+
+ shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
+ ext_bazel_bin_path = os.path.join(
+ self.build_temp, 'bazel-bin',
+ ext.relpath, ext.target_name + shared_lib_suffix)
+
+ ext_dest_path = self.get_ext_fullpath(ext.name)
+ ext_dest_dir = os.path.dirname(ext_dest_path)
+ if not os.path.exists(ext_dest_dir):
+ os.makedirs(ext_dest_dir)
+ shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
+
+
+setuptools.setup(
+ name="google_benchmark",
+ version=_get_version(),
+ url="https://github.com/google/benchmark",
+ description="A library to benchmark code snippets.",
+ author="Google",
+ author_email="benchmark-py@google.com",
+ # Contained modules and scripts.
+ package_dir={"": "bindings/python"},
+ packages=setuptools.find_packages("bindings/python"),
+ install_requires=_parse_requirements("bindings/python/requirements.txt"),
+ cmdclass=dict(build_ext=BuildBazelExtension),
+ ext_modules=[
+ BazelExtension(
+ "google_benchmark._benchmark",
+ "//bindings/python/google_benchmark:_benchmark",
+ )
+ ],
+ zip_safe=False,
+ # PyPI package information.
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: Apache Software License",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Topic :: Software Development :: Testing",
+ "Topic :: System :: Benchmark",
+ ],
+ license="Apache 2.0",
+ keywords="benchmark",
+)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 81c902c..35d559e 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -23,7 +23,6 @@ set_target_properties(benchmark PROPERTIES
OUTPUT_NAME "benchmark"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
- DEBUG_POSTFIX "d"
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
@@ -61,7 +60,6 @@ set_target_properties(benchmark_main PROPERTIES
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
- DEBUG_POSTFIX "d"
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
diff --git a/src/benchmark.cc b/src/benchmark.cc
index b751b9c..1c049f2 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -284,10 +284,10 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
}
// Disable deprecated warnings temporarily because we need to reference
-// CSVReporter but don't want to trigger -Werror=-Wdeprecated
+// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
#ifdef __GNUC__
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated"
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(
diff --git a/src/benchmark_register.cc b/src/benchmark_register.cc
index cca39b2..65d9944 100644
--- a/src/benchmark_register.cc
+++ b/src/benchmark_register.cc
@@ -31,6 +31,7 @@
#include <fstream>
#include <iostream>
#include <memory>
+#include <numeric>
#include <sstream>
#include <thread>
@@ -303,33 +304,41 @@ Benchmark* Benchmark::Ranges(
const std::vector<std::pair<int64_t, int64_t>>& ranges) {
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
- std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++) {
AddRange(&arglists[i], ranges[i].first, ranges[i].second,
range_multiplier_);
- total *= arglists[i].size();
}
- std::vector<std::size_t> ctr(arglists.size(), 0);
+ ArgsProduct(arglists);
- for (std::size_t i = 0; i < total; i++) {
- std::vector<int64_t> tmp;
- tmp.reserve(arglists.size());
-
- for (std::size_t j = 0; j < arglists.size(); j++) {
- tmp.push_back(arglists[j].at(ctr[j]));
- }
+ return this;
+}
- args_.push_back(std::move(tmp));
+Benchmark* Benchmark::ArgsProduct(
+ const std::vector<std::vector<int64_t>>& arglists) {
+ CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
- for (std::size_t j = 0; j < arglists.size(); j++) {
- if (ctr[j] + 1 < arglists[j].size()) {
- ++ctr[j];
- break;
- }
- ctr[j] = 0;
+ std::vector<std::size_t> indices(arglists.size());
+ const std::size_t total = std::accumulate(
+ std::begin(arglists), std::end(arglists), std::size_t{1},
+ [](const std::size_t res, const std::vector<int64_t>& arglist) {
+ return res * arglist.size();
+ });
+ std::vector<int64_t> args;
+ args.reserve(arglists.size());
+ for (std::size_t i = 0; i < total; i++) {
+ for (std::size_t arg = 0; arg < arglists.size(); arg++) {
+ args.push_back(arglists[arg][indices[arg]]);
}
+ args_.push_back(args);
+ args.clear();
+
+ std::size_t arg = 0;
+ do {
+ indices[arg] = (indices[arg] + 1) % arglists[arg].size();
+ } while (indices[arg++] == 0 && arg < arglists.size());
}
+
return this;
}
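Editor's note: the ArgsProduct() builder introduced above is the public counterpart of the old inlined cross-product loop in Ranges(), and it is exercised by test/args_product_test.cc later in this change. A minimal usage sketch (the benchmark name and workload are hypothetical, not taken from this diff):

static void BM_Lookup(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.range(0) + state.range(1));
  }
}
// Registers the full cross product {8, 64, 512} x {1, 2, 4}: nine argument sets.
BENCHMARK(BM_Lookup)->ArgsProduct({{8, 64, 512}, {1, 2, 4}});
BENCHMARK_MAIN();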
diff --git a/src/benchmark_register.h b/src/benchmark_register.h
index 61377d7..c774e6f 100644
--- a/src/benchmark_register.h
+++ b/src/benchmark_register.h
@@ -1,6 +1,7 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
+#include <limits>
#include <vector>
#include "check.h"
@@ -86,7 +87,7 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
}
// Treat 0 as a special case (see discussion on #762).
- if (lo <= 0 && hi >= 0) {
+ if (lo < 0 && hi >= 0) {
dst->push_back(0);
}
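Editor's note: the effect of changing `lo <= 0` to `lo < 0` above is that a range whose lower bound is already 0 no longer gets 0 pushed a second time by this special case; the new ZeroStartingRange gtest later in this diff checks exactly that. Sketch using the internal helper the same way that gtest does:

std::vector<int> dst;
AddRange(&dst, 0, 2, /*mult=*/2);
// dst is now {0, 1, 2}; with the old `lo <= 0` test the 0 appeared twice.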
diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index 337fac1..7bc6b63 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -117,7 +117,7 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
State st = b->Run(iters, thread_id, &timer, manager);
- CHECK(st.iterations() >= st.max_iterations)
+ CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
@@ -263,8 +263,9 @@ class BenchmarkRunner {
if (multiplier <= 1.0) multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up.
- const IterationCount max_next_iters =
- std::lround(std::max(multiplier * i.iters, i.iters + 1.0));
+ const IterationCount max_next_iters = static_cast<IterationCount>(
+ std::lround(std::max(multiplier * static_cast<double>(i.iters),
+ static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits though..
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
diff --git a/src/commandlineflags.cc b/src/commandlineflags.cc
index 3380a12..0648fe3 100644
--- a/src/commandlineflags.cc
+++ b/src/commandlineflags.cc
@@ -88,7 +88,7 @@ static std::string FlagToEnvVar(const char* flag) {
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
- return "BENCHMARK_" + env_var;
+ return env_var;
}
} // namespace
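Editor's note: with the prefix dropped above, a flag name now maps directly to its upper-cased form, which is what the rewritten commandlineflags_gtest.cc later in this diff expects; for the library's own flags, whose names already start with benchmark_, the variable consulted becomes e.g. BENCHMARK_FORMAT rather than BENCHMARK_BENCHMARK_FORMAT. A hedged, POSIX-only sketch mirroring those tests:

setenv("IN_ENV", "1", /*overwrite=*/1);
// Consults IN_ENV (previously BENCHMARK_IN_ENV), so the default is overridden.
bool value = benchmark::BoolFromEnv("in_env", false);  // true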
diff --git a/src/cycleclock.h b/src/cycleclock.h
index d5d62c4..6843b69 100644
--- a/src/cycleclock.h
+++ b/src/cycleclock.h
@@ -36,7 +36,7 @@
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
-#if defined(COMPILER_MSVC) && !defined(_M_IX86)
+#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
@@ -84,13 +84,21 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
- int64_t tbl, tbu0, tbu1;
- asm("mftbu %0" : "=r"(tbu0));
- asm("mftb %0" : "=r"(tbl));
- asm("mftbu %0" : "=r"(tbu1));
- tbl &= -static_cast<int64_t>(tbu0 == tbu1);
- // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
- return (tbu1 << 32) | tbl;
+#if defined(__powerpc64__) || defined(__ppc64__)
+ int64_t tb;
+ asm volatile("mfspr %0, 268" : "=r"(tb));
+ return tb;
+#else
+ uint32_t tbl, tbu0, tbu1;
+ asm volatile(
+ "mftbu %0\n"
+ "mftb %1\n"
+ "mftbu %2"
+ : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
+ tbl &= -static_cast<int32_t>(tbu0 == tbu1);
+ // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
+ return (static_cast<uint64_t>(tbu1) << 32) | tbl;
+#endif
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
@@ -106,6 +114,12 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
+#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
+ // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics?view=vs-2019
+ // and https://reviews.llvm.org/D53115
+ int64_t virtual_timer_value;
+ virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
+ return virtual_timer_value;
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
@@ -153,7 +167,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__mips__)
+#elif defined(__mips__) || defined(__m68k__)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
@@ -162,21 +176,32 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
+#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
+ // z/OS XL compiler HLASM syntax.
+ asm(" stck %0" : "=m"(tsc) : : "cc");
+#else
asm("stck %0" : "=Q"(tsc) : : "cc");
+#endif
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
- uint64_t cycles_low, cycles_hi0, cycles_hi1;
- asm("rdcycleh %0" : "=r"(cycles_hi0));
- asm("rdcycle %0" : "=r"(cycles_lo));
- asm("rdcycleh %0" : "=r"(cycles_hi1));
- // This matches the PowerPC overflow detection, above
- cycles_lo &= -static_cast<int64_t>(cycles_hi0 == cycles_hi1);
- return (cycles_hi1 << 32) | cycles_lo;
+ uint32_t cycles_lo, cycles_hi0, cycles_hi1;
+ // This asm also includes the PowerPC overflow handling strategy, as above.
+ // Implemented in assembly because Clang insisted on branching.
+ asm volatile(
+ "rdcycleh %0\n"
+ "rdcycle %1\n"
+ "rdcycleh %2\n"
+ "sub %0, %0, %2\n"
+ "seqz %0, %0\n"
+ "sub %0, zero, %0\n"
+ "and %1, %1, %0\n"
+ : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
+ return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
uint64_t cycles;
- asm("rdcycle %0" : "=r"(cycles));
+ asm volatile("rdcycle %0" : "=r"(cycles));
return cycles;
#endif
#else
diff --git a/src/internal_macros.h b/src/internal_macros.h
index 6adf00d..91f367b 100644
--- a/src/internal_macros.h
+++ b/src/internal_macros.h
@@ -13,7 +13,11 @@
#endif
#if defined(__clang__)
- #if !defined(COMPILER_CLANG)
+ #if defined(__ibmxl__)
+ #if !defined(COMPILER_IBMXL)
+ #define COMPILER_IBMXL
+ #endif
+ #elif !defined(COMPILER_CLANG)
#define COMPILER_CLANG
#endif
#elif defined(_MSC_VER)
@@ -58,6 +62,8 @@
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
+#elif defined(__DragonFly__)
+ #define BENCHMARK_OS_DRAGONFLY 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
@@ -72,6 +78,8 @@
#define BENCHMARK_OS_SOLARIS 1
#elif defined(__QNX__)
#define BENCHMARK_OS_QNX 1
+#elif defined(__MVS__)
+#define BENCHMARK_OS_ZOS 1
#endif
#if defined(__ANDROID__) && defined(__GLIBCXX__)
diff --git a/src/json_reporter.cc b/src/json_reporter.cc
index e5f3c35..959d245 100644
--- a/src/json_reporter.cc
+++ b/src/json_reporter.cc
@@ -122,8 +122,10 @@ bool JSONReporter::ReportContext(const Context& context) {
<< FormatKV("mhz_per_cpu",
RoundDouble(info.cycles_per_second / 1000000.0))
<< ",\n";
- out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled)
- << ",\n";
+ if (CPUInfo::Scaling::UNKNOWN != info.scaling) {
+ out << indent << FormatKV("cpu_scaling_enabled", info.scaling == CPUInfo::Scaling::ENABLED ? true : false)
+ << ",\n";
+ }
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
diff --git a/src/reporter.cc b/src/reporter.cc
index 0b54fa4..337575a 100644
--- a/src/reporter.cc
+++ b/src/reporter.cc
@@ -64,7 +64,7 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out,
Out << "\n";
}
- if (info.scaling_enabled) {
+ if (CPUInfo::Scaling::ENABLED == info.scaling) {
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
diff --git a/src/sleep.cc b/src/sleep.cc
index 1512ac9..4609d54 100644
--- a/src/sleep.cc
+++ b/src/sleep.cc
@@ -24,6 +24,10 @@
#include <windows.h>
#endif
+#ifdef BENCHMARK_OS_ZOS
+#include <unistd.h>
+#endif
+
namespace benchmark {
#ifdef BENCHMARK_OS_WINDOWS
// Window's Sleep takes milliseconds argument.
@@ -33,11 +37,23 @@ void SleepForSeconds(double seconds) {
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds) {
+#ifdef BENCHMARK_OS_ZOS
+ // z/OS does not support nanosleep. Instead call sleep() and then usleep() to
+ // sleep for the remaining microseconds because usleep() will fail if its
+ // argument is greater than 1000000.
+ div_t sleepTime = div(microseconds, kNumMicrosPerSecond);
+ int seconds = sleepTime.quot;
+ while (seconds != 0)
+ seconds = sleep(seconds);
+ while (usleep(sleepTime.rem) == -1 && errno == EINTR)
+ ;
+#else
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
+#endif
}
void SleepForMilliseconds(int milliseconds) {
diff --git a/src/string_util.cc b/src/string_util.cc
index 39b01a1..ac60b55 100644
--- a/src/string_util.cc
+++ b/src/string_util.cc
@@ -1,6 +1,9 @@
#include "string_util.h"
#include <array>
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+#include <cerrno>
+#endif
#include <cmath>
#include <cstdarg>
#include <cstdio>
diff --git a/src/sysinfo.cc b/src/sysinfo.cc
index 5b7c4af..b30b4f8 100644
--- a/src/sysinfo.cc
+++ b/src/sysinfo.cc
@@ -29,7 +29,8 @@
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \
- defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD
+ defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \
+ defined BENCHMARK_OS_DRAGONFLY
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
@@ -57,6 +58,7 @@
#include <memory>
#include <sstream>
#include <locale>
+#include <utility>
#include "check.h"
#include "cycleclock.h"
@@ -209,11 +211,11 @@ bool ReadFromFile(std::string const& fname, ArgT* arg) {
return f.good();
}
-bool CpuScalingEnabled(int num_cpus) {
+CPUInfo::Scaling CpuScaling(int num_cpus) {
// We don't have a valid CPU count, so don't even bother.
- if (num_cpus <= 0) return false;
+ if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN;
#ifdef BENCHMARK_OS_QNX
- return false;
+ return CPUInfo::Scaling::UNKNOWN;
#endif
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
@@ -223,10 +225,11 @@ bool CpuScalingEnabled(int num_cpus) {
for (int cpu = 0; cpu < num_cpus; ++cpu) {
std::string governor_file =
StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
- if (ReadFromFile(governor_file, &res) && res != "performance") return true;
+ if (ReadFromFile(governor_file, &res) && res != "performance") return CPUInfo::Scaling::ENABLED;
}
+ return CPUInfo::Scaling::DISABLED;
#endif
- return false;
+ return CPUInfo::Scaling::UNKNOWN;
}
int CountSetBitsInCPUMap(std::string Val) {
@@ -382,9 +385,11 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX() {
case CACHE_FLAG_UNIFIED :
info.type = "Unified";
info.level = 2;
+ break;
case CACHE_FLAG_SHARED :
info.type = "Shared";
info.level = 3;
+ break;
default :
continue;
break;
@@ -603,6 +608,8 @@ double GetCPUCyclesPerSecond() {
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
+#elif defined BENCHMARK_OS_DRAGONFLY
+ "hw.tsc_frequency";
#else
"hw.cpufrequency";
#endif
@@ -667,9 +674,10 @@ double GetCPUCyclesPerSecond() {
}
std::vector<double> GetLoadAvg() {
-#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
- defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
- defined BENCHMARK_OS_OPENBSD) && !defined(__ANDROID__)
+#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \
+ defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
+ defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \
+ !defined(__ANDROID__)
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
@@ -695,7 +703,7 @@ CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()),
cycles_per_second(GetCPUCyclesPerSecond()),
caches(GetCacheSizes()),
- scaling_enabled(CpuScalingEnabled(num_cpus)),
+ scaling(CpuScaling(num_cpus)),
load_avg(GetLoadAvg()) {}
diff --git a/src/timers.cc b/src/timers.cc
index 7613ff9..1d3ab9a 100644
--- a/src/timers.cc
+++ b/src/timers.cc
@@ -28,7 +28,8 @@
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
-#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
+#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \
+ defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#if defined(BENCHMARK_OS_MACOSX)
@@ -178,40 +179,67 @@ double ThreadCPUUsage() {
#endif
}
-namespace {
-
-std::string DateTimeString(bool local) {
+std::string LocalDateTimeString() {
+ // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM.
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
- const std::size_t kStorageSize = 128;
- char storage[kStorageSize];
- std::size_t written;
+ const std::size_t kTzOffsetLen = 6;
+ const std::size_t kTimestampLen = 19;
+
+ std::size_t tz_len;
+ std::size_t timestamp_len;
+ long int offset_minutes;
+ char tz_offset_sign = '+';
+ // Long enough buffers to avoid format-overflow warnings
+ char tz_offset[128];
+ char storage[128];
- if (local) {
#if defined(BENCHMARK_OS_WINDOWS)
- written =
- std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
+ std::tm *timeinfo_p = ::localtime(&now);
#else
- std::tm timeinfo;
- ::localtime_r(&now, &timeinfo);
- written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
+ std::tm timeinfo;
+ std::tm *timeinfo_p = &timeinfo;
+ ::localtime_r(&now, &timeinfo);
#endif
+
+ tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p);
+
+ if (tz_len < kTzOffsetLen && tz_len > 1) {
+ // Timezone offset was written. strftime writes offset as +HHMM or -HHMM,
+ // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse
+ // the offset as an integer, then reprint it to a string.
+
+ offset_minutes = ::strtol(tz_offset, NULL, 10);
+ if (offset_minutes < 0) {
+ offset_minutes *= -1;
+ tz_offset_sign = '-';
+ }
+
+ tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
+ tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
+ CHECK(tz_len == kTzOffsetLen);
+ ((void)tz_len); // Prevent unused variable warning in optimized build.
} else {
+ // Unknown offset. RFC3339 specifies that unknown local offsets should be
+ // written as UTC time with -00:00 timezone.
#if defined(BENCHMARK_OS_WINDOWS)
- written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
+ // Potential race condition if another thread calls localtime or gmtime.
+ timeinfo_p = ::gmtime(&now);
#else
- std::tm timeinfo;
::gmtime_r(&now, &timeinfo);
- written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
+
+ strncpy(tz_offset, "-00:00", kTzOffsetLen + 1);
}
- CHECK(written < kStorageSize);
- ((void)written); // prevent unused variable in optimized mode.
- return std::string(storage);
-}
-} // end namespace
+ timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S",
+ timeinfo_p);
+ CHECK(timestamp_len == kTimestampLen);
+ // Prevent unused variable warning in optimized build.
+ ((void)kTimestampLen);
-std::string LocalDateTimeString() { return DateTimeString(true); }
+ std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1);
+ return std::string(storage);
+}
} // end namespace benchmark
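Editor's note: the net effect of the timers.cc rewrite above is that the date in the benchmark context header switches from a plain "%F %T"-style stamp to RFC3339 with an explicit offset, falling back to UTC with a -00:00 suffix when the local offset cannot be determined. Illustrative values only:

// before:  2021-07-14 18:31:49
// after:   2021-07-14T18:31:49-07:00   (or ...T01:31:49-00:00 if the offset is unknown)

The reporter_output_test.cc regex further down in this diff is updated to match the new form.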
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 0d228b8..c1a3a3f 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -113,6 +113,9 @@ add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
+compile_benchmark_test(args_product_test)
+add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01)
+
compile_benchmark_test_with_main(link_main_test)
add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
diff --git a/test/args_product_test.cc b/test/args_product_test.cc
new file mode 100644
index 0000000..8a859f8
--- /dev/null
+++ b/test/args_product_test.cc
@@ -0,0 +1,77 @@
+#include "benchmark/benchmark.h"
+
+#include <cassert>
+#include <iostream>
+#include <set>
+#include <vector>
+
+class ArgsProductFixture : public ::benchmark::Fixture {
+ public:
+ ArgsProductFixture()
+ : expectedValues({{0, 100, 2000, 30000},
+ {1, 15, 3, 8},
+ {1, 15, 3, 9},
+ {1, 15, 7, 8},
+ {1, 15, 7, 9},
+ {1, 15, 10, 8},
+ {1, 15, 10, 9},
+ {2, 15, 3, 8},
+ {2, 15, 3, 9},
+ {2, 15, 7, 8},
+ {2, 15, 7, 9},
+ {2, 15, 10, 8},
+ {2, 15, 10, 9},
+ {4, 5, 6, 11}}) {}
+
+ void SetUp(const ::benchmark::State& state) {
+ std::vector<int64_t> ranges = {state.range(0), state.range(1),
+ state.range(2), state.range(3)};
+
+ assert(expectedValues.find(ranges) != expectedValues.end());
+
+ actualValues.insert(ranges);
+ }
+
+ // NOTE: This is not TearDown as we want to check after _all_ runs are
+ // complete.
+ virtual ~ArgsProductFixture() {
+ if (actualValues != expectedValues) {
+ std::cout << "EXPECTED\n";
+ for (auto v : expectedValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ std::cout << "ACTUAL\n";
+ for (auto v : actualValues) {
+ std::cout << "{";
+ for (int64_t iv : v) {
+ std::cout << iv << ", ";
+ }
+ std::cout << "}\n";
+ }
+ }
+ }
+
+ std::set<std::vector<int64_t>> expectedValues;
+ std::set<std::vector<int64_t>> actualValues;
+};
+
+BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) {
+ for (auto _ : state) {
+ int64_t product =
+ state.range(0) * state.range(1) * state.range(2) * state.range(3);
+ for (int64_t x = 0; x < product; x++) {
+ benchmark::DoNotOptimize(x);
+ }
+ }
+}
+
+BENCHMARK_REGISTER_F(ArgsProductFixture, Empty)
+ ->Args({0, 100, 2000, 30000})
+ ->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}})
+ ->Args({4, 5, 6, 11});
+
+BENCHMARK_MAIN();
diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc
index 9557b20..6dbf7a5 100644
--- a/test/benchmark_gtest.cc
+++ b/test/benchmark_gtest.cc
@@ -90,6 +90,12 @@ TEST(AddRangeTest, ZeroOnlyRange) {
EXPECT_THAT(dst, testing::ElementsAre(0));
}
+TEST(AddRangeTest, ZeroStartingRange) {
+ std::vector<int> dst;
+ AddRange(&dst, 0, 2, 2);
+ EXPECT_THAT(dst, testing::ElementsAre(0, 1, 2));
+}
+
TEST(AddRangeTest, NegativeRange64) {
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
diff --git a/test/commandlineflags_gtest.cc b/test/commandlineflags_gtest.cc
index 36bdb44..656020f 100644
--- a/test/commandlineflags_gtest.cc
+++ b/test/commandlineflags_gtest.cc
@@ -26,175 +26,175 @@ int unsetenv(const char* name) {
#endif // BENCHMARK_OS_WINDOWS
TEST(BoolFromEnv, Default) {
- ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(BoolFromEnv("not_in_env", true), true);
}
TEST(BoolFromEnv, False) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "0", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "N", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "N", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "n", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "n", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "NO", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "No", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "No", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "no", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "no", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "F", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "F", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "f", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "f", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "FALSE", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "False", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "False", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "false", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "false", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "OFF", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Off", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "off", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "off", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", true), false);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
TEST(BoolFromEnv, True) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "1", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "1", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Y", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "y", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "y", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "YES", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "Yes", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "yes", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "T", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "T", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "t", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "t", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "TRUE", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "True", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "True", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "true", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "true", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "ON", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "On", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "On", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "on", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "on", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
#ifndef BENCHMARK_OS_WINDOWS
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "", 1), 0);
EXPECT_EQ(BoolFromEnv("in_env", false), true);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
#endif
}
TEST(Int32FromEnv, NotInEnv) {
- ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42);
}
TEST(Int32FromEnv, InvalidInteger) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 42), 42);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
TEST(Int32FromEnv, ValidInteger) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "42", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "42", 1), 0);
EXPECT_EQ(Int32FromEnv("in_env", 64), 42);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
TEST(DoubleFromEnv, NotInEnv) {
- ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51);
}
TEST(DoubleFromEnv, InvalidReal) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
TEST(DoubleFromEnv, ValidReal) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "0.51", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0);
EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51);
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
TEST(StringFromEnv, Default) {
- ASSERT_EQ(unsetenv("BENCHMARK_NOT_IN_ENV"), 0);
+ ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0);
EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo");
}
TEST(StringFromEnv, Valid) {
- ASSERT_EQ(setenv("BENCHMARK_IN_ENV", "foo", 1), 0);
+ ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0);
EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo");
- unsetenv("BENCHMARK_IN_ENV");
+ unsetenv("IN_ENV");
}
} // namespace
diff --git a/test/fixture_test.cc b/test/fixture_test.cc
index 1462b10..a331c7d 100644
--- a/test/fixture_test.cc
+++ b/test/fixture_test.cc
@@ -4,7 +4,9 @@
#include <cassert>
#include <memory>
-class MyFixture : public ::benchmark::Fixture {
+#define FIXTURE_BECHMARK_NAME MyFixture
+
+class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture {
public:
void SetUp(const ::benchmark::State& state) {
if (state.thread_index == 0) {
@@ -20,19 +22,19 @@ class MyFixture : public ::benchmark::Fixture {
}
}
- ~MyFixture() { assert(data == nullptr); }
+ ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); }
std::unique_ptr<int> data;
};
-BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
+BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
for (auto _ : st) {
}
}
-BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
+BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) {
if (st.thread_index == 0) {
assert(data.get() != nullptr);
assert(*data == 42);
@@ -43,7 +45,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
}
st.SetItemsProcessed(st.range(0));
}
-BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42);
-BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu();
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42);
+BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42)->ThreadPerCpu();
BENCHMARK_MAIN();
diff --git a/test/options_test.cc b/test/options_test.cc
index 7bfc235..9f9a786 100644
--- a/test/options_test.cc
+++ b/test/options_test.cc
@@ -25,6 +25,7 @@ BENCHMARK(BM_basic)->Arg(42);
BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond);
BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond);
BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond);
+BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kSecond);
BENCHMARK(BM_basic)->Range(1, 8);
BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8);
BENCHMARK(BM_basic)->DenseRange(10, 15);
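Editor's note: the new kSecond case above gets matching console/CSV substitutions in output_test_helper.cc and full reporter coverage in reporter_output_test.cc below. Usage is the same builder call as the other units (benchmark name hypothetical):

static void BM_VerySlow(benchmark::State& state) {
  for (auto _ : state) {
    // ... work on the order of seconds ...
  }
}
BENCHMARK(BM_VerySlow)->Unit(benchmark::kSecond);  // times reported in s, not ns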
diff --git a/test/output_test_helper.cc b/test/output_test_helper.cc
index bdb34c8..1aebc55 100644
--- a/test/output_test_helper.cc
+++ b/test/output_test_helper.cc
@@ -48,6 +48,9 @@ SubMap& GetSubstitutions() {
{" %s ", "[ ]+"},
{"%time", "[ ]*" + time_re + "[ ]+ns"},
{"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"},
+ {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
+ {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"},
+ {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"},
{"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"},
{"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"},
{"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"},
@@ -56,6 +59,8 @@ SubMap& GetSubstitutions() {
"items_per_second,label,error_occurred,error_message"},
{"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"},
{"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"},
+ {"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"},
+ {"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"},
{"%csv_bytes_report",
"[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"},
{"%csv_items_report",
@@ -374,10 +379,10 @@ int SetSubstitutions(
}
// Disable deprecated warnings temporarily because we need to reference
-// CSVReporter but don't want to trigger -Werror=-Wdeprecated
+// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations
#ifdef __GNUC__
#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wdeprecated"
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
void RunOutputTests(int argc, char* argv[]) {
using internal::GetTestCaseList;
diff --git a/test/reporter_output_test.cc b/test/reporter_output_test.cc
index 1a96b5f..d24a57d 100644
--- a/test/reporter_output_test.cc
+++ b/test/reporter_output_test.cc
@@ -15,7 +15,7 @@ ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
- {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default},
+ {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
{"Running .*/reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
@@ -28,8 +28,7 @@ static int AddContextCases() {
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
- {"\"cpu_scaling_enabled\": ", MR_Next},
- {"\"caches\": \\[$", MR_Next}});
+ {"\"caches\": \\[$", MR_Default}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
@@ -170,6 +169,93 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
"label\"%csv_label_report_end$"}});
// ========================================================================= //
+// ------------------------ Testing Time Label Output ---------------------- //
+// ========================================================================= //
+
+void BM_time_label_nanosecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_nanosecond\",$"},
+ {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 0,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ns\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});
+
+void BM_time_label_microsecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_microsecond\",$"},
+ {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 0,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"us\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});
+
+void BM_time_label_millisecond(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
+ADD_CASES(TC_JSONOut,
+ {{"\"name\": \"BM_time_label_millisecond\",$"},
+ {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 0,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"ms\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});
+
+void BM_time_label_second(benchmark::State& state) {
+ for (auto _ : state) {
+ }
+}
+BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);
+
+ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
+ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
+ {"\"run_name\": \"BM_time_label_second\",$", MR_Next},
+ {"\"run_type\": \"iteration\",$", MR_Next},
+ {"\"repetitions\": 0,$", MR_Next},
+ {"\"repetition_index\": 0,$", MR_Next},
+ {"\"threads\": 1,$", MR_Next},
+ {"\"iterations\": %int,$", MR_Next},
+ {"\"real_time\": %float,$", MR_Next},
+ {"\"cpu_time\": %float,$", MR_Next},
+ {"\"time_unit\": \"s\"$", MR_Next},
+ {"}", MR_Next}});
+ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});
+
+// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
@@ -713,7 +799,7 @@ ADD_CASES(
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
// ========================================================================= //
-#if 0 // enable when csv testing code correctly handles multi-line fields
+#if 0 // enable when csv testing code correctly handles multi-line fields
void BM_JSON_Format(benchmark::State& state) {
state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
for (auto _ : state) {
diff --git a/test/skip_with_error_test.cc b/test/skip_with_error_test.cc
index 0657977..97a2e3c 100644
--- a/test/skip_with_error_test.cc
+++ b/test/skip_with_error_test.cc
@@ -61,6 +61,12 @@ int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) {
} // end namespace
+void BM_error_no_running(benchmark::State& state) {
+ state.SkipWithError("error message");
+}
+BENCHMARK(BM_error_no_running);
+ADD_CASES("BM_error_no_running", {{"", true, "error message"}});
+
void BM_error_before_running(benchmark::State& state) {
state.SkipWithError("error message");
while (state.KeepRunning()) {
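Editor's note: the new BM_error_no_running case above, together with the relaxed CHECK in benchmark_runner.cc earlier in this diff, means a benchmark may now call SkipWithError() and return without ever entering its measurement loop. A sketch (the precondition helper is hypothetical):

static void BM_NeedsDevice(benchmark::State& state) {
  if (!DeviceAvailable()) {  // hypothetical precondition check
    state.SkipWithError("device not available");
    return;  // no KeepRunning()/range-for loop required any more
  }
  for (auto _ : state) {
    // ... measured work ...
  }
}
BENCHMARK(BM_NeedsDevice);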
diff --git a/test/statistics_gtest.cc b/test/statistics_gtest.cc
index 99e3149..3ddc72d 100644
--- a/test/statistics_gtest.cc
+++ b/test/statistics_gtest.cc
@@ -21,8 +21,8 @@ TEST(StatisticsTest, Median) {
TEST(StatisticsTest, StdDev) {
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0);
EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0);
- EXPECT_FLOAT_EQ(benchmark::StatisticsStdDev({1.5, 2.4, 3.3, 4.2, 5.1}),
- 1.42302495);
+ EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}),
+ 1.151086443322134);
}
} // end namespace
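Editor's note, as a sanity check on the new expected value: StatisticsStdDev evidently returns the sample standard deviation, since for {2.5, 2.4, 3.3, 4.2, 5.1} the mean is 17.5 / 5 = 3.5, the squared deviations sum to 1.00 + 1.21 + 0.04 + 0.49 + 2.56 = 5.30, and sqrt(5.30 / 4) ≈ 1.151086443322134; the {1, 2, 3} case above is consistent with the same n-1 convention.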
diff --git a/tools/BUILD.bazel b/tools/BUILD.bazel
new file mode 100644
index 0000000..5895883
--- /dev/null
+++ b/tools/BUILD.bazel
@@ -0,0 +1,19 @@
+load("@py_deps//:requirements.bzl", "requirement")
+
+py_library(
+ name = "gbench",
+ srcs = glob(["gbench/*.py"]),
+ deps = [
+ requirement("numpy"),
+ requirement("scipy"),
+ ],
+)
+
+py_binary(
+ name = "compare",
+ srcs = ["compare.py"],
+ python_version = "PY2",
+ deps = [
+ ":gbench",
+ ],
+)
diff --git a/tools/compare.py b/tools/compare.py
index 539ace6..66eed93 100755
--- a/tools/compare.py
+++ b/tools/compare.py
@@ -7,6 +7,7 @@ compare.py - versatile benchmark output compare tool
import argparse
from argparse import ArgumentParser
+import json
import sys
import gbench
from gbench import util, report
@@ -48,6 +49,20 @@ def create_parser():
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
+ parser.add_argument(
+ '--no-color',
+ dest='color',
+ default=True,
+ action="store_false",
+ help="Do not use colors in the terminal output"
+ )
+
+ parser.add_argument(
+ '-d',
+ '--dump_to_json',
+ dest='dump_to_json',
+ help="Additionally, dump benchmark comparison output to this file in JSON format.")
+
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
@@ -236,14 +251,20 @@ def main():
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
- # Diff and output
- output_lines = gbench.report.generate_difference_report(
- json1, json2, args.display_aggregates_only,
- args.utest, args.utest_alpha)
+ diff_report = gbench.report.get_difference_report(
+ json1, json2, args.utest)
+ output_lines = gbench.report.print_difference_report(
+ diff_report,
+ args.display_aggregates_only,
+ args.utest, args.utest_alpha, args.color)
print(description)
for ln in output_lines:
print(ln)
+ # Optionally, diff and output to JSON
+ if args.dump_to_json is not None:
+ with open(args.dump_to_json, 'w') as f_json:
+ json.dump(diff_report, f_json)
class TestParser(unittest.TestCase):
def setUp(self):
diff --git a/tools/gbench/report.py b/tools/gbench/report.py
index 5bd3a8d..bf29492 100644
--- a/tools/gbench/report.py
+++ b/tools/gbench/report.py
@@ -154,6 +154,7 @@ def extract_field(partition, field_name):
rhs = [x[field_name] for x in partition[1]]
return [lhs, rhs]
+
def calc_utest(timings_cpu, timings_time):
min_rep_cnt = min(len(timings_time[0]),
len(timings_time[1]),
@@ -171,46 +172,106 @@ def calc_utest(timings_cpu, timings_time):
return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
-def print_utest(partition, utest_alpha, first_col_width, use_color=True):
+def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
def get_utest_color(pval):
return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
- timings_time = extract_field(partition, 'real_time')
- timings_cpu = extract_field(partition, 'cpu_time')
- have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time)
-
# Check if we failed miserably with minimum required repetitions for utest
- if not have_optimal_repetitions and cpu_pvalue is None and time_pvalue is None:
+ if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
return []
dsc = "U Test, Repetitions: {} vs {}".format(
- len(timings_cpu[0]), len(timings_cpu[1]))
+ utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
dsc_color = BC_OKGREEN
# We still got some results to show but issue a warning about it.
- if not have_optimal_repetitions:
+ if not utest['have_optimal_repetitions']:
dsc_color = BC_WARNING
dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
UTEST_OPTIMAL_REPETITIONS)
special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
- last_name = partition[0][0]['name']
return [color_format(use_color,
special_str,
BC_HEADER,
- "{}{}".format(last_name, UTEST_COL_NAME),
+ "{}{}".format(bc_name, UTEST_COL_NAME),
first_col_width,
- get_utest_color(time_pvalue), time_pvalue,
- get_utest_color(cpu_pvalue), cpu_pvalue,
+ get_utest_color(
+ utest['time_pvalue']), utest['time_pvalue'],
+ get_utest_color(
+ utest['cpu_pvalue']), utest['cpu_pvalue'],
dsc_color, dsc,
endc=BC_ENDC)]
-def generate_difference_report(
+def get_difference_report(
json1,
json2,
- display_aggregates_only=False,
+ utest=False):
+ """
+    Calculate and report the difference between each test of two benchmark
+    runs specified as 'json1' and 'json2'. Output is another JSON containing
+ relevant details for each test run.
+ """
+ assert utest is True or utest is False
+
+ diff_report = []
+ partitions = partition_benchmarks(json1, json2)
+ for partition in partitions:
+ benchmark_name = partition[0][0]['name']
+ time_unit = partition[0][0]['time_unit']
+ measurements = []
+ utest_results = {}
+ # Careful, we may have different repetition count.
+ for i in range(min(len(partition[0]), len(partition[1]))):
+ bn = partition[0][i]
+ other_bench = partition[1][i]
+ measurements.append({
+ 'real_time': bn['real_time'],
+ 'cpu_time': bn['cpu_time'],
+ 'real_time_other': other_bench['real_time'],
+ 'cpu_time_other': other_bench['cpu_time'],
+ 'time': calculate_change(bn['real_time'], other_bench['real_time']),
+ 'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+ })
+
+ # After processing the whole partition, if requested, do the U test.
+ if utest:
+ timings_cpu = extract_field(partition, 'cpu_time')
+ timings_time = extract_field(partition, 'real_time')
+ have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time)
+ if cpu_pvalue and time_pvalue:
+ utest_results = {
+ 'have_optimal_repetitions': have_optimal_repetitions,
+ 'cpu_pvalue': cpu_pvalue,
+ 'time_pvalue': time_pvalue,
+ 'nr_of_repetitions': len(timings_cpu[0]),
+ 'nr_of_repetitions_other': len(timings_cpu[1])
+ }
+
+ # Store only if we had any measurements for given benchmark.
+ # E.g. partition_benchmarks will filter out the benchmarks having
+ # time units which are not compatible with other time units in the
+ # benchmark suite.
+ if measurements:
+ run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
+ aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
+ diff_report.append({
+ 'name': benchmark_name,
+ 'measurements': measurements,
+ 'time_unit': time_unit,
+ 'run_type': run_type,
+ 'aggregate_name': aggregate_name,
+ 'utest': utest_results
+ })
+
+ return diff_report
+
+
+def print_difference_report(
+ json_diff_report,
+ include_aggregates_only=False,
utest=False,
utest_alpha=0.05,
use_color=True):
@@ -219,14 +280,16 @@ def generate_difference_report(
runs specified as 'json1' and 'json2'.
"""
assert utest is True or utest is False
- first_col_width = find_longest_name(json1['benchmarks'])
- def find_test(name):
- for b in json2['benchmarks']:
- if b['name'] == name:
- return b
- return None
+ def get_color(res):
+ if res > 0.05:
+ return BC_FAIL
+ elif res > -0.07:
+ return BC_WHITE
+ else:
+ return BC_CYAN
+ first_col_width = find_longest_name(json_diff_report)
first_col_width = max(
first_col_width,
len('Benchmark'))
@@ -235,50 +298,36 @@ def generate_difference_report(
'Benchmark', 12 + first_col_width)
output_strs = [first_line, '-' * len(first_line)]
- partitions = partition_benchmarks(json1, json2)
- for partition in partitions:
- # Careful, we may have different repetition count.
- for i in range(min(len(partition[0]), len(partition[1]))):
- bn = partition[0][i]
- other_bench = partition[1][i]
+ fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+ for benchmark in json_diff_report:
+ # *If* we were asked to only include aggregates,
+ # and if it is non-aggregate, then skip it.
+ if include_aggregates_only and 'run_type' in benchmark:
+ if benchmark['run_type'] != 'aggregate':
+ continue
- # *If* we were asked to only display aggregates,
- # and if it is non-aggregate, then skip it.
- if display_aggregates_only and 'run_type' in bn and 'run_type' in other_bench:
- assert bn['run_type'] == other_bench['run_type']
- if bn['run_type'] != 'aggregate':
- continue
-
- fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
-
- def get_color(res):
- if res > 0.05:
- return BC_FAIL
- elif res > -0.07:
- return BC_WHITE
- else:
- return BC_CYAN
-
- tres = calculate_change(bn['real_time'], other_bench['real_time'])
- cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+ for measurement in benchmark['measurements']:
output_strs += [color_format(use_color,
fmt_str,
BC_HEADER,
- bn['name'],
+ benchmark['name'],
first_col_width,
- get_color(tres),
- tres,
- get_color(cpures),
- cpures,
- bn['real_time'],
- other_bench['real_time'],
- bn['cpu_time'],
- other_bench['cpu_time'],
+ get_color(measurement['time']),
+ measurement['time'],
+ get_color(measurement['cpu']),
+ measurement['cpu'],
+ measurement['real_time'],
+ measurement['real_time_other'],
+ measurement['cpu_time'],
+ measurement['cpu_time_other'],
endc=BC_ENDC)]
- # After processing the whole partition, if requested, do the U test.
- if utest:
- output_strs += print_utest(partition,
+ # After processing the measurements, if requested and
+ # applicable (i.e. a U test exists for the given benchmark),
+ # print the U test.
+ if utest and benchmark['utest']:
+ output_strs += print_utest(benchmark['name'],
+ benchmark['utest'],
utest_alpha=utest_alpha,
first_col_width=first_col_width,
use_color=use_color)
@@ -319,21 +368,26 @@ class TestGetUniqueBenchmarkNames(unittest.TestCase):
class TestReportDifference(unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test1_run1.json')
- testOutput2 = os.path.join(testInputs, 'test1_run2.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_basic(self):
+ @classmethod
+ def setUpClass(cls):
+ def load_results():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test1_run1.json')
+ testOutput2 = os.path.join(testInputs, 'test1_run2.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ json1, json2 = load_results()
+ cls.json_diff_report = get_difference_report(json1, json2)
+
+ def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'],
['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'],
@@ -351,9 +405,8 @@ class TestReportDifference(unittest.TestCase):
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, use_color=False)
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -363,31 +416,118 @@ class TestReportDifference(unittest.TestCase):
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report_output(self):
+ expected_output = [
+ {
+ 'name': 'BM_SameTimes',
+ 'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_2xFaster',
+ 'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_2xSlower',
+ 'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_1PercentFaster',
+ 'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_1PercentSlower',
+ 'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_10PercentFaster',
+ 'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_10PercentSlower',
+ 'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_100xSlower',
+ 'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_100xFaster',
+ 'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_10PercentCPUToTime',
+ 'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_ThirdFaster',
+ 'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_NotBadTimeUnit',
+ 'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
+ 'time_unit': 's',
+ 'utest': {}
+ },
+ ]
+ self.assertEqual(len(self.json_diff_report), len(expected_output))
+ for out, expected in zip(
+ self.json_diff_report, expected_output):
+ self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['time_unit'], expected['time_unit'])
+ assert_utest(self, out, expected)
+ assert_measurements(self, out, expected)
+
class TestReportDifferenceBetweenFamilies(unittest.TestCase):
- def load_result(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput = os.path.join(testInputs, 'test2_run.json')
- with open(testOutput, 'r') as f:
- json = json.load(f)
- return json
+ @classmethod
+ def setUpClass(cls):
+ def load_result():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput = os.path.join(testInputs, 'test2_run.json')
+ with open(testOutput, 'r') as f:
+ json = json.load(f)
+ return json
+
+ json = load_result()
+ json1 = filter_benchmark(json, "BM_Z.ro", ".")
+ json2 = filter_benchmark(json, "BM_O.e", ".")
+ cls.json_diff_report = get_difference_report(json1, json2)
- def test_basic(self):
+ def test_json_diff_report_pretty_printing(self):
expect_lines = [
['.', '-0.5000', '-0.5000', '10', '5', '10', '5'],
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
]
- json = self.load_result()
- json1 = filter_benchmark(json, "BM_Z.ro", ".")
- json2 = filter_benchmark(json, "BM_O.e", ".")
- output_lines_with_header = generate_difference_report(
- json1, json2, use_color=False)
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -397,24 +537,64 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
self.assertEqual(len(parts), 7)
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report(self):
+ expected_output = [
+ {
+ 'name': u'.',
+ 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': u'./4',
+ 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}],
+ 'time_unit': 'ns',
+ 'utest': {},
+ },
+ {
+ 'name': u'Prefix/.',
+ 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 'cpu_time': 20, 'cpu_time_other': 10}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': u'Prefix/./3',
+ 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
+ 'time_unit': 'ns',
+ 'utest': {}
+ }
+ ]
+ self.assertEqual(len(self.json_diff_report), len(expected_output))
+ for out, expected in zip(
+ self.json_diff_report, expected_output):
+ self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['time_unit'], expected['time_unit'])
+ assert_utest(self, out, expected)
+ assert_measurements(self, out, expected)
+
class TestReportDifferenceWithUTest(unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_utest(self):
- expect_lines = []
+ @classmethod
+ def setUpClass(cls):
+ def load_results():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ json1, json2 = load_results()
+ cls.json_diff_report = get_difference_report(
+ json1, json2, utest=True)
+
+ def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -453,9 +633,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, utest=True, utest_alpha=0.05, use_color=False)
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
print("\n".join(output_lines_with_header))
@@ -464,25 +643,105 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report(self):
+ expected_output = [
+ {
+ 'name': u'BM_One',
+ 'measurements': [
+ {'time': -0.1,
+ 'cpu': 0.1,
+ 'real_time': 10,
+ 'real_time_other': 9,
+ 'cpu_time': 100,
+ 'cpu_time_other': 110}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': u'BM_Two',
+ 'measurements': [
+ {'time': 0.1111111111111111,
+ 'cpu': -0.011111111111111112,
+ 'real_time': 9,
+ 'real_time_other': 10,
+ 'cpu_time': 90,
+ 'cpu_time_other': 89},
+ {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
+ 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+ }
+ },
+ {
+ 'name': u'short',
+ 'measurements': [
+ {'time': -0.125,
+ 'cpu': -0.0625,
+ 'real_time': 8,
+ 'real_time_other': 7,
+ 'cpu_time': 80,
+ 'cpu_time_other': 75},
+ {'time': -0.4325,
+ 'cpu': -0.13506493506493514,
+ 'real_time': 8,
+ 'real_time_other': 4.54,
+ 'cpu_time': 77,
+ 'cpu_time_other': 66.6}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+ }
+ },
+ {
+ 'name': u'medium',
+ 'measurements': [
+ {'time': -0.375,
+ 'cpu': -0.3375,
+ 'real_time': 8,
+ 'real_time_other': 5,
+ 'cpu_time': 80,
+ 'cpu_time_other': 53}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {}
+ }
+ ]
+ self.assertEqual(len(self.json_diff_report), len(expected_output))
+ for out, expected in zip(
+ self.json_diff_report, expected_output):
+ self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['time_unit'], expected['time_unit'])
+ assert_utest(self, out, expected)
+ assert_measurements(self, out, expected)
+
class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
unittest.TestCase):
- def load_results(self):
- import json
- testInputs = os.path.join(
- os.path.dirname(
- os.path.realpath(__file__)),
- 'Inputs')
- testOutput1 = os.path.join(testInputs, 'test3_run0.json')
- testOutput2 = os.path.join(testInputs, 'test3_run1.json')
- with open(testOutput1, 'r') as f:
- json1 = json.load(f)
- with open(testOutput2, 'r') as f:
- json2 = json.load(f)
- return json1, json2
-
- def test_utest(self):
- expect_lines = []
+ @classmethod
+ def setUpClass(cls):
+ def load_results():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test3_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test3_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ json1, json2 = load_results()
+ cls.json_diff_report = get_difference_report(
+ json1, json2, utest=True)
+
+ def test_json_diff_report_pretty_printing(self):
expect_lines = [
['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
@@ -519,10 +778,10 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
'9+',
'repetitions',
'recommended.'],
+ ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
]
- json1, json2 = self.load_results()
- output_lines_with_header = generate_difference_report(
- json1, json2, display_aggregates_only=True,
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report,
utest=True, utest_alpha=0.05, use_color=False)
output_lines = output_lines_with_header[2:]
print("\n")
@@ -532,6 +791,109 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report(self):
+ expected_output = [
+ {
+ 'name': u'BM_One',
+ 'measurements': [
+ {'time': -0.1,
+ 'cpu': 0.1,
+ 'real_time': 10,
+ 'real_time_other': 9,
+ 'cpu_time': 100,
+ 'cpu_time_other': 110}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {}
+ },
+ {
+ 'name': u'BM_Two',
+ 'measurements': [
+ {'time': 0.1111111111111111,
+ 'cpu': -0.011111111111111112,
+ 'real_time': 9,
+ 'real_time_other': 10,
+ 'cpu_time': 90,
+ 'cpu_time_other': 89},
+ {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8,
+ 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+ }
+ },
+ {
+ 'name': u'short',
+ 'measurements': [
+ {'time': -0.125,
+ 'cpu': -0.0625,
+ 'real_time': 8,
+ 'real_time_other': 7,
+ 'cpu_time': 80,
+ 'cpu_time_other': 75},
+ {'time': -0.4325,
+ 'cpu': -0.13506493506493514,
+ 'real_time': 8,
+ 'real_time_other': 4.54,
+ 'cpu_time': 77,
+ 'cpu_time_other': 66.6}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+ }
+ },
+ {
+ 'name': u'medium',
+ 'measurements': [
+ {'real_time_other': 5,
+ 'cpu_time': 80,
+ 'time': -0.375,
+ 'real_time': 8,
+ 'cpu_time_other': 53,
+ 'cpu': -0.3375
+ }
+ ],
+ 'utest': {},
+ 'time_unit': u'ns',
+ 'aggregate_name': ''
+ }
+ ]
+ self.assertEqual(len(self.json_diff_report), len(expected_output))
+ for out, expected in zip(
+ self.json_diff_report, expected_output):
+ self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['time_unit'], expected['time_unit'])
+ assert_utest(self, out, expected)
+ assert_measurements(self, out, expected)
+
+
+def assert_utest(unittest_instance, lhs, rhs):
+ if lhs['utest']:
+ unittest_instance.assertAlmostEqual(
+ lhs['utest']['cpu_pvalue'],
+ rhs['utest']['cpu_pvalue'])
+ unittest_instance.assertAlmostEqual(
+ lhs['utest']['time_pvalue'],
+ rhs['utest']['time_pvalue'])
+ unittest_instance.assertEqual(
+ lhs['utest']['have_optimal_repetitions'],
+ rhs['utest']['have_optimal_repetitions'])
+ else:
+ # lhs['utest'] is empty; fail unless rhs['utest'] is empty too.
+ unittest_instance.assertEqual(lhs['utest'], rhs['utest'])
+
+
+def assert_measurements(unittest_instance, lhs, rhs):
+ for m1, m2 in zip(lhs['measurements'], rhs['measurements']):
+ unittest_instance.assertEqual(m1['real_time'], m2['real_time'])
+ unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time'])
+ # m1['time'] and m1['cpu'] hold computed values, so they must be
+ # compared with assertAlmostEqual rather than exact equality.
+ unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
+ unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
+
if __name__ == '__main__':
unittest.main()
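For orientation, a minimal sketch of how the refactored pair of functions introduced above could be driven end to end: get_difference_report builds the structured diff from two loaded benchmark JSON documents, and print_difference_report renders it as the text table. The import path and the input file names are assumptions for illustration; only the function names, parameters, and return value (a list of output lines) come from this change.

    # Hypothetical driver; the 'gbench.report' import path and file names are assumed.
    import json
    from gbench.report import get_difference_report, print_difference_report

    with open('baseline.json') as f:        # assumed baseline results file
        json_baseline = json.load(f)
    with open('contender.json') as f:       # assumed contender results file
        json_contender = json.load(f)

    # First build the JSON-like diff report, optionally including U test data...
    diff_report = get_difference_report(json_baseline, json_contender, utest=True)
    # ...then render it; print_difference_report returns a list of output lines.
    for line in print_difference_report(diff_report, utest=True,
                                        utest_alpha=0.05, use_color=False):
        print(line)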
diff --git a/tools/gbench/util.py b/tools/gbench/util.py
index 1f8e8e2..661c4ba 100644
--- a/tools/gbench/util.py
+++ b/tools/gbench/util.py
@@ -158,7 +158,6 @@ def run_or_load_benchmark(filename, benchmark_flags):
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
- elif ftype == IT_Executable:
+ if ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
- else:
- assert False # This branch is unreachable
+ raise ValueError('Unknown file type %s' % ftype)
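A brief hedged sketch of what the new error path above means for callers: run_or_load_benchmark now raises ValueError for an input that is neither a JSON results file nor an executable, instead of tripping an unreachable assert. The import path and the example arguments are assumptions for illustration.

    # Hypothetical caller; the 'gbench.util' import path and arguments are assumed.
    from gbench.util import run_or_load_benchmark

    try:
        results = run_or_load_benchmark('maybe_results.json',
                                        ['--benchmark_repetitions=9'])
    except ValueError as err:
        # Raised when the input is neither IT_JSON nor IT_Executable.
        print('skipping unusable input: %s' % err)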
diff --git a/tools/requirements.txt b/tools/requirements.txt
new file mode 100644
index 0000000..3b3331b
--- /dev/null
+++ b/tools/requirements.txt
@@ -0,0 +1 @@
+scipy>=1.5.0 \ No newline at end of file