# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Python using C++ benchmark framework.

To run this example, you must first install the `google_benchmark` Python package.

To install using `setup.py`, download and extract the `google_benchmark` source.
In the extracted directory, execute:
  python setup.py install
"""

import random
import time

import google_benchmark as benchmark
from google_benchmark import Counter


@benchmark.register
def empty(state):
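    """Benchmark an empty loop body."""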
    while state:
        pass


@benchmark.register
def sum_million(state):
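    """Sum one million integers on every iteration."""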
    while state:
        sum(range(1_000_000))


@benchmark.register
def pause_timing(state):
    """Pause timing every iteration."""
    while state:
        # Construct a list of random ints every iteration without timing it
        state.pause_timing()
        random_list = [random.randint(0, 100) for _ in range(100)]
        state.resume_timing()
        # Time the in-place sorting algorithm
        random_list.sort()


@benchmark.register
def skipped(state):
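    """Skip the benchmark by reporting an error."""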
    if True:  # Test some predicate here.
        state.skip_with_error("some error")
        return  # NOTE: You must explicitly return, or the benchmark will continue.

    ...  # Benchmark code would be here.


@benchmark.register
# Use the manually reported iteration time rather than the framework's own timer.
@benchmark.option.use_manual_time()
def manual_timing(state):
    """Report manually measured iteration times."""
    while state:
        # Manually measure the elapsed (wall-clock) time of each iteration
        start = time.perf_counter()  # perf_counter_ns() in Python 3.7+
        # Something to benchmark
        time.sleep(0.01)
        end = time.perf_counter()
        state.set_iteration_time(end - start)


@benchmark.register
def custom_counters(state):
    """Collect custom metric using benchmark.Counter."""
    num_foo = 0.0
    while state:
        # Benchmark some code here
        pass
        # Collect some custom metric named foo
        num_foo += 0.13

    # Automatic Counter from numbers.
    state.counters["foo"] = num_foo
    # Set a counter as a rate.
    state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate)
    # Set a counter as the inverse of a rate.
    state.counters["foo_inv_rate"] = Counter(
        num_foo, Counter.kIsRate | Counter.kInvert
    )
    # Set a counter as a thread-average quantity.
    state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads)
    # There's also a combined flag:
    state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate)


@benchmark.register
@benchmark.option.measure_process_cpu_time()
@benchmark.option.use_real_time()
def with_options(state):
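    """Run with process CPU time measurement and real-time reporting."""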
    while state:
        sum(range(1_000_000))


@benchmark.register(name="sum_million_microseconds")
@benchmark.option.unit(benchmark.kMicrosecond)
def with_options2(state):
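    """Report timings in microseconds."""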
    while state:
        sum(range(1_000_000))


@benchmark.register
@benchmark.option.arg(100)
@benchmark.option.arg(1000)
def passing_argument(state):
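    """Sum a range whose size is passed in as a benchmark argument."""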
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range(8, limit=8 << 10)
def using_range(state):
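    """Sum ranges sized by arguments generated from 8 up to 8 << 10."""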
    while state:
        sum(range(state.range(0)))


@benchmark.register
@benchmark.option.range_multiplier(2)
@benchmark.option.range(1 << 10, 1 << 18)
@benchmark.option.complexity(benchmark.oN)
def computing_complexity(state):
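    """Estimate asymptotic complexity (expected O(N)) over a range of sizes."""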
    while state:
        sum(range(state.range(0)))
    state.complexity_n = state.range(0)


if __name__ == "__main__":
    benchmark.main()