Diffstat (limited to 'src/benchmark_runner.cc')
-rw-r--r--  src/benchmark_runner.cc  559
1 file changed, 347 insertions(+), 212 deletions(-)
diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index 7bc6b63..f7ae424 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -13,12 +13,13 @@
// limitations under the License.
#include "benchmark_runner.h"
+
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
-#ifndef BENCHMARK_OS_FUCHSIA
+#if !defined(BENCHMARK_OS_FUCHSIA) && !defined(BENCHMARK_OS_QURT)
#include <sys/resource.h>
#endif
#include <sys/time.h>
@@ -27,11 +28,14 @@
#include <algorithm>
#include <atomic>
+#include <climits>
+#include <cmath>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
+#include <limits>
#include <memory>
#include <string>
#include <thread>
@@ -45,6 +49,7 @@
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
+#include "perf_counters.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
@@ -60,64 +65,72 @@ MemoryManager* memory_manager = nullptr;
namespace {
static constexpr IterationCount kMaxIterations = 1000000000;
+const double kDefaultMinTime =
+ std::strtod(::benchmark::kDefaultMinTimeStr, /*p_end*/ nullptr);
BenchmarkReporter::Run CreateRunReport(
const benchmark::internal::BenchmarkInstance& b,
const internal::ThreadManager::Result& results,
IterationCount memory_iterations,
- const MemoryManager::Result& memory_result, double seconds,
- int64_t repetition_index) {
+ const MemoryManager::Result* memory_result, double seconds,
+ int64_t repetition_index, int64_t repeats) {
// Create report about this benchmark run.
BenchmarkReporter::Run report;
- report.run_name = b.name;
- report.error_occurred = results.has_error_;
- report.error_message = results.error_message_;
+ report.run_name = b.name();
+ report.family_index = b.family_index();
+ report.per_family_instance_index = b.per_family_instance_index();
+ report.skipped = results.skipped_;
+ report.skip_message = results.skip_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
- report.time_unit = b.time_unit;
- report.threads = b.threads;
+ report.time_unit = b.time_unit();
+ report.threads = b.threads();
report.repetition_index = repetition_index;
- report.repetitions = b.repetitions;
+ report.repetitions = repeats;
- if (!report.error_occurred) {
- if (b.use_manual_time) {
+ if (!report.skipped) {
+ if (b.use_manual_time()) {
report.real_accumulated_time = results.manual_time_used;
} else {
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
- report.complexity = b.complexity;
- report.complexity_lambda = b.complexity_lambda;
- report.statistics = b.statistics;
+ report.complexity = b.complexity();
+ report.complexity_lambda = b.complexity_lambda();
+ report.statistics = &b.statistics();
report.counters = results.counters;
if (memory_iterations > 0) {
- report.has_memory_result = true;
+ assert(memory_result != nullptr);
+ report.memory_result = memory_result;
report.allocs_per_iter =
- memory_iterations ? static_cast<double>(memory_result.num_allocs) /
+ memory_iterations ? static_cast<double>(memory_result->num_allocs) /
memory_iterations
: 0;
- report.max_bytes_used = memory_result.max_bytes_used;
}
- internal::Finish(&report.counters, results.iterations, seconds, b.threads);
+ internal::Finish(&report.counters, results.iterations, seconds,
+ b.threads());
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
-// Adds the stats collected for the thread into *total.
+// Adds the stats collected for the thread into manager->results.
void RunInThread(const BenchmarkInstance* b, IterationCount iters,
- int thread_id, ThreadManager* manager) {
+ int thread_id, ThreadManager* manager,
+ PerfCountersMeasurement* perf_counters_measurement) {
internal::ThreadTimer timer(
- b->measure_process_cpu_time
+ b->measure_process_cpu_time()
? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
- State st = b->Run(iters, thread_id, &timer, manager);
- CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+
+ State st =
+ b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
+ BM_CHECK(st.skipped() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
@@ -132,229 +145,351 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
manager->NotifyThreadComplete();
}
-class BenchmarkRunner {
- public:
- BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_,
- std::vector<BenchmarkReporter::Run>* complexity_reports_)
- : b(b_),
- complexity_reports(*complexity_reports_),
- min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
- repeats(b.repetitions != 0 ? b.repetitions
+double ComputeMinTime(const benchmark::internal::BenchmarkInstance& b,
+ const BenchTimeType& iters_or_time) {
+ if (!IsZero(b.min_time())) return b.min_time();
+ // If the flag was used to specify number of iters, then return the default
+ // min_time.
+ if (iters_or_time.tag == BenchTimeType::ITERS) return kDefaultMinTime;
+
+ return iters_or_time.time;
+}
+
+IterationCount ComputeIters(const benchmark::internal::BenchmarkInstance& b,
+ const BenchTimeType& iters_or_time) {
+ if (b.iterations() != 0) return b.iterations();
+
+ // We've already concluded that this flag is currently used to pass an
+ // iteration count, but double-check here anyway.
+ BM_CHECK(iters_or_time.tag == BenchTimeType::ITERS);
+ return iters_or_time.iters;
+}
+
+} // end namespace
+
+BenchTimeType ParseBenchMinTime(const std::string& value) {
+ BenchTimeType ret;
+
+ if (value.empty()) {
+ ret.tag = BenchTimeType::TIME;
+ ret.time = 0.0;
+ return ret;
+ }
+
+ if (value.back() == 'x') {
+ char* p_end;
+ // Reset errno before it's changed by strtol.
+ errno = 0;
+ IterationCount num_iters = std::strtol(value.c_str(), &p_end, 10);
+
+ // After a valid parse, p_end should have been set to
+ // point to the 'x' suffix.
+ BM_CHECK(errno == 0 && p_end != nullptr && *p_end == 'x')
+ << "Malformed iters value passed to --benchmark_min_time: `" << value
+ << "`. Expected --benchmark_min_time=<integer>x.";
+
+ ret.tag = BenchTimeType::ITERS;
+ ret.iters = num_iters;
+ return ret;
+ }
+
+ bool has_suffix = value.back() == 's';
+ if (!has_suffix) {
+ BM_VLOG(0) << "Value passed to --benchmark_min_time should have a suffix. "
+ "Eg., `30s` for 30-seconds.";
+ }
+
+ char* p_end;
+ // Reset errno before it's changed by strtod.
+ errno = 0;
+ double min_time = std::strtod(value.c_str(), &p_end);
+
+ // After a successful parse, p_end should point to the suffix 's',
+ // or the end of the string if the suffix was omitted.
+ BM_CHECK(errno == 0 && p_end != nullptr &&
+ ((has_suffix && *p_end == 's') || *p_end == '\0'))
+ << "Malformed seconds value passed to --benchmark_min_time: `" << value
+ << "`. Expected --benchmark_min_time=<float>x.";
+
+ ret.tag = BenchTimeType::TIME;
+ ret.time = min_time;
+
+ return ret;
+}
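// Illustrative sketch (hypothetical, not part of this patch): a standalone
// mirror of the two flag forms ParseBenchMinTime accepts. "2.5s" (or bare
// "2.5", which only triggers the warning above) means a 2.5-second minimum
// running time; "1000x" means exactly 1000 iterations. ParsedSketch and its
// fields are stand-ins, not the library's BenchTimeType.
#include <cassert>
#include <cstdlib>
#include <string>

struct ParsedSketch {
  enum { ITERS, TIME } tag;
  long iters;
  double seconds;
};

ParsedSketch ParseSketch(const std::string& v) {
  ParsedSketch r{};
  if (!v.empty() && v.back() == 'x') {
    r.tag = ParsedSketch::ITERS;
    r.iters = std::strtol(v.c_str(), nullptr, 10);  // "1000x" -> 1000 iterations
  } else {
    r.tag = ParsedSketch::TIME;
    r.seconds = std::strtod(v.c_str(), nullptr);    // "2.5s"/"2.5" -> 2.5, "" -> 0.0
  }
  return r;
}

int main() {
  assert(ParseSketch("2.5s").seconds == 2.5);
  assert(ParseSketch("1000x").iters == 1000);
  assert(ParseSketch("").tag == ParsedSketch::TIME);
}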
+
+BenchmarkRunner::BenchmarkRunner(
+ const benchmark::internal::BenchmarkInstance& b_,
+ PerfCountersMeasurement* pcm_,
+ BenchmarkReporter::PerFamilyRunReports* reports_for_family_)
+ : b(b_),
+ reports_for_family(reports_for_family_),
+ parsed_benchtime_flag(ParseBenchMinTime(FLAGS_benchmark_min_time)),
+ min_time(ComputeMinTime(b_, parsed_benchtime_flag)),
+ min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0)
+ ? b.min_warmup_time()
+ : FLAGS_benchmark_min_warmup_time),
+ warmup_done(!(min_warmup_time > 0.0)),
+ repeats(b.repetitions() != 0 ? b.repetitions()
: FLAGS_benchmark_repetitions),
- has_explicit_iteration_count(b.iterations != 0),
- pool(b.threads - 1),
- iters(has_explicit_iteration_count ? b.iterations : 1) {
+ has_explicit_iteration_count(b.iterations() != 0 ||
+ parsed_benchtime_flag.tag ==
+ BenchTimeType::ITERS),
+ pool(b.threads() - 1),
+ iters(has_explicit_iteration_count
+ ? ComputeIters(b_, parsed_benchtime_flag)
+ : 1),
+ perf_counters_measurement_ptr(pcm_) {
+ run_results.display_report_aggregates_only =
+ (FLAGS_benchmark_report_aggregates_only ||
+ FLAGS_benchmark_display_aggregates_only);
+ run_results.file_report_aggregates_only =
+ FLAGS_benchmark_report_aggregates_only;
+ if (b.aggregation_report_mode() != internal::ARM_Unspecified) {
run_results.display_report_aggregates_only =
- (FLAGS_benchmark_report_aggregates_only ||
- FLAGS_benchmark_display_aggregates_only);
+ (b.aggregation_report_mode() &
+ internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
- FLAGS_benchmark_report_aggregates_only;
- if (b.aggregation_report_mode != internal::ARM_Unspecified) {
- run_results.display_report_aggregates_only =
- (b.aggregation_report_mode &
- internal::ARM_DisplayReportAggregatesOnly);
- run_results.file_report_aggregates_only =
- (b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
- }
+ (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
+ BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
+ (perf_counters_measurement_ptr->num_counters() == 0))
+ << "Perf counters were requested but could not be set up.";
+ }
+}
- for (int repetition_num = 0; repetition_num < repeats; repetition_num++) {
- DoOneRepetition(repetition_num);
- }
+BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() {
+ BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n";
- // Calculate additional statistics
- run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(b.threads()));
- // Maybe calculate complexity report
- if ((b.complexity != oNone) && b.last_benchmark_instance) {
- auto additional_run_stats = ComputeBigO(complexity_reports);
- run_results.aggregates_only.insert(run_results.aggregates_only.end(),
- additional_run_stats.begin(),
- additional_run_stats.end());
- complexity_reports.clear();
- }
+ // Run all but one thread in separate threads
+ for (std::size_t ti = 0; ti < pool.size(); ++ti) {
+ pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
+ manager.get(), perf_counters_measurement_ptr);
}
+ // And run one thread here directly.
+ // (If we were asked to run just one thread, we don't create new threads.)
+ // Yes, we need to do this here *after* we start the separate threads.
+ RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr);
- RunResults&& get_results() { return std::move(run_results); }
+ // The main thread has finished. Now let's wait for the other threads.
+ manager->WaitForAllThreads();
+ for (std::thread& thread : pool) thread.join();
- private:
- RunResults run_results;
+ IterationResults i;
+ // Acquire the measurements/counters from the manager, UNDER THE LOCK!
+ {
+ MutexLock l(manager->GetBenchmarkMutex());
+ i.results = manager->results;
+ }
- const benchmark::internal::BenchmarkInstance& b;
- std::vector<BenchmarkReporter::Run>& complexity_reports;
+ // And get rid of the manager.
+ manager.reset();
- const double min_time;
- const int repeats;
- const bool has_explicit_iteration_count;
+ // Adjust real/manual time stats since they were reported per thread.
+ i.results.real_time_used /= b.threads();
+ i.results.manual_time_used /= b.threads();
+ // If we were measuring whole-process CPU usage, adjust the CPU time too.
+ if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads();
- std::vector<std::thread> pool;
+ BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
+ << i.results.real_time_used << "\n";
- IterationCount iters; // preserved between repetitions!
- // So only the first repetition has to find/calculate it,
- // the other repetitions will just use that precomputed iteration count.
+ // By using KeepRunningBatch a benchmark can iterate more times than
+ // requested, so take the iteration count from i.results.
+ i.iters = i.results.iterations / b.threads();
- struct IterationResults {
- internal::ThreadManager::Result results;
- IterationCount iters;
- double seconds;
- };
- IterationResults DoNIterations() {
- VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
+ // Base decisions off of real time if requested by this benchmark.
+ i.seconds = i.results.cpu_time_used;
+ if (b.use_manual_time()) {
+ i.seconds = i.results.manual_time_used;
+ } else if (b.use_real_time()) {
+ i.seconds = i.results.real_time_used;
+ }
- std::unique_ptr<internal::ThreadManager> manager;
- manager.reset(new internal::ThreadManager(b.threads));
+ return i;
+}
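// Illustrative sketch (hypothetical numbers, not part of this patch): every
// thread accumulates its own wall-clock time into manager->results, so the
// sum scales with the thread count; dividing by b.threads() recovers the time
// one run actually took.
#include <cassert>

int main() {
  const int threads = 4;
  double real_time_used = 0.0;
  for (int t = 0; t < threads; ++t) real_time_used += 0.5;  // each thread reports ~0.5 s
  real_time_used /= threads;  // same adjustment as i.results.real_time_used above
  assert(real_time_used == 0.5);
}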
- // Run all but one thread in separate threads
- for (std::size_t ti = 0; ti < pool.size(); ++ti) {
- pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1),
- manager.get());
- }
- // And run one thread here directly.
- // (If we were asked to run just one thread, we don't create new threads.)
- // Yes, we need to do this here *after* we start the separate threads.
- RunInThread(&b, iters, 0, manager.get());
+IterationCount BenchmarkRunner::PredictNumItersNeeded(
+ const IterationResults& i) const {
+ // See by how much the iteration count should be increased.
+ // Note: Avoid division by zero with max(seconds, 1ns).
+ double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9);
+ // If our last run was at least 10% of the minimum time to apply, then we
+ // use the multiplier directly.
+ // Otherwise we expand by a fixed factor of 10.
+ // NOTE: When the last run was at least 10% of the min time the max
+ // expansion should be 14x.
+ const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1;
+ multiplier = is_significant ? multiplier : 10.0;
+
+ // So what seems to be the sufficiently-large iteration count? Round up.
+ const IterationCount max_next_iters = static_cast<IterationCount>(
+ std::lround(std::max(multiplier * static_cast<double>(i.iters),
+ static_cast<double>(i.iters) + 1.0)));
+ // But we do have *some* limits, though.
+ const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+ BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+ return next_iters; // round up before conversion to integer.
+}
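// Illustrative sketch (hypothetical numbers, not part of this patch) of the
// growth heuristic above: with a 1.0 s minimum, a run of 1000 iterations that
// took 0.25 s is "significant" (more than 10% of the minimum), so the next
// count is 1000 * (1.0 * 1.4 / 0.25) = 5600; a run that took only 0.01 s is
// not, so the count merely grows 10x to 10000. NextItersSketch is a standalone
// mirror of the multiplier logic (the kMaxIterations cap is omitted here).
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>

int64_t NextItersSketch(double min_time, double seconds, int64_t iters) {
  double multiplier = min_time * 1.4 / std::max(seconds, 1e-9);
  const bool is_significant = (seconds / min_time) > 0.1;
  multiplier = is_significant ? multiplier : 10.0;
  return static_cast<int64_t>(
      std::lround(std::max(multiplier * static_cast<double>(iters),
                           static_cast<double>(iters) + 1.0)));
}

int main() {
  assert(NextItersSketch(1.0, 0.25, 1000) == 5600);
  assert(NextItersSketch(1.0, 0.01, 1000) == 10000);
}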
- // The main thread has finished. Now let's wait for the other threads.
- manager->WaitForAllThreads();
- for (std::thread& thread : pool) thread.join();
+bool BenchmarkRunner::ShouldReportIterationResults(
+ const IterationResults& i) const {
+ // Determine if this run should be reported:
+ // either it has run for a sufficient amount of time,
+ // or the run was skipped (e.g. because an error was reported).
+ return i.results.skipped_ ||
+ i.iters >= kMaxIterations || // Too many iterations already.
+ i.seconds >=
+ GetMinTimeToApply() || // The elapsed time is large enough.
+ // CPU time is specified but the elapsed real time greatly exceeds
+ // the minimum time.
+ // Note that user-provided timers are exempt from this test.
+ ((i.results.real_time_used >= 5 * GetMinTimeToApply()) &&
+ !b.use_manual_time());
+}
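// Illustrative sketch (hypothetical numbers, not part of this patch) of the
// time-based part of the rule above: a run that measured only 0.2 s of CPU
// time against a 1.0 s minimum is still reported once it has consumed 6 s of
// wall-clock time (>= 5 * min time), unless the benchmark supplies a manual
// timer.
#include <cassert>

bool ShouldReportSketch(double seconds, double real_time, double min_time,
                        bool use_manual_time) {
  return seconds >= min_time ||
         (real_time >= 5 * min_time && !use_manual_time);
}

int main() {
  assert(ShouldReportSketch(0.2, 6.0, 1.0, /*use_manual_time=*/false));
  assert(!ShouldReportSketch(0.2, 6.0, 1.0, /*use_manual_time=*/true));
}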
- IterationResults i;
- // Acquire the measurements/counters from the manager, UNDER THE LOCK!
- {
- MutexLock l(manager->GetBenchmarkMutex());
- i.results = manager->results;
- }
+double BenchmarkRunner::GetMinTimeToApply() const {
+ // The same machinery is used both for the warmup phase and for the measured
+ // runs, so we need a way of telling whether to apply min_time or
+ // min_warmup_time. This function figures out whether we are still in the
+ // warmup phase (apply min_warmup_time) or already in the benchmarking phase
+ // (apply min_time).
+ return warmup_done ? min_time : min_warmup_time;
+}
- // And get rid of the manager.
- manager.reset();
+void BenchmarkRunner::FinishWarmUp(const IterationCount& i) {
+ warmup_done = true;
+ iters = i;
+}
- // Adjust real/manual time stats since they were reported per thread.
- i.results.real_time_used /= b.threads;
- i.results.manual_time_used /= b.threads;
- // If we were measuring whole-process CPU usage, adjust the CPU time too.
- if (b.measure_process_cpu_time) i.results.cpu_time_used /= b.threads;
-
- VLOG(2) << "Ran in " << i.results.cpu_time_used << "/"
- << i.results.real_time_used << "\n";
-
- // So for how long were we running?
- i.iters = iters;
- // Base decisions off of real time if requested by this benchmark.
- i.seconds = i.results.cpu_time_used;
- if (b.use_manual_time) {
- i.seconds = i.results.manual_time_used;
- } else if (b.use_real_time) {
- i.seconds = i.results.real_time_used;
+void BenchmarkRunner::RunWarmUp() {
+ // Use the same mechanisms for warming up the benchmark as used for actually
+ // running and measuring the benchmark.
+ IterationResults i_warmup;
+ // Don't reuse the iteration count determined in the warmup phase for the
+ // actual measured benchmark phase. While it may be a good starting point and
+ // would avoid having to re-estimate how many iterations are needed to reach
+ // min_time, it may also be a completely wrong guess, since the warmup loops
+ // might be considerably slower (e.g. because of caching effects).
+ const IterationCount i_backup = iters;
+
+ for (;;) {
+ b.Setup();
+ i_warmup = DoNIterations();
+ b.Teardown();
+
+ const bool finish = ShouldReportIterationResults(i_warmup);
+
+ if (finish) {
+ FinishWarmUp(i_backup);
+ break;
}
- return i;
+ // Although this is "only" a warmup phase, where running enough iterations
+ // at once without measuring time isn't as important as it is for the
+ // benchmarking phase, we still grow the iteration count the same way;
+ // otherwise it would be very confusing for the user to choose a proper
+ // value for min_warmup_time.
+ iters = PredictNumItersNeeded(i_warmup);
+ assert(iters > i_warmup.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
}
+}
- IterationCount PredictNumItersNeeded(const IterationResults& i) const {
- // See how much iterations should be increased by.
- // Note: Avoid division by zero with max(seconds, 1ns).
- double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
- // If our last run was at least 10% of FLAGS_benchmark_min_time then we
- // use the multiplier directly.
- // Otherwise we use at most 10 times expansion.
- // NOTE: When the last run was at least 10% of the min time the max
- // expansion should be 14x.
- bool is_significant = (i.seconds / min_time) > 0.1;
- multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
- if (multiplier <= 1.0) multiplier = 2.0;
-
- // So what seems to be the sufficiently-large iteration count? Round up.
- const IterationCount max_next_iters = static_cast<IterationCount>(
- std::lround(std::max(multiplier * static_cast<double>(i.iters),
- static_cast<double>(i.iters) + 1.0)));
- // But we do have *some* sanity limits though..
- const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
-
- VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
- return next_iters; // round up before conversion to integer.
+void BenchmarkRunner::DoOneRepetition() {
+ assert(HasRepeatsRemaining() && "Already done all repetitions?");
+
+ const bool is_the_first_repetition = num_repetitions_done == 0;
+
+ // In case a warmup phase is requested by the benchmark, run it now.
+ // After the warmup phase the BenchmarkRunner should be in the same state as
+ // if the warmup had never happened, except that warmup_done is set. Any
+ // other change to the BenchmarkRunner instance would be a bug; please fix
+ // it.
+ if (!warmup_done) RunWarmUp();
+
+ IterationResults i;
+ // We *may* be gradually increasing the length (iteration count)
+ // of the benchmark until we decide the results are significant.
+ // And once we do, we report those last results and exit.
+ // Please do note that if there are repetitions, the iteration count
+ // is *only* calculated for the *first* repetition, and other repetitions
+ // simply use that precomputed iteration count.
+ for (;;) {
+ b.Setup();
+ i = DoNIterations();
+ b.Teardown();
+
+ // Do we consider the results to be significant?
+ // If we are doing repetitions, and the first repetition was already done,
+ // it has calculated the correct iteration count, so we have run that very
+ // iteration count just now. No need to calculate anything. Just report.
+ // Else, the normal rules apply.
+ const bool results_are_significant = !is_the_first_repetition ||
+ has_explicit_iteration_count ||
+ ShouldReportIterationResults(i);
+
+ if (results_are_significant) break; // Good, let's report them!
+
+ // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
+ // iteration count, and run the benchmark again...
+
+ iters = PredictNumItersNeeded(i);
+ assert(iters > i.iters &&
+ "if we did more iterations than we want to do the next time, "
+ "then we should have accepted the current iteration run.");
}
- bool ShouldReportIterationResults(const IterationResults& i) const {
- // Determine if this run should be reported;
- // Either it has run for a sufficient amount of time
- // or because an error was reported.
- return i.results.has_error_ ||
- i.iters >= kMaxIterations || // Too many iterations already.
- i.seconds >= min_time || // The elapsed time is large enough.
- // CPU time is specified but the elapsed real time greatly exceeds
- // the minimum time.
- // Note that user provided timers are except from this sanity check.
- ((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
+ // Oh, one last thing, we need to also produce the 'memory measurements'..
+ MemoryManager::Result* memory_result = nullptr;
+ IterationCount memory_iterations = 0;
+ if (memory_manager != nullptr) {
+ // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an
+ // optional so we don't have to own the Result here.
+ // Can't do it now due to cxx03.
+ memory_results.push_back(MemoryManager::Result());
+ memory_result = &memory_results.back();
+ // Only run a few iterations to reduce the impact of one-time
+ // allocations in benchmarks that are not properly managed.
+ memory_iterations = std::min<IterationCount>(16, iters);
+ memory_manager->Start();
+ std::unique_ptr<internal::ThreadManager> manager;
+ manager.reset(new internal::ThreadManager(1));
+ b.Setup();
+ RunInThread(&b, memory_iterations, 0, manager.get(),
+ perf_counters_measurement_ptr);
+ manager->WaitForAllThreads();
+ manager.reset();
+ b.Teardown();
+ memory_manager->Stop(*memory_result);
}
- void DoOneRepetition(int64_t repetition_index) {
- const bool is_the_first_repetition = repetition_index == 0;
- IterationResults i;
-
- // We *may* be gradually increasing the length (iteration count)
- // of the benchmark until we decide the results are significant.
- // And once we do, we report those last results and exit.
- // Please do note that the if there are repetitions, the iteration count
- // is *only* calculated for the *first* repetition, and other repetitions
- // simply use that precomputed iteration count.
- for (;;) {
- i = DoNIterations();
-
- // Do we consider the results to be significant?
- // If we are doing repetitions, and the first repetition was already done,
- // it has calculated the correct iteration time, so we have run that very
- // iteration count just now. No need to calculate anything. Just report.
- // Else, the normal rules apply.
- const bool results_are_significant = !is_the_first_repetition ||
- has_explicit_iteration_count ||
- ShouldReportIterationResults(i);
-
- if (results_are_significant) break; // Good, let's report them!
-
- // Nope, bad iteration. Let's re-estimate the hopefully-sufficient
- // iteration count, and run the benchmark again...
-
- iters = PredictNumItersNeeded(i);
- assert(iters > i.iters &&
- "if we did more iterations than we want to do the next time, "
- "then we should have accepted the current iteration run.");
- }
+ // Ok, now actually report.
+ BenchmarkReporter::Run report =
+ CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds,
+ num_repetitions_done, repeats);
- // Oh, one last thing, we need to also produce the 'memory measurements'..
- MemoryManager::Result memory_result;
- IterationCount memory_iterations = 0;
- if (memory_manager != nullptr) {
- // Only run a few iterations to reduce the impact of one-time
- // allocations in benchmarks that are not properly managed.
- memory_iterations = std::min<IterationCount>(16, iters);
- memory_manager->Start();
- std::unique_ptr<internal::ThreadManager> manager;
- manager.reset(new internal::ThreadManager(1));
- RunInThread(&b, memory_iterations, 0, manager.get());
- manager->WaitForAllThreads();
- manager.reset();
-
- memory_manager->Stop(&memory_result);
- }
+ if (reports_for_family) {
+ ++reports_for_family->num_runs_done;
+ if (!report.skipped) reports_for_family->Runs.push_back(report);
+ }
- // Ok, now actualy report.
- BenchmarkReporter::Run report =
- CreateRunReport(b, i.results, memory_iterations, memory_result,
- i.seconds, repetition_index);
+ run_results.non_aggregates.push_back(report);
- if (!report.error_occurred && b.complexity != oNone)
- complexity_reports.push_back(report);
+ ++num_repetitions_done;
+}
- run_results.non_aggregates.push_back(report);
- }
-};
+RunResults&& BenchmarkRunner::GetResults() {
+ assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?");
-} // end namespace
+ // Calculate additional statistics over the repetitions of this instance.
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
-RunResults RunBenchmark(
- const benchmark::internal::BenchmarkInstance& b,
- std::vector<BenchmarkReporter::Run>* complexity_reports) {
- internal::BenchmarkRunner r(b, complexity_reports);
- return r.get_results();
+ return std::move(run_results);
}
} // end namespace internal
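// Illustrative sketch (hypothetical, not part of this patch) of the driving
// pattern this class exposes: call DoOneRepetition() until
// HasRepeatsRemaining() is false, then collect everything via GetResults().
// RunnerSketch is a stand-in with the same shape; the real driver for
// BenchmarkRunner lives outside this file.
#include <cassert>
#include <vector>

struct RunnerSketch {
  int repeats;
  int done;
  std::vector<int> non_aggregates;  // stands in for one Run report per repetition
  bool HasRepeatsRemaining() const { return done < repeats; }
  void DoOneRepetition() { non_aggregates.push_back(done++); }
  std::vector<int> GetResults() {
    assert(!HasRepeatsRemaining());  // mirrors the assert in GetResults() above
    return non_aggregates;           // aggregates would be computed here
  }
};

int main() {
  RunnerSketch r{/*repeats=*/3, /*done=*/0, {}};
  while (r.HasRepeatsRemaining()) r.DoOneRepetition();
  assert(r.GetResults().size() == 3);
}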