Diffstat (limited to 'tools/gbench/report.py')
-rw-r--r--  tools/gbench/report.py  392
1 file changed, 345 insertions, 47 deletions
diff --git a/tools/gbench/report.py b/tools/gbench/report.py
index bf29492..b2bbfb9 100644
--- a/tools/gbench/report.py
+++ b/tools/gbench/report.py
@@ -1,11 +1,14 @@
-import unittest
"""report.py - Utilities for reporting statistics about benchmark results
"""
+
+import unittest
import os
import re
import copy
-from scipy.stats import mannwhitneyu
+import random
+from scipy.stats import mannwhitneyu, gmean
+from numpy import array
class BenchmarkColor(object):
@@ -39,6 +42,13 @@ UTEST_MIN_REPETITIONS = 2
UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better.
UTEST_COL_NAME = "_pvalue"
+_TIME_UNIT_TO_SECONDS_MULTIPLIER = {
+ "s": 1.0,
+ "ms": 1e-3,
+ "us": 1e-6,
+ "ns": 1e-9,
+}
+
def color_format(use_color, fmt_str, *args, **kwargs):
"""
@@ -148,6 +158,30 @@ def partition_benchmarks(json1, json2):
return partitions
+def get_timedelta_field_as_seconds(benchmark, field_name):
+ """
+    Return the value of benchmark's field_name entry, which is a time
+    expressed in the benchmark's time_unit, converted to seconds.
+ """
+ timedelta = benchmark[field_name]
+ time_unit = benchmark.get('time_unit', 's')
+    return timedelta * _TIME_UNIT_TO_SECONDS_MULTIPLIER[time_unit]
+
+
+def calculate_geomean(json):
+ """
+ Extract all real/cpu times from all the benchmarks as seconds,
+ and calculate their geomean.
+ """
+ times = []
+ for benchmark in json['benchmarks']:
+ if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate':
+ continue
+ times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'),
+ get_timedelta_field_as_seconds(benchmark, 'cpu_time')])
+ return gmean(times) if times else array([])
+
+
def extract_field(partition, field_name):
# The count of elements may be different. We want *all* of them.
lhs = [x[field_name] for x in partition[0]]
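
Note: the two helpers added above normalize every benchmark time to seconds
and then take a column-wise geometric mean over all non-aggregate entries.
A standalone sketch with hypothetical inputs (gmean reduces along axis=0 by
default, so each [real_time, cpu_time] row contributes to one geomean per
column):

    from scipy.stats import gmean

    _TIME_UNIT_TO_SECONDS_MULTIPLIER = {"s": 1.0, "ms": 1e-3, "us": 1e-6, "ns": 1e-9}

    def to_seconds(value, time_unit):
        # e.g. 250 "us" -> 2.5e-04 seconds
        return value * _TIME_UNIT_TO_SECONDS_MULTIPLIER[time_unit]

    times = [
        [to_seconds(1, "us"), to_seconds(2, "us")],  # hypothetical benchmark A
        [to_seconds(4, "us"), to_seconds(8, "us")],  # hypothetical benchmark B
    ]
    print(gmean(times))  # [2.e-06 4.e-06]: sqrt(1u*4u) and sqrt(2u*8u)
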
@@ -172,6 +206,7 @@ def calc_utest(timings_cpu, timings_time):
return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
+
def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
def get_utest_color(pval):
return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
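
Note: the p-values colored here come from SciPy's Mann-Whitney U test,
computed by calc_utest above. A minimal sketch of the underlying call, with
hypothetical per-repetition timings (the variable names are illustrative):

    from scipy.stats import mannwhitneyu

    lhs = [10, 11, 9]   # cpu times from run 1, one entry per repetition
    rhs = [12, 14, 13]  # cpu times from run 2
    _, pvalue = mannwhitneyu(lhs, rhs, alternative='two-sided')
    print(pvalue)  # a small p-value suggests the two runs genuinely differ
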
@@ -220,6 +255,7 @@ def get_difference_report(
partitions = partition_benchmarks(json1, json2)
for partition in partitions:
benchmark_name = partition[0][0]['name']
+        label = partition[0][0].get('label', '')
time_unit = partition[0][0]['time_unit']
measurements = []
utest_results = {}
@@ -240,7 +276,8 @@ def get_difference_report(
if utest:
timings_cpu = extract_field(partition, 'cpu_time')
timings_time = extract_field(partition, 'real_time')
- have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(timings_cpu, timings_time)
+ have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(
+ timings_cpu, timings_time)
if cpu_pvalue and time_pvalue:
utest_results = {
'have_optimal_repetitions': have_optimal_repetitions,
@@ -259,6 +296,7 @@ def get_difference_report(
aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
diff_report.append({
'name': benchmark_name,
+ 'label': label,
'measurements': measurements,
'time_unit': time_unit,
'run_type': run_type,
@@ -266,6 +304,26 @@ def get_difference_report(
'utest': utest_results
})
+ lhs_gmean = calculate_geomean(json1)
+ rhs_gmean = calculate_geomean(json2)
+ if lhs_gmean.any() and rhs_gmean.any():
+ diff_report.append({
+ 'name': 'OVERALL_GEOMEAN',
+ 'label': '',
+ 'measurements': [{
+ 'real_time': lhs_gmean[0],
+ 'cpu_time': lhs_gmean[1],
+ 'real_time_other': rhs_gmean[0],
+ 'cpu_time_other': rhs_gmean[1],
+ 'time': calculate_change(lhs_gmean[0], rhs_gmean[0]),
+ 'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1])
+ }],
+ 'time_unit': 's',
+ 'run_type': 'aggregate',
+ 'aggregate_name': 'geomean',
+ 'utest': {}
+ })
+
return diff_report
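
Note: the OVERALL_GEOMEAN row is emitted with run_type 'aggregate' (so it
survives the aggregates-only filter below) and time_unit 's', since
calculate_geomean already normalized all times to seconds; the .any() guard
skips the row when either run contributed no eligible benchmarks. The
'time' and 'cpu' fields come from calculate_change, defined earlier in
report.py and not shown in this diff. Assuming the usual fractional-change
definition, a minimal restatement:

    def fractional_change(old_val, new_val):
        # hypothetical restatement of report.py's calculate_change
        return float(new_val - old_val) / abs(old_val)

    # Matches e.g. the BM_10PercentSlower test row: 100 -> 110 is +0.1000.
    print(fractional_change(100, 110))  # 0.1
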
@@ -301,26 +359,23 @@ def print_difference_report(
fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
for benchmark in json_diff_report:
# *If* we were asked to only include aggregates,
- # and if it is non-aggregate, then skip it.
- if include_aggregates_only and 'run_type' in benchmark:
- if benchmark['run_type'] != 'aggregate':
- continue
-
- for measurement in benchmark['measurements']:
- output_strs += [color_format(use_color,
- fmt_str,
- BC_HEADER,
- benchmark['name'],
- first_col_width,
- get_color(measurement['time']),
- measurement['time'],
- get_color(measurement['cpu']),
- measurement['cpu'],
- measurement['real_time'],
- measurement['real_time_other'],
- measurement['cpu_time'],
- measurement['cpu_time_other'],
- endc=BC_ENDC)]
+ # and if it is non-aggregate, then don't print it.
+        if not include_aggregates_only or 'run_type' not in benchmark or benchmark['run_type'] == 'aggregate':
+ for measurement in benchmark['measurements']:
+ output_strs += [color_format(use_color,
+ fmt_str,
+ BC_HEADER,
+ benchmark['name'],
+ first_col_width,
+ get_color(measurement['time']),
+ measurement['time'],
+ get_color(measurement['cpu']),
+ measurement['cpu'],
+ measurement['real_time'],
+ measurement['real_time_other'],
+ measurement['cpu_time'],
+ measurement['cpu_time_other'],
+ endc=BC_ENDC)]
# After processing the measurements, if requested and
# if applicable (e.g. u-test exists for given benchmark),
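
Note: the rewritten guard prints a row when aggregates-only mode is off,
when the entry has no run_type at all, or when it is itself an aggregate.
An equivalent, flatter predicate (a sketch only; should_print is a
hypothetical helper, not part of report.py):

    def should_print(benchmark, include_aggregates_only):
        if not include_aggregates_only:
            return True
        # Entries without a run_type are treated as printable.
        return benchmark.get('run_type', 'aggregate') == 'aggregate'
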
@@ -404,6 +459,8 @@ class TestReportDifference(unittest.TestCase):
'-0.1000', '100', '110', '100', '90'],
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'],
['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'],
+ ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'],
+ ['OVERALL_GEOMEAN', '-0.8113', '-0.7779', '0', '0', '0', '0']
]
output_lines_with_header = print_difference_report(
self.json_diff_report, use_color=False)
@@ -420,81 +477,137 @@ class TestReportDifference(unittest.TestCase):
expected_output = [
{
'name': 'BM_SameTimes',
- 'measurements': [{'time': 0.0000, 'cpu': 0.0000, 'real_time': 10, 'real_time_other': 10, 'cpu_time': 10, 'cpu_time_other': 10}],
+ 'label': '',
+ 'measurements': [{'time': 0.0000, 'cpu': 0.0000,
+ 'real_time': 10, 'real_time_other': 10,
+ 'cpu_time': 10, 'cpu_time_other': 10}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_2xFaster',
- 'measurements': [{'time': -0.5000, 'cpu': -0.5000, 'real_time': 50, 'real_time_other': 25, 'cpu_time': 50, 'cpu_time_other': 25}],
+ 'label': '',
+ 'measurements': [{'time': -0.5000, 'cpu': -0.5000,
+ 'real_time': 50, 'real_time_other': 25,
+ 'cpu_time': 50, 'cpu_time_other': 25}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_2xSlower',
- 'measurements': [{'time': 1.0000, 'cpu': 1.0000, 'real_time': 50, 'real_time_other': 100, 'cpu_time': 50, 'cpu_time_other': 100}],
+ 'label': '',
+ 'measurements': [{'time': 1.0000, 'cpu': 1.0000,
+ 'real_time': 50, 'real_time_other': 100,
+ 'cpu_time': 50, 'cpu_time_other': 100}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_1PercentFaster',
- 'measurements': [{'time': -0.0100, 'cpu': -0.0100, 'real_time': 100, 'real_time_other': 98.9999999, 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
+ 'label': '',
+ 'measurements': [{'time': -0.0100, 'cpu': -0.0100,
+ 'real_time': 100, 'real_time_other': 98.9999999,
+ 'cpu_time': 100, 'cpu_time_other': 98.9999999}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_1PercentSlower',
- 'measurements': [{'time': 0.0100, 'cpu': 0.0100, 'real_time': 100, 'real_time_other': 101, 'cpu_time': 100, 'cpu_time_other': 101}],
+ 'label': '',
+ 'measurements': [{'time': 0.0100, 'cpu': 0.0100,
+ 'real_time': 100, 'real_time_other': 101,
+ 'cpu_time': 100, 'cpu_time_other': 101}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentFaster',
- 'measurements': [{'time': -0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 90, 'cpu_time': 100, 'cpu_time_other': 90}],
+ 'label': '',
+ 'measurements': [{'time': -0.1000, 'cpu': -0.1000,
+ 'real_time': 100, 'real_time_other': 90,
+ 'cpu_time': 100, 'cpu_time_other': 90}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentSlower',
- 'measurements': [{'time': 0.1000, 'cpu': 0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 110}],
+ 'label': '',
+ 'measurements': [{'time': 0.1000, 'cpu': 0.1000,
+ 'real_time': 100, 'real_time_other': 110,
+ 'cpu_time': 100, 'cpu_time_other': 110}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_100xSlower',
- 'measurements': [{'time': 99.0000, 'cpu': 99.0000, 'real_time': 100, 'real_time_other': 10000, 'cpu_time': 100, 'cpu_time_other': 10000}],
+ 'label': '',
+ 'measurements': [{'time': 99.0000, 'cpu': 99.0000,
+ 'real_time': 100, 'real_time_other': 10000,
+ 'cpu_time': 100, 'cpu_time_other': 10000}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_100xFaster',
- 'measurements': [{'time': -0.9900, 'cpu': -0.9900, 'real_time': 10000, 'real_time_other': 100, 'cpu_time': 10000, 'cpu_time_other': 100}],
+ 'label': '',
+ 'measurements': [{'time': -0.9900, 'cpu': -0.9900,
+ 'real_time': 10000, 'real_time_other': 100,
+ 'cpu_time': 10000, 'cpu_time_other': 100}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_10PercentCPUToTime',
- 'measurements': [{'time': 0.1000, 'cpu': -0.1000, 'real_time': 100, 'real_time_other': 110, 'cpu_time': 100, 'cpu_time_other': 90}],
+ 'label': '',
+ 'measurements': [{'time': 0.1000, 'cpu': -0.1000,
+ 'real_time': 100, 'real_time_other': 110,
+ 'cpu_time': 100, 'cpu_time_other': 90}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_ThirdFaster',
- 'measurements': [{'time': -0.3333, 'cpu': -0.3334, 'real_time': 100, 'real_time_other': 67, 'cpu_time': 100, 'cpu_time_other': 67}],
+ 'label': '',
+ 'measurements': [{'time': -0.3333, 'cpu': -0.3334,
+ 'real_time': 100, 'real_time_other': 67,
+ 'cpu_time': 100, 'cpu_time_other': 67}],
'time_unit': 'ns',
'utest': {}
},
{
'name': 'BM_NotBadTimeUnit',
- 'measurements': [{'time': -0.9000, 'cpu': 0.2000, 'real_time': 0.4, 'real_time_other': 0.04, 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
+ 'label': '',
+ 'measurements': [{'time': -0.9000, 'cpu': 0.2000,
+ 'real_time': 0.4, 'real_time_other': 0.04,
+ 'cpu_time': 0.5, 'cpu_time_other': 0.6}],
+ 'time_unit': 's',
+ 'utest': {}
+ },
+ {
+ 'name': 'BM_hasLabel',
+ 'label': 'a label',
+ 'measurements': [{'time': 0.0000, 'cpu': 0.0000,
+ 'real_time': 1, 'real_time_other': 1,
+ 'cpu_time': 1, 'cpu_time_other': 1}],
'time_unit': 's',
'utest': {}
},
+ {
+ 'name': 'OVERALL_GEOMEAN',
+ 'label': '',
+ 'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06,
+ 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07,
+ 'time': -0.8112976497120911, 'cpu': -0.7778551721181174}],
+ 'time_unit': 's',
+ 'run_type': 'aggregate',
+            'aggregate_name': 'geomean',
+            'utest': {}
+ },
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
for out, expected in zip(
self.json_diff_report, expected_output):
self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['label'], expected['label'])
self.assertEqual(out['time_unit'], expected['time_unit'])
assert_utest(self, out, expected)
assert_measurements(self, out, expected)
@@ -525,6 +638,7 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'],
['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'],
['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'],
+ ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0']
]
output_lines_with_header = print_difference_report(
self.json_diff_report, use_color=False)
@@ -562,6 +676,16 @@ class TestReportDifferenceBetweenFamilies(unittest.TestCase):
'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}],
'time_unit': 'ns',
'utest': {}
+ },
+ {
+ 'name': 'OVERALL_GEOMEAN',
+ 'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08,
+ 'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08,
+ 'time': -0.5000000000000009, 'cpu': -0.5000000000000009}],
+ 'time_unit': 's',
+ 'run_type': 'aggregate',
+ 'aggregate_name': 'geomean',
+ 'utest': {}
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
@@ -600,8 +724,8 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
- '0.6985',
- '0.6985',
+ '1.0000',
+ '0.6667',
'U',
'Test,',
'Repetitions:',
@@ -618,7 +742,7 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
- '0.1489',
+ '0.2000',
'U',
'Test,',
'Repetitions:',
@@ -632,6 +756,7 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
'repetitions',
'recommended.'],
['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
+ ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
]
output_lines_with_header = print_difference_report(
self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False)
@@ -643,6 +768,53 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
parts = [x for x in output_lines[i].split(' ') if x]
self.assertEqual(expect_lines[i], parts)
+ def test_json_diff_report_pretty_printing_aggregates_only(self):
+ expect_lines = [
+ ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'],
+ ['BM_Two_pvalue',
+ '1.0000',
+ '0.6667',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '2.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'],
+ ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
+ ['short_pvalue',
+ '0.7671',
+ '0.2000',
+ 'U',
+ 'Test,',
+ 'Repetitions:',
+ '2',
+ 'vs',
+ '3.',
+ 'WARNING:',
+ 'Results',
+ 'unreliable!',
+ '9+',
+ 'repetitions',
+ 'recommended.'],
+ ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
+ ]
+ output_lines_with_header = print_difference_report(
+            self.json_diff_report, include_aggregates_only=True,
+            utest=True, utest_alpha=0.05, use_color=False)
+ output_lines = output_lines_with_header[2:]
+ print("\n")
+ print("\n".join(output_lines_with_header))
+ self.assertEqual(len(output_lines), len(expect_lines))
+ for i in range(0, len(output_lines)):
+ parts = [x for x in output_lines[i].split(' ') if x]
+ self.assertEqual(expect_lines[i], parts)
+
def test_json_diff_report(self):
expected_output = [
{
@@ -672,7 +844,7 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
],
'time_unit': 'ns',
'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
}
},
{
@@ -693,7 +865,7 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
],
'time_unit': 'ns',
'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
}
},
{
@@ -708,6 +880,16 @@ class TestReportDifferenceWithUTest(unittest.TestCase):
],
'time_unit': 'ns',
'utest': {}
+ },
+ {
+ 'name': 'OVERALL_GEOMEAN',
+ 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
+ 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
+ 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
+ 'time_unit': 's',
+ 'run_type': 'aggregate',
+ 'aggregate_name': 'geomean',
+ 'utest': {}
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
@@ -747,8 +929,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'],
['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'],
['BM_Two_pvalue',
- '0.6985',
- '0.6985',
+ '1.0000',
+ '0.6667',
'U',
'Test,',
'Repetitions:',
@@ -765,7 +947,7 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
['short', '-0.4325', '-0.1351', '8', '5', '77', '67'],
['short_pvalue',
'0.7671',
- '0.1489',
+ '0.2000',
'U',
'Test,',
'Repetitions:',
@@ -778,7 +960,8 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
'9+',
'repetitions',
'recommended.'],
- ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53']
+ ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'],
+ ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0']
]
output_lines_with_header = print_difference_report(
self.json_diff_report,
@@ -820,7 +1003,7 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
],
'time_unit': 'ns',
'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6985353583033387, 'time_pvalue': 0.6985353583033387
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0
}
},
{
@@ -841,7 +1024,7 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
],
'time_unit': 'ns',
'utest': {
- 'have_optimal_repetitions': False, 'cpu_pvalue': 0.14891467317876572, 'time_pvalue': 0.7670968684102772
+ 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772
}
},
{
@@ -853,11 +1036,83 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
'real_time': 8,
'cpu_time_other': 53,
'cpu': -0.3375
- }
+ }
],
'utest': {},
'time_unit': u'ns',
'aggregate_name': ''
+ },
+ {
+ 'name': 'OVERALL_GEOMEAN',
+ 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08,
+ 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08,
+ 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}],
+ 'time_unit': 's',
+ 'run_type': 'aggregate',
+ 'aggregate_name': 'geomean',
+ 'utest': {}
+ }
+ ]
+ self.assertEqual(len(self.json_diff_report), len(expected_output))
+ for out, expected in zip(
+ self.json_diff_report, expected_output):
+ self.assertEqual(out['name'], expected['name'])
+ self.assertEqual(out['time_unit'], expected['time_unit'])
+ assert_utest(self, out, expected)
+ assert_measurements(self, out, expected)
+
+
+class TestReportDifferenceForPercentageAggregates(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ def load_results():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput1 = os.path.join(testInputs, 'test4_run0.json')
+ testOutput2 = os.path.join(testInputs, 'test4_run1.json')
+ with open(testOutput1, 'r') as f:
+ json1 = json.load(f)
+ with open(testOutput2, 'r') as f:
+ json2 = json.load(f)
+ return json1, json2
+
+ json1, json2 = load_results()
+ cls.json_diff_report = get_difference_report(
+ json1, json2, utest=True)
+
+ def test_json_diff_report_pretty_printing(self):
+ expect_lines = [
+ ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0']
+ ]
+ output_lines_with_header = print_difference_report(
+ self.json_diff_report,
+ utest=True, utest_alpha=0.05, use_color=False)
+ output_lines = output_lines_with_header[2:]
+ print("\n")
+ print("\n".join(output_lines_with_header))
+ self.assertEqual(len(output_lines), len(expect_lines))
+ for i in range(0, len(output_lines)):
+ parts = [x for x in output_lines[i].split(' ') if x]
+ self.assertEqual(expect_lines[i], parts)
+
+ def test_json_diff_report(self):
+ expected_output = [
+ {
+ 'name': u'whocares',
+ 'measurements': [
+ {'time': -0.5,
+ 'cpu': 0.5,
+ 'real_time': 0.01,
+ 'real_time_other': 0.005,
+ 'cpu_time': 0.10,
+ 'cpu_time_other': 0.15}
+ ],
+ 'time_unit': 'ns',
+ 'utest': {}
}
]
self.assertEqual(len(self.json_diff_report), len(expected_output))
@@ -869,6 +1124,49 @@ class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly(
assert_measurements(self, out, expected)
+class TestReportSorting(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ def load_result():
+ import json
+ testInputs = os.path.join(
+ os.path.dirname(
+ os.path.realpath(__file__)),
+ 'Inputs')
+ testOutput = os.path.join(testInputs, 'test4_run.json')
+ with open(testOutput, 'r') as f:
+                data = json.load(f)  # don't shadow the json module
+            return data
+
+ cls.json = load_result()
+
+    def test_benchmark_sorting(self):
+ import util
+
+ expected_names = [
+ "99 family 0 instance 0 repetition 0",
+ "98 family 0 instance 0 repetition 1",
+ "97 family 0 instance 0 aggregate",
+ "96 family 0 instance 1 repetition 0",
+ "95 family 0 instance 1 repetition 1",
+ "94 family 0 instance 1 aggregate",
+ "93 family 1 instance 0 repetition 0",
+ "92 family 1 instance 0 repetition 1",
+ "91 family 1 instance 0 aggregate",
+ "90 family 1 instance 1 repetition 0",
+ "89 family 1 instance 1 repetition 1",
+ "88 family 1 instance 1 aggregate"
+ ]
+
+        for _ in range(len(self.json['benchmarks']) ** 2):
+ random.shuffle(self.json['benchmarks'])
+ sorted_benchmarks = util.sort_benchmark_results(self.json)[
+ 'benchmarks']
+ self.assertEqual(len(expected_names), len(sorted_benchmarks))
+ for out, expected in zip(sorted_benchmarks, expected_names):
+ self.assertEqual(out['name'], expected)
+
+
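
Note: the descending "99".."88" name prefixes make lexicographic name order
the exact reverse of the expected order, so the test proves that
util.sort_benchmark_results orders by family, then instance, with
per-repetition entries before their aggregate, and never falls back to
names. A hedged sketch of the composite key this implies (the real
implementation lives in tools/gbench/util.py; the field names follow the
benchmark JSON schema, the helper itself is illustrative):

    def benchmark_sort_key(benchmark):
        return (benchmark.get('family_index', -1),
                benchmark.get('per_family_instance_index', -1),
                # aggregates sort after the repetitions they summarize
                benchmark.get('run_type') == 'aggregate',
                benchmark.get('repetition_index', -1))
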
def assert_utest(unittest_instance, lhs, rhs):
if lhs['utest']:
unittest_instance.assertAlmostEqual(