#!/usr/bin/env python
"""
compare_bench.py - Compare two benchmarks or their results and report the
                   difference.
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *


def check_inputs(in1, in2, flags):
    """
    Perform checks on the user-provided inputs and diagnose any abnormalities.
    """
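    # classify_input_file() and the IT_* kind constants come from gbench.util
    # via the wildcard import above; the call appears to return a
    # (kind, error-message) pair, of which only the kind is inspected here.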
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks, causing it to be overwritten") % output_file)
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing --benchmark flags has no effect since both "
              "inputs are JSON")
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to "
               "'compare_bench.py' is not supported.") % output_type)
        sys.exit(1)


def main():
    parser = ArgumentParser(
        description='compare the results of two benchmarks')
    parser.add_argument(
        'test1', metavar='test1', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'test2', metavar='test2', type=str, nargs=1,
        help='A benchmark executable or JSON output file')
    parser.add_argument(
        'benchmark_options', metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')
    args, unknown_args = parser.parse_known_args()
    # Extract the positional arguments parsed above
    test1 = args.test1[0]
    test2 = args.test2[0]
    if unknown_args:
        # This should never happen: the REMAINDER positional above captures
        # any trailing flags.
        print("Unrecognized positional arguments: '%s'" % unknown_args)
        sys.exit(1)
    benchmark_options = args.benchmark_options
    check_inputs(test1, test2, benchmark_options)
    # Run the benchmarks (or load their saved results) and report the difference
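    # run_or_load_benchmark() comes from gbench.util; judging by its name and
    # the input classification in check_inputs(), it either executes a
    # benchmark binary (forwarding benchmark_options) or loads an existing
    # JSON results file, yielding parsed JSON either way.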
    json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options)
    json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options)
    output_lines = gbench.report.generate_difference_report(json1, json2)
    print('Comparing %s to %s' % (test1, test2))
    for ln in output_lines:
        print(ln)


if __name__ == '__main__':
    main()