Eric Fiselier19039762018-01-18 04:23:01 +00001#!/usr/bin/env python
2
3"""
4compare.py - versatile benchmark output compare tool
5"""
6
import argparse
import os
import sys
import unittest
from argparse import ArgumentParser

import gbench
from gbench import util, report
from gbench.util import *
13
14
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities.

    Args:
        in1: path to the baseline benchmark executable or JSON output file.
        in2: path to the contender benchmark executable or JSON output file.
        flags: extra command-line flags that will be forwarded to the
            benchmark executables.

    Prints warnings for suspicious-but-allowed combinations, and exits the
    process with status 1 when an unsupported '--benchmark_out_format' is
    requested.
    """
    in1_kind, in1_err = classify_input_file(in1)
    in2_kind, in2_err = classify_input_file(in2)
    output_file = find_benchmark_flag('--benchmark_out=', flags)
    output_type = find_benchmark_flag('--benchmark_out_format=', flags)
    # The same flags are passed to both runs, so a single output file would
    # be written twice -- the second run clobbers the first.
    if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % output_file)
    # Flags only matter when an executable is actually run; with two JSON
    # inputs they are silently ignored.
    if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    # The report machinery can only consume JSON benchmark output.
    # (Fixed: the message previously had a mismatched quote/backtick.)
    if output_type is not None and output_type != 'json':
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py'"
               " is not supported.") % output_type)
        sys.exit(1)
33
34
def create_parser():
    """
    Build the command-line parser for compare.py.

    The parser carries the global U-test options plus three sub-commands
    ('benchmarks', 'filters', 'benchmarksfiltered'), each collecting the
    inputs for one mode of comparison.
    """

    def add_file_arg(group, name, help_text):
        # Positional naming a benchmark executable or a JSON output file.
        group.add_argument(
            name,
            metavar=name,
            type=argparse.FileType('r'),
            nargs=1,
            help=help_text)

    def add_filter_arg(group, name, help_text):
        # Positional naming a benchmark-name filter expression.
        group.add_argument(
            name,
            metavar=name,
            type=str,
            nargs=1,
            help=help_text)

    def add_benchmark_options(subparser):
        # Everything after the positionals is forwarded verbatim to the
        # benchmark executables.
        subparser.add_argument(
            'benchmark_options',
            metavar='benchmark_options',
            nargs=argparse.REMAINDER,
            help='Arguments to pass when running benchmark executables')

    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
        alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    # Mode 1: compare two whole benchmark runs.
    benchmarks_mode = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    group = benchmarks_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    add_file_arg(group, 'test_baseline',
                 'A benchmark executable or JSON output file')
    group = benchmarks_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    add_file_arg(group, 'test_contender',
                 'A benchmark executable or JSON output file')
    add_benchmark_options(benchmarks_mode)

    # Mode 2: compare two filters applied to one benchmark run.
    filters_mode = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    group = filters_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    add_file_arg(group, 'test',
                 'A benchmark executable or JSON output file')
    add_filter_arg(group, 'filter_baseline',
                   'The first filter, that will be used as baseline')
    group = filters_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    add_filter_arg(group, 'filter_contender',
                   'The second filter, that will be compared against the baseline')
    add_benchmark_options(filters_mode)

    # Mode 3: compare one filter from each of two benchmark runs.
    filtered_mode = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    group = filtered_mode.add_argument_group(
        'baseline', 'The benchmark baseline')
    add_file_arg(group, 'test_baseline',
                 'A benchmark executable or JSON output file')
    add_filter_arg(group, 'filter_baseline',
                   'The first filter, that will be used as baseline')
    group = filtered_mode.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    add_file_arg(group, 'test_contender',
                 'The second benchmark executable or JSON output file, that will be compared against the baseline')
    add_filter_arg(group, 'filter_contender',
                   'The second filter, that will be compared against the baseline')
    add_benchmark_options(filtered_mode)

    return parser
152
153
def main():
    """
    Entry point: parse the command line, run (or load) the two benchmark
    inputs, and print a difference report comparing them.
    """
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        # Use sys.exit for consistency with check_inputs(); the builtin
        # exit() comes from the 'site' module and is not guaranteed.
        sys.exit(1)
    # Everything the parser did not consume should have been captured by
    # the REMAINDER 'benchmark_options' positional; anything left over is
    # a usage error.  Diagnose it explicitly instead of with 'assert',
    # which is silently stripped under 'python -O'.
    if unknown_args:
        print("Unrecognized arguments: %s" % unknown_args)
        parser.print_help()
        sys.exit(1)
    benchmark_options = args.benchmark_options

    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        # Both filters run against the same input file.
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        sys.exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.utest, args.utest_alpha)
    print(description)
    for ln in output_lines:
        print(ln)
230
231
232import unittest
233
234
class TestParser(unittest.TestCase):
    """Unit tests for the compare.py command-line parser."""

    def setUp(self):
        self.parser = create_parser()
        inputs_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(inputs_dir, 'test1_run1.json')
        self.testInput1 = os.path.join(inputs_dir, 'test1_run2.json')

    def _parse(self, *argv):
        # Feed argv to the parser exactly as parse_args would receive it.
        return self.parser.parse_args(list(argv))

    def _check_benchmarks_mode(self, parsed):
        # Assertions shared by every 'benchmarks'-mode test.
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)

    def _check_filters_mode(self, parsed):
        # Assertions shared by every 'filters'-mode test.
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')

    def _check_benchmarksfiltered_mode(self, parsed):
        # Assertions shared by every 'benchmarksfiltered'-mode test.
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')

    def test_benchmarks_basic(self):
        parsed = self._parse('benchmarks', self.testInput0, self.testInput1)
        self.assertTrue(parsed.utest)
        self._check_benchmarks_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self._parse(
            '--no-utest', 'benchmarks', self.testInput0, self.testInput1)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self._check_benchmarks_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self._parse(
            '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self._check_benchmarks_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self._parse(
            '--no-utest', '--alpha=0.314', 'benchmarks',
            self.testInput0, self.testInput1)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self._check_benchmarks_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self._parse(
            'benchmarks', self.testInput0, self.testInput1, 'd')
        self.assertTrue(parsed.utest)
        self._check_benchmarks_mode(parsed)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self._parse(
            'benchmarks', self.testInput0, self.testInput1, '--', 'e')
        self.assertTrue(parsed.utest)
        self._check_benchmarks_mode(parsed)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd')
        self.assertTrue(parsed.utest)
        self._check_filters_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd', 'e')
        self.assertTrue(parsed.utest)
        self._check_filters_mode(parsed)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self._parse('filters', self.testInput0, 'c', 'd', '--', 'f')
        self.assertTrue(parsed.utest)
        self._check_filters_mode(parsed)
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e')
        self.assertTrue(parsed.utest)
        self._check_benchmarksfiltered_mode(parsed)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c',
            self.testInput1, 'e', 'f')
        self.assertTrue(parsed.utest)
        self._check_benchmarksfiltered_mode(parsed)
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self._parse(
            'benchmarksfiltered', self.testInput0, 'c',
            self.testInput1, 'e', '--', 'g')
        self.assertTrue(parsed.utest)
        self._check_benchmarksfiltered_mode(parsed)
        self.assertEqual(parsed.benchmark_options[0], 'g')
365
366
if __name__ == '__main__':
    # Run the compare tool; to run the parser self-tests instead,
    # temporarily swap the call below for unittest.main().
    # unittest.main()
    main()
370
371# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
372# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
373# kate: indent-mode python; remove-trailing-spaces modified;