#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

# This is the script to run specified benchmarks with different toolchain
# settings. It covers both building the benchmarks locally and running them
# on the DUT.
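#
# An illustrative invocation (every value below is a placeholder, not taken
# from this repository; benchmark names come from config.bench_list and the
# serial from your attached device):
#   <this script> -b <benchmark> -c /path/to/clang/bin -c /other/clang/bin \
#       -f='-O2' -f='-O3' -i 3 -s <device_serial> -r <remote_host>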

"""Main script to run the benchmark suite from building to testing."""
from __future__ import print_function

import argparse
import ConfigParser
import logging
import os
import subprocess
import sys

import config

logging.basicConfig(level=logging.INFO)


def _parse_arguments(argv):
  parser = argparse.ArgumentParser(description='Build and run specific '
                                   'benchmarks')
  parser.add_argument(
      '-b',
      '--bench',
      action='append',
      default=[],
      help='Select which benchmark to run')

  # Only one of compiler directory and llvm prebuilts version can be given
  # at a time, so -c and -l are put into a mutually exclusive group.
  group = parser.add_mutually_exclusive_group()

  # The toolchain setting arguments use action='append', so that users
  # can compare performance across several toolchain settings in one run.
  group.add_argument(
      '-c',
      '--compiler_dir',
      metavar='DIR',
      action='append',
      default=[],
      help='Specify path to the compiler\'s bin directory. '
      'You may give several paths, each with a -c, to '
      'compare performance differences between '
      'compilers.')

  parser.add_argument(
      '-o',
      '--build_os',
      action='append',
      default=[],
      help='Specify the host OS used to build the benchmark.')

  group.add_argument(
      '-l',
      '--llvm_prebuilts_version',
      action='append',
      default=[],
      help='Specify the version of prebuilt LLVM. When a '
      'specific prebuilt version of LLVM already '
      'exists, there is no need to pass the compiler '
      'directory.')

  parser.add_argument(
      '-f',
      '--cflags',
      action='append',
      default=[],
      help='Specify the cflags options for the toolchain. '
      'Be sure to wrap all the cflags in quotation '
      'marks ("") or use an equals sign (=).')
  parser.add_argument(
      '--ldflags',
      action='append',
      default=[],
      help='Specify linker flags for the toolchain.')

  parser.add_argument(
      '-i',
      '--iterations',
      type=int,
      default=1,
      help='Specify how many iterations the test should '
      'run.')

  # Arguments -s and -r are for connecting to the DUT.
  parser.add_argument(
      '-s',
      '--serials',
      help='Comma-separated list of device serials under '
      'test.')

  parser.add_argument(
      '-r',
      '--remote',
      default='localhost',
      help='hostname[:port] if the ADB device is connected '
      'to a remote machine. Ensure this workstation '
      'is configured for passwordless ssh access as '
      'users "root" or "adb".')

  # Arguments --frequency and -m are for device settings.
  parser.add_argument(
      '--frequency',
      type=int,
      default=979200,
      help='Specify the CPU frequency of the device, in '
      'kHz. The available values are defined in the '
      'cpufreq/scaling_available_frequencies file in '
      'each core directory on the device. '
      'The default value is 979200, which gives a good '
      'balance between noise and performance. A lower '
      'frequency slows the benchmark down but '
      'reduces noise.')

  parser.add_argument(
      '-m',
      '--mode',
      default='little',
      help='Specify whether to run in \'little\' or \'big\' '
      'mode. The default is little mode. '
      'Little mode runs on a single Cortex-A53 core, '
      'while big mode runs on a single Cortex-A57 core.')

  # Configuration file for the benchmark test.
  parser.add_argument(
      '-t',
      '--test',
      help='Specify the test settings with a configuration '
      'file.')

  # Whether to keep old json results or not.
  parser.add_argument(
      '-k',
      '--keep',
      default='False',
      help='Specify whether to keep the old json '
      'results from the last run. This can be useful if you '
      'want to compare performance differences between two or '
      'more runs. Default is False (off).')

  return parser.parse_args(argv)


# Clear old log files in the bench suite directory.
def clear_logs():
  logging.info('Removing old logfiles...')
  for f in ['build_log', 'device_log', 'test_log']:
    logfile = os.path.join(config.bench_suite_dir, f)
    try:
      os.remove(logfile)
    except OSError:
      logging.info('No logfile %s needs to be removed. Ignored.', f)
  logging.info('Old logfiles have been removed.')


# Clear old json result files in the bench suite directory.
def clear_results():
  logging.info('Clearing old json results...')
  for bench in config.bench_list:
    result = os.path.join(config.bench_suite_dir, bench + '.json')
    try:
      os.remove(result)
    except OSError:
      logging.info('No %s json file needs to be removed. Ignored.', bench)
  logging.info('Old json results have been removed.')


# Use subprocess.check_call to run another script, and write its logs to a
# file.
def check_call_with_log(cmd, log_file):
  log_file = os.path.join(config.bench_suite_dir, log_file)
  with open(log_file, 'a') as logfile:
    log_header = 'Log for command: %s\n' % (cmd)
    logfile.write(log_header)
    try:
      subprocess.check_call(cmd, stdout=logfile)
    except subprocess.CalledProcessError:
      logging.error('Error running %s, please check %s for more info.',
                    cmd, log_file)
      raise
  logging.info('Logs for %s are written to %s.', cmd, log_file)


def set_device(serials, remote, frequency):
  setting_cmd = [
      os.path.join(
          os.path.join(config.android_home, config.autotest_dir),
          'site_utils/set_device.py')
  ]
  setting_cmd.append('-r=' + remote)
  setting_cmd.append('-q=' + str(frequency))

  # Deal with serials.
  # If no serials are specified, try to run the test on the only device.
  # If specified, split the serials into a list and run the test on each
  # device.
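  # As an illustration (the values are placeholders, not from this
  # repository), the command run for each serial could end up looking like:
  #   <autotest_dir>/site_utils/set_device.py -r=localhost -q=979200
  #   -s=<serial>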
  if serials:
    for serial in serials.split(','):
      setting_cmd.append('-s=' + serial)
      check_call_with_log(setting_cmd, 'device_log')
      setting_cmd.pop()
  else:
    check_call_with_log(setting_cmd, 'device_log')

  logging.info('CPU mode and frequency set successfully!')


def log_ambiguous_args():
  logging.error('The count of arguments does not match!')
  raise ValueError('The count of arguments does not match.')


# Check whether the counts of the build arguments are ambiguous. The numbers
# of -c/-l, -f, --ldflags and -o values should be either all 0 or all equal.
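# For example (values are illustrative): two -c paths together with two -f
# cflag strings describe two toolchain settings, while two -c paths with
# three -f strings do not match and are rejected as ambiguous.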
def check_count(compiler, llvm_version, build_os, cflags, ldflags):
  # Count will be set to 0 if neither compiler nor llvm_version is specified.
  # Otherwise, one of these two args has length 0 and count will be the
  # length of the other one.
  count = max(len(compiler), len(llvm_version))

  # Check that the number of cflags is either 0 or the same as the count so
  # far.
  if len(cflags) != 0:
    if count != 0 and len(cflags) != count:
      log_ambiguous_args()
    count = len(cflags)

  if len(ldflags) != 0:
    if count != 0 and len(ldflags) != count:
      log_ambiguous_args()
    count = len(ldflags)

  if len(build_os) != 0:
    if count != 0 and len(build_os) != count:
      log_ambiguous_args()
    count = len(build_os)

  # If no settings are passed, only run the default once.
  return max(1, count)


# Build benchmark binary with toolchain settings
def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                ldflags):
  # Build benchmark locally
  build_cmd = ['./build_bench.py', '-b=' + bench]
  if compiler:
    build_cmd.append('-c=' + compiler[setting_no])
  if llvm_version:
    build_cmd.append('-l=' + llvm_version[setting_no])
  if build_os:
    build_cmd.append('-o=' + build_os[setting_no])
  if cflags:
    build_cmd.append('-f=' + cflags[setting_no])
  if ldflags:
    build_cmd.append('--ldflags=' + ldflags[setting_no])
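  # As an illustration (paths and flags below are placeholders, not taken
  # from this repository), a fully populated command could look like:
  #   ['./build_bench.py', '-b=<bench>', '-c=/path/to/clang/bin',
  #    '-o=<host_os>', '-f=-O3', '--ldflags=<linker flags>']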

  logging.info('Building benchmark for toolchain setting No.%d...',
               setting_no)
  logging.info('Command: %s', build_cmd)

  try:
    subprocess.check_call(build_cmd)
  except Exception:
    logging.error('Error while building benchmark!')
    raise


def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):

  # Run autotest script for benchmark on DUT
  check_call_with_log(test_cmd, 'test_log')

  logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
               'device %s.', setting_no, i, serial)

  # Rename results from the bench_result generated in autotest
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
  if not os.path.exists(bench_result):
    logging.error('No result found at %s, '
                  'please check test_log for details.', bench_result)
    raise OSError('Result file %s not found.' % bench_result)

  new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial,
                                                   setting_no, i)
  new_bench_result_path = os.path.join(config.bench_suite_dir,
                                       new_bench_result)
  try:
    os.rename(bench_result, new_bench_result_path)
  except OSError:
    logging.error('Error while renaming raw result %s to %s',
                  bench_result, new_bench_result_path)
    raise

  logging.info('Benchmark result saved at %s.', new_bench_result_path)


def test_bench(bench, setting_no, iterations, serials, remote, mode):
  logging.info('Start running benchmark on device...')

  # Run benchmark and tests on DUT
  for i in xrange(iterations):
    logging.info('Iteration No.%d:', i)
    test_cmd = [
        os.path.join(
            os.path.join(config.android_home, config.autotest_dir),
            'site_utils/test_bench.py')
    ]
    test_cmd.append('-b=' + bench)
    test_cmd.append('-r=' + remote)
    test_cmd.append('-m=' + mode)

    # Deal with serials. If no serials are specified, try to run the test
    # on the only device. If specified, split the serials into a list and
    # run the test on each device.
    if serials:
      for serial in serials.split(','):
        test_cmd.append('-s=' + serial)

        run_and_collect_result(test_cmd, setting_no, i, bench, serial)
        test_cmd.pop()
    else:
      run_and_collect_result(test_cmd, setting_no, i, bench)


def gen_json(bench, setting_no, iterations, serials):
  bench_result = os.path.join(config.bench_suite_dir, 'bench_result')

  logging.info('Generating JSON file for Crosperf...')

  if not serials:
    serials = 'default'

  for serial in serials.split(','):

    # Platform will be used as device lunch combo instead
    #experiment = '_'.join([serial, str(setting_no)])
    experiment = config.product_combo

    # Input format: bench_result_{bench}_{serial}_{setting_no}_
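    # For example, with no serial given and setting 0, this prefix corresponds
    # to result files named 'bench_result_<bench>_default_0_<iteration>'
    # produced by run_and_collect_result() above (the benchmark name here is
    # a placeholder).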
    input_file = '_'.join([bench_result, bench,
                           serial, str(setting_no), ''])
    gen_json_cmd = [
        './gen_json.py', '--input=' + input_file,
        '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
        '--bench=' + bench, '--platform=' + experiment,
        '--iterations=' + str(iterations)
    ]

    logging.info('Command: %s', gen_json_cmd)
    if subprocess.call(gen_json_cmd):
      logging.error('Error while generating JSON file, please check the raw'
                    ' data of the results at %s.', input_file)


def gen_crosperf(infile, outfile):
  # Set environment variable for crosperf
  os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)

  logging.info('Generating Crosperf Report...')
  crosperf_cmd = [
      os.path.join(config.toolchain_utils, 'generate_report.py'),
      '-i=' + infile, '-o=' + outfile, '-f'
  ]

  # Run crosperf generate_report.py
  logging.info('Command: %s', crosperf_cmd)
  subprocess.call(crosperf_cmd)

  logging.info('Report generated successfully!')
  logging.info('Report location: %s.html in the bench suite directory.',
               outfile)


def main(argv):
  # Set environment variable for the local location of the benchmark suite.
  # This is for collecting test results into the benchmark suite directory.
  os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir

  # Set Android type, used for the parts that differ between AOSP and
  # internal builds.
  os.environ['ANDROID_TYPE'] = config.android_type

  # Set ANDROID_HOME for both building and testing.
  os.environ['ANDROID_HOME'] = config.android_home

  # Set environment variable for the architecture; this will be used in
  # autotest.
  os.environ['PRODUCT'] = config.product

  arguments = _parse_arguments(argv)

  bench_list = arguments.bench
  if not bench_list:
    bench_list = config.bench_list

  compiler = arguments.compiler_dir
  build_os = arguments.build_os
  llvm_version = arguments.llvm_prebuilts_version
  cflags = arguments.cflags
  ldflags = arguments.ldflags
  iterations = arguments.iterations
  serials = arguments.serials
  remote = arguments.remote
  frequency = arguments.frequency
  mode = arguments.mode
  keep = arguments.keep

  # Clear old logs every time before running the script.
  clear_logs()

  if keep == 'False':
    clear_results()

  # Set test mode and CPU frequency of the DUT.
  set_device(serials, remote, frequency)

  test = arguments.test
  # If a test configuration file has been given, use the build settings
  # from the configuration file and run the test.
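  # A hypothetical configuration file (INI syntax, read by ConfigParser) might
  # look like the sketch below; section names are arbitrary, the keys are the
  # ones this script reads, and every value shown is a placeholder:
  #
  #   [setting0]
  #   bench = <benchmark name from config.bench_list>
  #   compiler = /path/to/compiler/bin
  #   build_os = <host OS>
  #   llvm_version =
  #   cflags = -O3
  #   ldflags =
  #   iterations = 2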
  if test:
    test_config = ConfigParser.ConfigParser(allow_no_value=True)
    if not test_config.read(test):
      logging.error('Error while reading from the build '
                    'configuration file %s.', test)
      raise RuntimeError('Error while reading configuration file %s.'
                         % test)

    for setting_no, section in enumerate(test_config.sections()):
      bench = test_config.get(section, 'bench')
      compiler = [test_config.get(section, 'compiler')]
      build_os = [test_config.get(section, 'build_os')]
      llvm_version = [test_config.get(section, 'llvm_version')]
      cflags = [test_config.get(section, 'cflags')]
      ldflags = [test_config.get(section, 'ldflags')]

      # Set iterations from the test_config file; if not present, use the
      # value from the command line.
      it = test_config.get(section, 'iterations')
      if not it:
        it = iterations
      it = int(it)

      # Build the benchmark for each single test configuration.
      build_bench(0, bench, compiler, llvm_version,
                  build_os, cflags, ldflags)

      test_bench(bench, setting_no, it, serials, remote, mode)

      gen_json(bench, setting_no, it, serials)

    for bench in config.bench_list:
      infile = os.path.join(config.bench_suite_dir, bench + '.json')
      if os.path.exists(infile):
        outfile = os.path.join(config.bench_suite_dir,
                               bench + '_report')
        gen_crosperf(infile, outfile)

    # Stop the script if only a config file is provided.
    return 0

  # If no configuration file is specified, continue running.
  # Check whether the counts of the setting arguments are ambiguous.
  setting_count = check_count(compiler, llvm_version, build_os,
                              cflags, ldflags)

  for bench in bench_list:
    logging.info('Start building and running benchmark: [%s]', bench)
    # Run the script for each toolchain setting.
    for setting_no in xrange(setting_count):
      build_bench(setting_no, bench, compiler, llvm_version,
                  build_os, cflags, ldflags)

      # Run autotest script for benchmark test on device
      test_bench(bench, setting_no, iterations, serials, remote, mode)

      gen_json(bench, setting_no, iterations, serials)

    infile = os.path.join(config.bench_suite_dir, bench + '.json')
    outfile = os.path.join(config.bench_suite_dir, bench + '_report')
    gen_crosperf(infile, outfile)


if __name__ == '__main__':
  main(sys.argv[1:])