blob: 960843013011935676e38a5bb12d4e454dc32687 [file] [log] [blame]
Simran Basi259a2b52017-06-21 16:14:07 -07001#!/usr/bin/env python
2#
3# Copyright 2017, The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
mikehoranbe9102f2017-08-04 16:04:03 -070017"""
18Command line utility for running Android tests through TradeFederation.
Simran Basi259a2b52017-06-21 16:14:07 -070019
20atest helps automate the flow of building test modules across the Android
21code base and executing the tests via the TradeFederation test harness.
22
atest is designed to support any test types that can be run by TradeFederation.
24"""
25
nelsonliedbd7452018-08-27 11:11:11 +080026from __future__ import print_function
27
mikehoranbe9102f2017-08-04 16:04:03 -070028import logging
Simran Basi259a2b52017-06-21 16:14:07 -070029import os
30import sys
mikehoran95091b22017-10-31 15:55:26 -070031import tempfile
32import time
kellyhung792fbcf2018-11-19 16:25:50 +080033import platform
Simran Basi259a2b52017-06-21 16:14:07 -070034
Jim Tang815b8892018-07-11 12:57:30 +080035import atest_arg_parser
mikehoraned5d7fe2018-10-05 14:24:18 -070036import atest_metrics
Simran Basicf2189b2017-11-06 23:40:24 -080037import atest_utils
mikehoran63d61b42017-07-28 15:28:50 -070038import cli_translator
Kevin Cheng7edb0b92017-12-14 15:00:25 -080039# pylint: disable=import-error
40import constants
Kevin Cheng8b2c94c2017-12-18 14:43:26 -080041import module_info
mikehoran9b6b44b2018-04-09 15:54:58 -070042import result_reporter
Kevin Cheng7edb0b92017-12-14 15:00:25 -080043import test_runner_handler
Mike Ma0126b9b2018-01-11 19:11:16 -080044from test_runners import regression_test_runner
# TODO: Delete SEND_CC_LOG and try/except when no proto ImportError happened.
# Clearcut metrics logging is best-effort: some checkouts do not carry the
# generated metrics proto modules, so a failed import disables logging
# instead of aborting atest.
SEND_CC_LOG = True
try:
    from metrics import metrics
    from metrics import metrics_utils
except ImportError:
    SEND_CC_LOG = False
Simran Basicf2189b2017-11-06 23:40:24 -080052
# Environment variables that must be present (normally exported by `lunch`)
# before atest can locate the source tree and build outputs.
EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
# Prefix for the per-run temp results dir; %s is filled with an epoch time.
TEST_RUN_DIR_PREFIX = 'atest_run_%s_'
# Everything after this flag on the command line is passed through to the
# underlying test runner untouched.
CUSTOM_ARG_FLAG = '--'
# Error template for options that are unsupported with TEST_MAPPING runs.
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

# Human-readable labels for the two groups of TEST_MAPPING tests.
DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
# Header templates, interpolated with the TEST_COUNT/TEST_TYPE keys below.
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
68
Simran Basi259a2b52017-06-21 16:14:07 -070069
def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argspace.Namespace class instance holding parsed args.
    """
    # Anything after the first '--' is not an atest option: keep it aside
    # and hand it to the test runner verbatim via args.custom_args.
    atest_argv = argv
    custom_argv = []
    if CUSTOM_ARG_FLAG in argv:
        split_at = argv.index(CUSTOM_ARG_FLAG)
        atest_argv = argv[:split_at]
        custom_argv = argv[split_at + 1:]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(atest_argv)
    args.custom_args = custom_argv
    return args
mikehoran63d61b42017-07-28 15:28:50 -070092
Simran Basi259a2b52017-06-21 16:14:07 -070093
def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If true display DEBUG level logs.
    """
    fmt = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    date_fmt = '%Y-%m-%d %H:%M:%S'
    # Verbose mode lowers the threshold to DEBUG; otherwise INFO and above.
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(level=level, format=fmt, datefmt=date_fmt)
mikehoranbe9102f2017-08-04 16:04:03 -0700106
107
def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    # The comprehension already drops set variables, so the old
    # filter(None, ...) wrapper was redundant — and on Python 3 it returned
    # a lazy filter object, which would make the truthiness check below
    # succeed even when nothing is missing. Return a plain list instead.
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing
120
121
def make_test_run_dir():
    """Make the test run dir in tmp.

    Returns:
        A string of the dir path.
    """
    # Timestamp the dir name so successive runs are easy to tell apart.
    now = int(time.time())
    return tempfile.mkdtemp(prefix=TEST_RUN_DIR_PREFIX % now)
131
132
def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: arg parsed object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    # These two keys are flags: their presence matters, not their value.
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # The remaining options are copied through verbatim when truthy.
    passthrough = (
        (args.disable_teardown, constants.DISABLE_TEARDOWN),
        (args.generate_baseline, constants.PRE_PATCH_ITERATIONS),
        (args.serial, constants.SERIAL),
        (args.all_abi, constants.ALL_ABI),
        (args.generate_new_metrics, constants.POST_PATCH_ITERATIONS),
        (args.host, constants.HOST),
        (args.dry_run, constants.DRY_RUN),
        (args.custom_args, constants.CUSTOM_ARGS),
    )
    for value, key in passthrough:
        if value:
            extra_args[key] = value
    return extra_args
165
166
def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    # When a metrics folder is being generated this run, point at the
    # results dir; otherwise consume the user-supplied path. The pre-patch
    # path is popped first, so argument order is preserved.
    if args.generate_baseline:
        pre_patch_folder = os.path.join(results_dir, 'baseline-metrics')
    else:
        pre_patch_folder = args.detect_regression.pop(0)
    if args.generate_new_metrics:
        post_patch_folder = os.path.join(results_dir, 'new-metrics')
    else:
        post_patch_folder = args.detect_regression.pop(0)
    return {constants.PRE_PATCH_FOLDER: pre_patch_folder,
            constants.POST_PATCH_FOLDER: post_patch_folder}
185
186
def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate all test execution modes are not in conflict.

    Exit the program with error code if have device-only and host-only.
    If no conflict and host side, add args.host=True.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfos.
        host_tests: True if all tests should be deviceless, False if all tests
                    should be device tests. Default is set to None, which means
                    tests can be either deviceless or device tests.
    """
    all_modes = [info.get_supported_exec_mode() for info in test_infos]
    has_device_test = constants.DEVICE_TEST in all_modes
    has_deviceless_test = constants.DEVICELESS_TEST in all_modes
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and has_device_test:
        logging.error('Test side and option(--host) conflict. Please remove '
                      '--host if the test run on device side.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if has_deviceless_test and has_device_test:
        logging.error('There are host-only and device-only tests in command.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if host_tests is False and has_deviceless_test:
        logging.error('There are host-only tests in command.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overriden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = has_deviceless_test
Dan Shi08c7b722018-11-29 10:25:59 -0800222
223
def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate all test execution modes are not in conflict.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is found for host-only.
    For deviceless tests, exit the program if any test is found for device-only.

    Args:
        args: parsed args object.
        test_infos: A set of TestInfos from TEST_MAPPING files.
    """
    device_infos, host_infos = _split_test_mapping_tests(test_infos)
    # Device tests only need verification when the run is not host-only.
    if device_infos and not args.host:
        _validate_exec_mode(args, device_infos, host_tests=False)
    if host_infos:
        _validate_exec_mode(args, host_infos, host_tests=True)
244
kellyhung0625d172018-06-21 16:40:27 +0800245
Mike Ma0126b9b2018-01-11 19:11:16 -0800246def _will_run_tests(args):
247 """Determine if there are tests to run.
248
249 Currently only used by detect_regression to skip the test if just running regression detection.
250
251 Args:
252 args: parsed args object.
253
254 Returns:
255 True if there are tests to run, false otherwise.
256 """
257 return not (args.detect_regression and len(args.detect_regression) == 2)
258
259
260def _has_valid_regression_detection_args(args):
261 """Validate regression detection args.
262
263 Args:
264 args: parsed args object.
265
266 Returns:
267 True if args are valid
268 """
269 if args.generate_baseline and args.generate_new_metrics:
270 logging.error('Cannot collect both baseline and new metrics at the same time.')
271 return False
272 if args.detect_regression is not None:
273 if not args.detect_regression:
274 logging.error('Need to specify at least 1 arg for regression detection.')
275 return False
276 elif len(args.detect_regression) == 1:
277 if args.generate_baseline or args.generate_new_metrics:
278 return True
279 logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
280 return False
281 elif len(args.detect_regression) == 2:
282 if args.generate_baseline:
283 logging.error('Specified 2 metric paths and --generate-baseline, '
284 'either drop --generate-baseline or drop a path')
285 return False
286 if args.generate_new_metrics:
287 logging.error('Specified 2 metric paths and --generate-new-metrics, '
288 'either drop --generate-new-metrics or drop a path')
289 return False
290 return True
291 else:
292 logging.error('Specified more than 2 metric paths.')
293 return False
294 return True
295
296
def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    # Non-TEST_MAPPING runs accept every option.
    if not atest_utils.is_test_mapping(args):
        return True
    unsupported = (
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    )
    for value, flag in unsupported:
        if value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, flag)
            return False
    return True
322
323
def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # Each validator logs its own specific reason before we exit.
    for is_valid in (_has_valid_regression_detection_args,
                     _has_valid_test_mapping_args):
        if not is_valid(args):
            sys.exit(constants.EXIT_CODE_ERROR)
342
nelsonlie3f90de2018-06-22 14:59:39 +0800343
def _print_module_info_from_module_name(mod_info, module_name):
    """print out the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of module.

    Returns:
        True if the module_info is found.
    """
    title_mapping = {
        constants.MODULE_PATH: "Source code path",
        constants.MODULE_INSTALLED: "Installed path",
        constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        # Iterate the dict directly: dict.iterkeys() is Python 2-only and
        # raises AttributeError on Python 3; plain iteration over the dict
        # yields the same keys on both versions.
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found
369
370
def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return EXIT_CODE_SUCCESS
    """
    for info in test_infos:
        _print_module_info_from_module_name(mod_info, info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(info.build_targets)))
        # Also show module info for every build target besides the test itself.
        related_targets = [t for t in info.build_targets if t != info.test_name]
        for target in related_targets:
            _print_module_info_from_module_name(mod_info, target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS
390
Dan Shi08c7b722018-11-29 10:25:59 -0800391
def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    # All infos in a run share the same origin, so inspecting any one of
    # them is sufficient.
    any_info = list(test_infos)[0]
    return any_info.from_test_mapping
402
403
def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    device_test_infos = set()
    host_test_infos = set()
    # Partition on each test's own `host` flag.
    for info in test_infos:
        if info.host:
            host_test_infos.add(info)
        else:
            device_test_infos.add(info)
    return device_test_infos, host_test_infos
420
421
# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Runs host tests and (unless --host was given) device tests as separate
    batches, prints a per-batch summary, and combines the exit codes.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    # Run each non-empty batch; summaries are delayed so they can be printed
    # together, grouped by batch, after all batches have run.
    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        test_results.append((tests_exit_code, reporter, test_type))

    # Fold every batch's exit code into one result; any non-zero bit marks
    # the whole run as failed.
    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            '\n==============================', constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code
477
478
def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands of the target tests rather than running them in actual.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    """
    grouped_tests = test_runner_handler.group_tests_by_test_runners(test_infos)
    for runner_class, tests in grouped_tests:
        runner = runner_class(results_dir)
        for cmd in runner.generate_run_commands(tests, extra_args):
            print('Would run test via command: %s'
                  % (atest_utils.colorize(cmd, constants.GREEN)))
494
495
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
def main(argv):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.

    Returns:
        Exit code.
    """
    args = _parse_args(argv)
    _configure_logging(args.verbose)
    _validate_args(args)
    atest_metrics.log_start_event()
    start = time.time()
    if SEND_CC_LOG:
        metrics.AtestStartEvent(
            command_line=' '.join(argv),
            test_references=args.tests,
            cwd=os.getcwd(),
            os=platform.platform())
    results_dir = make_test_run_dir()
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    translator = cli_translator.CLITranslator(module_info=mod_info)
    build_targets = set()
    test_infos = set()
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            if SEND_CC_LOG:
                metrics_utils.send_exit_event(start, constants.EXIT_CODE_TEST_NOT_FOUND)
            return constants.EXIT_CODE_TEST_NOT_FOUND
        # TEST_MAPPING runs have per-test host settings and are validated
        # group by group; everything else is validated in one pass.
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        if SEND_CC_LOG:
            metrics_utils.send_exit_event(start, constants.EXIT_CODE_SUCCESS)
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if args.dry_run:
        _dry_run(results_dir, extra_args, test_infos)
        if SEND_CC_LOG:
            metrics_utils.send_exit_event(start, constants.EXIT_CODE_SUCCESS)
        return constants.EXIT_CODE_SUCCESS

    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps will be None if none of -bit set, else list of params set.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)
        success = atest_utils.build(build_targets, args.verbose)
        if not success:
            if SEND_CC_LOG:
                metrics_utils.send_exit_event(start, constants.EXIT_CODE_BUILD_FAILURE)
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        # logging.warn is a deprecated alias of logging.warning; use the
        # canonical name.
        logging.warning('Install step without test step currently not '
                        'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, _ = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    # Collapse any non-success code into the generic test-failure exit code.
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    if SEND_CC_LOG:
        metrics_utils.send_exit_event(start, tests_exit_code)
    return tests_exit_code
mikehoran63d61b42017-07-28 15:28:50 -0700584
if __name__ == '__main__':
    # Strip the program name; main() receives only the user-supplied args.
    sys.exit(main(sys.argv[1:]))