blob: 332c5793250ee5e0199bf58644ef342902c2cfb0 [file] [log] [blame]
Simran Basi259a2b52017-06-21 16:14:07 -07001#!/usr/bin/env python
2#
3# Copyright 2017, The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
mikehoranbe9102f2017-08-04 16:04:03 -070017"""
18Command line utility for running Android tests through TradeFederation.
Simran Basi259a2b52017-06-21 16:14:07 -070019
20atest helps automate the flow of building test modules across the Android
21code base and executing the tests via the TradeFederation test harness.
22
23atest is designed to support any test types that can be run by TradeFederation.
24"""
25
nelsonliedbd7452018-08-27 11:11:11 +080026from __future__ import print_function
27
mikehoranbe9102f2017-08-04 16:04:03 -070028import logging
Simran Basi259a2b52017-06-21 16:14:07 -070029import os
30import sys
mikehoran95091b22017-10-31 15:55:26 -070031import tempfile
32import time
kellyhung792fbcf2018-11-19 16:25:50 +080033import platform
Simran Basi259a2b52017-06-21 16:14:07 -070034
Jim Tang6ed753e2019-07-23 10:39:58 +080035from multiprocessing import Process
36
Jim Tang815b8892018-07-11 12:57:30 +080037import atest_arg_parser
yangbillbac1dd62019-06-03 17:06:40 +080038import atest_error
yelinhsieh4d5917d2019-03-12 17:26:27 +080039import atest_execution_info
Simran Basicf2189b2017-11-06 23:40:24 -080040import atest_utils
kellyhung7d004bb2019-04-02 11:54:59 +080041import bug_detector
mikehoran63d61b42017-07-28 15:28:50 -070042import cli_translator
Kevin Cheng7edb0b92017-12-14 15:00:25 -080043# pylint: disable=import-error
44import constants
Kevin Cheng8b2c94c2017-12-18 14:43:26 -080045import module_info
mikehoran9b6b44b2018-04-09 15:54:58 -070046import result_reporter
Kevin Cheng7edb0b92017-12-14 15:00:25 -080047import test_runner_handler
kellyhung924b8832019-03-05 18:35:00 +080048
49from metrics import metrics
kellyhunge3fa1752019-04-23 11:13:41 +080050from metrics import metrics_base
kellyhung924b8832019-03-05 18:35:00 +080051from metrics import metrics_utils
Mike Ma0126b9b2018-01-11 19:11:16 -080052from test_runners import regression_test_runner
Jim Tang6ed753e2019-07-23 10:39:58 +080053from tools import atest_tools
Simran Basicf2189b2017-11-06 23:40:24 -080054
mikehoranbe9102f2017-08-04 16:04:03 -070055EXPECTED_VARS = frozenset([
Kevin Cheng8b2c94c2017-12-18 14:43:26 -080056 constants.ANDROID_BUILD_TOP,
mikehoran43ed32d2017-08-18 17:13:36 -070057 'ANDROID_TARGET_OUT_TESTCASES',
Kevin Cheng8b2c94c2017-12-18 14:43:26 -080058 constants.ANDROID_OUT])
mikehoran95091b22017-10-31 15:55:26 -070059TEST_RUN_DIR_PREFIX = 'atest_run_%s_'
Kevin Cheng21ea9102018-02-22 10:52:42 -080060CUSTOM_ARG_FLAG = '--'
Dan Shi0ddd3e42018-05-30 11:24:30 -070061OPTION_NOT_FOR_TEST_MAPPING = (
62 'Option `%s` does not work for running tests in TEST_MAPPING files')
mikehoranc80dc532017-11-14 14:30:06 -080063
Dan Shi08c7b722018-11-29 10:25:59 -080064DEVICE_TESTS = 'tests that require device'
65HOST_TESTS = 'tests that do NOT require device'
66RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
67RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
68TEST_COUNT = 'test_count'
69TEST_TYPE = 'test_type'
70
Jim Tang6ed753e2019-07-23 10:39:58 +080071# Tasks that must run in the build time but unable to build by soong.
72# (e.g subprocesses that invoke host commands.)
73EXTRA_TASKS = {
74 'index-targets': atest_tools.index_targets
75}
76
Simran Basi259a2b52017-06-21 16:14:07 -070077
def _parse_args(argv):
    """Parse command line arguments.

    Everything after the CUSTOM_ARG_FLAG separator ('--') is kept away from
    the regular parser and handed through verbatim in args.custom_args.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace instance holding parsed args.
    """
    # Split argv at the first '--': atest's own flags go to the parser,
    # the remainder is forwarded untouched to the test runner.
    atest_argv, custom_argv = argv, []
    if CUSTOM_ARG_FLAG in argv:
        separator = argv.index(CUSTOM_ARG_FLAG)
        atest_argv = argv[:separator]
        custom_argv = argv[separator + 1:]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(atest_argv)
    args.custom_args = custom_argv
    return args
mikehoran63d61b42017-07-28 15:28:50 -0700100
Simran Basi259a2b52017-06-21 16:14:07 -0700101
mikehoranbe9102f2017-08-04 16:04:03 -0700102def _configure_logging(verbose):
103 """Configure the logger.
104
105 Args:
106 verbose: A boolean. If true display DEBUG level logs.
107 """
mikehoranb2401822018-08-16 12:01:40 -0700108 log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
109 datefmt = '%Y-%m-%d %H:%M:%S'
mikehoranbe9102f2017-08-04 16:04:03 -0700110 if verbose:
mikehoranb2401822018-08-16 12:01:40 -0700111 logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt)
mikehoranbe9102f2017-08-04 16:04:03 -0700112 else:
mikehoranb2401822018-08-16 12:01:40 -0700113 logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt)
mikehoranbe9102f2017-08-04 16:04:03 -0700114
115
def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    # Use a plain list comprehension: the extra filter(None, ...) wrapper was
    # redundant, and under Python 3 filter() returns a lazy iterator that is
    # always truthy, which would break the emptiness check below and return
    # an iterator instead of the documented list.
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing
128
129
def make_test_run_dir():
    """Make the test run dir in tmp.

    Returns:
        A string of the dir path.
    """
    # Stamp the dir name with the current UTC epoch so runs sort by time.
    timestamp = int(time.time())
    return tempfile.mkdtemp(prefix=TEST_RUN_DIR_PREFIX % timestamp)
139
140
def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: arg parsed object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    # Presence-only flags: the key matters, the value is always None.
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    if constants.INSTALL_STEP not in (args.steps or constants.ALL_STEPS):
        extra_args[constants.DISABLE_INSTALL] = None
    # Options whose truthy value is copied through verbatim.
    arg_to_key = (
        ('disable_teardown', constants.DISABLE_TEARDOWN),
        ('generate_baseline', constants.PRE_PATCH_ITERATIONS),
        ('serial', constants.SERIAL),
        ('all_abi', constants.ALL_ABI),
        ('generate_new_metrics', constants.POST_PATCH_ITERATIONS),
        ('instant', constants.INSTANT),
        ('host', constants.HOST),
        ('dry_run', constants.DRY_RUN),
        ('custom_args', constants.CUSTOM_ARGS),
    )
    for attr, key in arg_to_key:
        value = getattr(args, attr)
        if value:
            extra_args[key] = value
    return extra_args
175
176
def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    # Each side of the comparison is either generated under results_dir or
    # taken (in order) from the paths the user gave to --detect-regression.
    if args.generate_baseline:
        pre_patch = os.path.join(results_dir, 'baseline-metrics')
    else:
        pre_patch = args.detect_regression.pop(0)
    if args.generate_new_metrics:
        post_patch = os.path.join(results_dir, 'new-metrics')
    else:
        post_patch = args.detect_regression.pop(0)
    return {constants.PRE_PATCH_FOLDER: pre_patch,
            constants.POST_PATCH_FOLDER: post_patch}
195
196
def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate all test execution modes are not in conflict.

    Exit the program with error code if have device-only and host-only.
    If no conflict and host side, add args.host=True.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
                    should be device tests. Default is set to None, which means
                    tests can be either deviceless or device tests.
    """
    modes = [info.get_supported_exec_mode() for info in test_infos]
    has_device_test = constants.DEVICE_TEST in modes
    has_deviceless_test = constants.DEVICELESS_TEST in modes
    # Later checks intentionally overwrite err_msg; only the last matching
    # conflict is reported, matching the original behavior.
    err_msg = None
    # '$atest <device-only> --host' is a conflict.
    if (host_tests or args.host) and has_device_test:
        err_msg = ('Test side and option(--host) conflict. Please remove '
                   '--host if the test run on device side.')
    # Mixing host-only and device-only tests in one command is a conflict.
    if has_deviceless_test and has_device_test:
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and has_deviceless_test:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # For '$atest <host-only>', switch to host-side automatically; only
    # override when the caller did not pin host_tests explicitly.
    if not args.host and host_tests is None:
        args.host = has_deviceless_test
Dan Shi08c7b722018-11-29 10:25:59 -0800231
232
def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate all test execution modes are not in conflict.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is found for host-only.
    For deviceless tests, exit the program if any test is found for device-only.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
    """
    device_infos, host_infos = _split_test_mapping_tests(test_infos)
    # Device tests only need validation when the run is not host-restricted.
    if device_infos and not args.host:
        _validate_exec_mode(args, device_infos, host_tests=False)
    if host_infos:
        _validate_exec_mode(args, host_infos, host_tests=True)
253
kellyhung0625d172018-06-21 16:40:27 +0800254
Mike Ma0126b9b2018-01-11 19:11:16 -0800255def _will_run_tests(args):
256 """Determine if there are tests to run.
257
258 Currently only used by detect_regression to skip the test if just running regression detection.
259
260 Args:
261 args: parsed args object.
262
263 Returns:
264 True if there are tests to run, false otherwise.
265 """
266 return not (args.detect_regression and len(args.detect_regression) == 2)
267
268
269def _has_valid_regression_detection_args(args):
270 """Validate regression detection args.
271
272 Args:
273 args: parsed args object.
274
275 Returns:
276 True if args are valid
277 """
278 if args.generate_baseline and args.generate_new_metrics:
279 logging.error('Cannot collect both baseline and new metrics at the same time.')
280 return False
281 if args.detect_regression is not None:
282 if not args.detect_regression:
283 logging.error('Need to specify at least 1 arg for regression detection.')
284 return False
285 elif len(args.detect_regression) == 1:
286 if args.generate_baseline or args.generate_new_metrics:
287 return True
288 logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
289 return False
290 elif len(args.detect_regression) == 2:
291 if args.generate_baseline:
292 logging.error('Specified 2 metric paths and --generate-baseline, '
293 'either drop --generate-baseline or drop a path')
294 return False
295 if args.generate_new_metrics:
296 logging.error('Specified 2 metric paths and --generate-new-metrics, '
297 'either drop --generate-new-metrics or drop a path')
298 return False
299 return True
300 else:
301 logging.error('Specified more than 2 metric paths.')
302 return False
303 return True
304
305
def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if not atest_utils.is_test_mapping(args):
        # Restrictions below only apply to TEST_MAPPING runs.
        return True
    unsupported = (
        ('--generate-baseline', args.generate_baseline),
        ('--detect-regression', args.detect_regression),
        ('--generate-new-metrics', args.generate_new_metrics),
    )
    for flag, value in unsupported:
        if value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, flag)
            return False
    return True
331
332
def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # Remaining validators log their own specifics; any failure is fatal.
    for check in (_has_valid_regression_detection_args,
                  _has_valid_test_mapping_args):
        if not check(args):
            sys.exit(constants.EXIT_CODE_ERROR)
351
nelsonlie3f90de2018-06-22 14:59:39 +0800352
353def _print_module_info_from_module_name(mod_info, module_name):
354 """print out the related module_info for a module_name.
355
356 Args:
357 mod_info: ModuleInfo object.
358 module_name: A string of module.
359
360 Returns:
361 True if the module_info is found.
362 """
Dan Shi08c7b722018-11-29 10:25:59 -0800363 title_mapping = {
364 constants.MODULE_PATH: "Source code path",
365 constants.MODULE_INSTALLED: "Installed path",
366 constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
nelsonlie3f90de2018-06-22 14:59:39 +0800367 target_module_info = mod_info.get_module_info(module_name)
368 is_module_found = False
369 if target_module_info:
nelsonliedbd7452018-08-27 11:11:11 +0800370 atest_utils.colorful_print(module_name, constants.GREEN)
nelsonlie3f90de2018-06-22 14:59:39 +0800371 for title_key in title_mapping.iterkeys():
372 atest_utils.colorful_print("\t%s" % title_mapping[title_key],
nelsonliedbd7452018-08-27 11:11:11 +0800373 constants.CYAN)
nelsonlie3f90de2018-06-22 14:59:39 +0800374 for info_value in target_module_info[title_key]:
nelsonliedbd7452018-08-27 11:11:11 +0800375 print("\t\t{}".format(info_value))
nelsonlie3f90de2018-06-22 14:59:39 +0800376 is_module_found = True
377 return is_module_found
378
379
def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return EXIT_CODE_SUCCESS
    """
    for info in test_infos:
        _print_module_info_from_module_name(mod_info, info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(info.build_targets)))
        # Also dump module info for every build target beyond the test itself.
        for target in info.build_targets:
            if target != info.test_name:
                _print_module_info_from_module_name(mod_info, target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS
399
Dan Shi08c7b722018-11-29 10:25:59 -0800400
def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    # All infos in one run share the same origin, so sampling one suffices.
    sample = list(test_infos)[0]
    return sample.from_test_mapping
411
412
def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos
429
430
# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_infos, host_infos = _split_test_mapping_tests(test_infos)
    # Host-side tests always run with the `host` option forced on.
    host_args = extra_args.copy()
    host_args[constants.HOST] = True
    runs = [(host_infos, host_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        runs.append((device_infos, extra_args, DEVICE_TESTS))

    run_results = []
    for infos, run_args, test_type in runs:
        if not infos:
            continue
        atest_utils.colorful_print(
            RUN_HEADER_FMT % {TEST_COUNT: len(infos), TEST_TYPE: test_type},
            constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in infos]))
        exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, infos, run_args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        run_results.append((exit_code, reporter, test_type))

    overall_exit_code = constants.EXIT_CODE_SUCCESS
    failed_types = []
    for exit_code, reporter, test_type in run_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        # Either the runner's exit code or a bad summary marks this type failed.
        combined = exit_code | reporter.print_summary()
        if combined:
            failed_types.append(test_type)
        overall_exit_code |= combined

    # List failed tests at the end as a reminder.
    if failed_types:
        atest_utils.colorful_print(
            '\n==============================', constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_types:
            atest_utils.colorful_print(failure, constants.RED)

    return overall_exit_code
487
488
def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands of the target tests rather than running them in actual.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for runner_class, infos in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = runner_class(results_dir)
        for cmd in runner.generate_run_commands(infos, extra_args):
            all_run_cmds.append(cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(cmd, constants.GREEN)))
    return all_run_cmds
yangbillcc1a21f2018-12-12 20:03:12 +0800509
def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    modules = mod_info.get_testable_modules(suite)
    header = '%s Testable %s modules' % (len(modules), suite)
    print('\n%s' % atest_utils.colorize(header, constants.CYAN))
    print('-------')
    for name in sorted(modules):
        print('\t%s' % name)
yangbillcc1a21f2018-12-12 20:03:12 +0800523
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
def main(argv, results_dir):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.

    Returns:
        Exit code.
    """
    args = _parse_args(argv)
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=platform.platform())
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    translator = cli_translator.CLITranslator(module_info=mod_info)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    build_targets = set()
    test_infos = set()
    # Clear cache if user pass -c option
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        # Exec-mode validation differs for TEST_MAPPING-sourced tests.
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    # Both cmd-mapping options are implemented on top of a dry run.
    if args.update_cmd_mapping or args.verify_cmd_mapping:
        args.dry_run = True
    if args.dry_run:
        args.tests.sort()
        dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
        if args.verify_cmd_mapping:
            try:
                atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                                   dry_run_cmds,
                                                   do_verification=True)
            except atest_error.DryRunVerificationError as e:
                atest_utils.colorful_print(str(e), constants.RED)
                return constants.EXIT_CODE_VERIFY_FAILURE
        if args.update_cmd_mapping:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds)
        return constants.EXIT_CODE_SUCCESS
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps will be None if none of -bit set, else list of params set.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps:
            # Run extra tasks with building deps concurrently.
            # When only "-b" is given(without -t), will not index targets.
            for task in EXTRA_TASKS.values():
                proc = Process(target=task)
                # Daemonize proc so it terminates with the main process.
                proc.daemon = True
                proc.start()
        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose,
                                    env_vars=constants.ATEST_BUILD_ENV)
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(time.time() - build_start),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        # logging.warning: logging.warn is a deprecated alias.
        logging.warning('Install step without test step currently not '
                        'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    # Collapse any non-zero code into the generic test-failure exit code.
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code
mikehoran63d61b42017-07-28 15:28:50 -0700638
if __name__ == '__main__':
    # Create a per-run temp dir that all results and logs go into.
    RESULTS_DIR = make_test_run_dir()
    # AtestExecutionInfo is a context manager; result_file is the handle it
    # yields for this run's execution-detail record.
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR) as result_file:
        metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR)
        # Report the run's outcome to the metrics backend, including whether
        # the bug detector flagged this invocation.
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        metrics_utils.send_exit_event(EXIT_CODE)
        if result_file:
            print('Execution detail has saved in %s' % result_file.name)
    sys.exit(EXIT_CODE)