#!/usr/bin/env python
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test types that can be run by TradeFederation.
"""

from __future__ import print_function

import logging
import os
import platform
import sys
import tempfile
import time

import atest_arg_parser
import atest_error
import atest_execution_info
import atest_utils
import bug_detector
import cli_translator
# pylint: disable=import-error
import constants
import module_info
import result_reporter
import test_runner_handler

from metrics import metrics
from metrics import metrics_base
from metrics import metrics_utils
from test_runners import regression_test_runner

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
TEST_RUN_DIR_PREFIX = 'atest_run_%s_'
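# Everything on the atest command line after this flag is collected verbatim
# and handed to the underlying test runner (see _parse_args), e.g.
# `atest <test> -- <args for the test runner>`.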
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'


def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace instance holding the parsed args.
    """
    # Store everything after '--' in custom_args.
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        args.custom_args = argv[custom_args_index+1:]
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If true display DEBUG level logs.
    """
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
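    # These variables are expected to be exported by the Android build
    # environment setup (typically `source build/envsetup.sh` then `lunch`).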
    missing = filter(None, [x for x in EXPECTED_VARS if not os.environ.get(x)])
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in tmp.

    Returns:
        A string of the dir path.
    """
    utc_epoch_time = int(time.time())
    prefix = TEST_RUN_DIR_PREFIX % utc_epoch_time
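    # mkdtemp appends a random suffix to the prefix, yielding a path such as
    # /tmp/atest_run_1561000000_XXXXXX (the exact tmp root is system dependent).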
    return tempfile.mkdtemp(prefix=prefix)


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    if args.disable_teardown:
        extra_args[constants.DISABLE_TEARDOWN] = args.disable_teardown
    if args.generate_baseline:
        extra_args[constants.PRE_PATCH_ITERATIONS] = args.generate_baseline
    if args.serial:
        extra_args[constants.SERIAL] = args.serial
    if args.all_abi:
        extra_args[constants.ALL_ABI] = args.all_abi
    if args.generate_new_metrics:
        extra_args[constants.POST_PATCH_ITERATIONS] = args.generate_new_metrics
    if args.instant:
        extra_args[constants.INSTANT] = args.instant
    if args.host:
        extra_args[constants.HOST] = args.host
    if args.dry_run:
        extra_args[constants.DRY_RUN] = args.dry_run
    if args.custom_args:
        extra_args[constants.CUSTOM_ARGS] = args.custom_args
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics')
                        if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics')
                         if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate that the tests' execution modes are not in conflict.

    Exit the program with an error code if both device-only and host-only
    tests are present. If there is no conflict and the tests are host side,
    set args.host=True.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
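    # Collect the execution mode (device vs. deviceless) each test supports.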
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        err_msg = ('Test side and option(--host) conflict. Please remove '
                   '--host if the tests run on the device side.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate that the Test Mapping tests' execution modes do not conflict.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is host-only.
    For deviceless tests, exit the program if any test is device-only.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if atest command is set to only run host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used by detect_regression to skip running tests when only
    regression detection is requested.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, False otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid, False otherwise.
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline, '
                              'either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid, False otherwise.
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with an error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print out the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of the module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = {
        constants.MODULE_PATH: "Source code path",
        constants.MODULE_INSTALLED: "Installed path",
        constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping.iterkeys():
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always returns EXIT_CODE_SUCCESS.
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(test_info.build_targets)))
        for build_target in test_info.build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS


def is_from_test_mapping(test_infos):
    """Check whether the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = set([info for info in test_infos if info.host])
    device_test_infos = set([info for info in test_infos if not info.host])
    return device_test_infos, host_test_infos


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
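        # Merge exit codes with bitwise OR so a failure (non-zero code) in
        # any test group is preserved in the aggregate result.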
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            '\n==============================', constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code


def _dry_run(results_dir, extra_args, test_infos):
    """Print the commands of the target tests rather than actually running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = test_runner(results_dir)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds


def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print('-------')
    for module in sorted(testable_modules):
        print('\t%s' % module)


# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
def main(argv, results_dir):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.

    Returns:
        Exit code.
    """
    args = _parse_args(argv)
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=platform.platform())
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    translator = cli_translator.CLITranslator(module_info=mod_info)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    build_targets = set()
    test_infos = set()
    # Clear the test info cache if the user passes the -c option.
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
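    # Both --update-cmd-mapping and --verify-cmd-mapping operate on the
    # dry-run commands, so either option implies a dry run.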
    if args.update_cmd_mapping or args.verify_cmd_mapping:
        args.dry_run = True
    if args.dry_run:
        args.tests.sort()
        dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
        if args.verify_cmd_mapping:
            try:
                atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                                   dry_run_cmds,
                                                   do_verification=True)
            except atest_error.DryRunVerificationError as e:
                atest_utils.colorful_print(str(e), constants.RED)
                return constants.EXIT_CODE_VERIFY_FAILURE
        if args.update_cmd_mapping:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds)
        return constants.EXIT_CODE_SUCCESS
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps will be None if none of -b/-i/-t is set; otherwise it is the
    # list of the steps that were requested.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, args.verbose,
                                    env_vars=constants.ATEST_BUILD_ENV)
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(time.time() - build_start),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        logging.warning('Install step without test step currently not '
                        'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code

if __name__ == '__main__':
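    # Create the results dir up front so both the atest execution info and
    # the test runners can write into it.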
    RESULTS_DIR = make_test_run_dir()
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR) as result_file:
        metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR)
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        metrics_utils.send_exit_event(EXIT_CODE)
        if result_file:
            print('Execution detail has been saved in %s' % result_file.name)
    sys.exit(EXIT_CODE)