blob: c4b993ee85e32dd3a355990bb24914044cbba061 [file] [log] [blame]
Simran Basi259a2b52017-06-21 16:14:07 -07001#!/usr/bin/env python
2#
3# Copyright 2017, The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
mikehoranbe9102f2017-08-04 16:04:03 -070017"""
18Command line utility for running Android tests through TradeFederation.
Simran Basi259a2b52017-06-21 16:14:07 -070019
20atest helps automate the flow of building test modules across the Android
21code base and executing the tests via the TradeFederation test harness.
22
23atest is designed to support any test types that can be ran by TradeFederation.
24"""
25
nelsonliedbd7452018-08-27 11:11:11 +080026from __future__ import print_function
27
mikehoranbe9102f2017-08-04 16:04:03 -070028import logging
Simran Basi259a2b52017-06-21 16:14:07 -070029import os
30import sys
mikehoran95091b22017-10-31 15:55:26 -070031import tempfile
32import time
kellyhung792fbcf2018-11-19 16:25:50 +080033import platform
Simran Basi259a2b52017-06-21 16:14:07 -070034
Jim Tang6ed753e2019-07-23 10:39:58 +080035from multiprocessing import Process
36
Jim Tang815b8892018-07-11 12:57:30 +080037import atest_arg_parser
yangbillbac1dd62019-06-03 17:06:40 +080038import atest_error
yelinhsieh4d5917d2019-03-12 17:26:27 +080039import atest_execution_info
Simran Basicf2189b2017-11-06 23:40:24 -080040import atest_utils
kellyhung7d004bb2019-04-02 11:54:59 +080041import bug_detector
mikehoran63d61b42017-07-28 15:28:50 -070042import cli_translator
Kevin Cheng7edb0b92017-12-14 15:00:25 -080043# pylint: disable=import-error
44import constants
Kevin Cheng8b2c94c2017-12-18 14:43:26 -080045import module_info
mikehoran9b6b44b2018-04-09 15:54:58 -070046import result_reporter
Kevin Cheng7edb0b92017-12-14 15:00:25 -080047import test_runner_handler
kellyhung924b8832019-03-05 18:35:00 +080048
49from metrics import metrics
kellyhunge3fa1752019-04-23 11:13:41 +080050from metrics import metrics_base
kellyhung924b8832019-03-05 18:35:00 +080051from metrics import metrics_utils
Mike Ma0126b9b2018-01-11 19:11:16 -080052from test_runners import regression_test_runner
Jim Tang6ed753e2019-07-23 10:39:58 +080053from tools import atest_tools
Simran Basicf2189b2017-11-06 23:40:24 -080054
# Environment variables that must be set (normally by `lunch`) for atest to
# locate the build tree and test-case output directories.
EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
# strftime pattern used to name each per-run results directory.
TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
# Everything on the command line after this flag is passed through verbatim
# to the underlying test runner (see _parse_args).
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

# Labels and format strings used when reporting TEST_MAPPING runs.
DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
# Tasks that must run in the build time but unable to build by soong.
# (e.g subprocesses that invoke host commands.)
EXTRA_TASKS = {
    'index-targets': atest_tools.index_targets
}
75
Simran Basi259a2b52017-06-21 16:14:07 -070076
def _run_extra_tasks(join=False):
    """Execute EXTRA_TASKS with multiprocessing.

    Args:
        join: A boolean. True makes the main process wait for every
            subprocess to finish; False daemonizes them so they are killed
            when the main process exits.
    """
    spawned = []
    for job in EXTRA_TASKS.values():
        worker = Process(target=job)
        # Daemonized workers die together with the main process.
        worker.daemon = not join
        worker.start()
        spawned.append(worker)
    if not join:
        return
    for worker in spawned:
        worker.join()
95
96
def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argspace.Namespace class instance holding parsed args.
    """
    # Everything after the first '--' is set aside as custom_args for the
    # test runner; only the portion before it is given to the parser.
    flag_at = argv.index(CUSTOM_ARG_FLAG) if CUSTOM_ARG_FLAG in argv else None
    to_parse = argv if flag_at is None else argv[:flag_at]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    parsed = parser.parse_args(to_parse)
    parsed.custom_args = [] if flag_at is None else argv[flag_at + 1:]
    return parsed
mikehoran63d61b42017-07-28 15:28:50 -0700119
Simran Basi259a2b52017-06-21 16:14:07 -0700120
mikehoranbe9102f2017-08-04 16:04:03 -0700121def _configure_logging(verbose):
122 """Configure the logger.
123
124 Args:
125 verbose: A boolean. If true display DEBUG level logs.
126 """
mikehoranb2401822018-08-16 12:01:40 -0700127 log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
128 datefmt = '%Y-%m-%d %H:%M:%S'
mikehoranbe9102f2017-08-04 16:04:03 -0700129 if verbose:
mikehoranb2401822018-08-16 12:01:40 -0700130 logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt)
mikehoranbe9102f2017-08-04 16:04:03 -0700131 else:
mikehoranb2401822018-08-16 12:01:40 -0700132 logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt)
mikehoranbe9102f2017-08-04 16:04:03 -0700133
134
def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    # Build a real list: on Python 3, filter() returns a lazy object that is
    # always truthy, which made the original `if missing:` log the error (and
    # return a truthy value) even when nothing was missing.
    missing = [var for var in EXPECTED_VARS if not os.environ.get(var)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing
147
148
def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        try:
            os.makedirs(constants.ATEST_RESULT_ROOT)
        except OSError:
            # Another atest instance may have created the directory between
            # the existence check and makedirs; only re-raise if it is
            # genuinely still missing.
            if not os.path.isdir(constants.ATEST_RESULT_ROOT):
                raise
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
    # mkdtemp appends a unique suffix, so concurrent runs in the same second
    # still get distinct directories.
    return tempfile.mkdtemp(prefix='%s_' % ctime,
                            dir=constants.ATEST_RESULT_ROOT)
mikehoran95091b22017-10-31 15:55:26 -0700161
162
def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: arg parsed object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # Maps an argparse attribute name to the constant that test runners
    # consume, i.e. when args.aaaa is set, extra_args[constants.AAAA] holds it.
    arg_maps = {'all_abi': constants.ALL_ABI,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'custom_args': constants.CUSTOM_ARGS,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'dry_run': constants.DRY_RUN,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'sharding': constants.SHARDING,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE}
    parsed_vars = vars(args)
    unknown = [key for key in arg_maps if key not in parsed_vars]
    if unknown:
        raise AttributeError('%s object has no attribute %s'
                             %(type(args).__name__, unknown))
    for key, value in parsed_vars.items():
        if arg_maps.get(key) and value:
            extra_args[arg_maps[key]] = value
    return extra_args
205
206
def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    # Each folder is either generated under results_dir or consumed (popped)
    # from the user-supplied paths, pre-patch first.
    if args.generate_baseline:
        pre_patch = os.path.join(results_dir, 'baseline-metrics')
    else:
        pre_patch = args.detect_regression.pop(0)
    if args.generate_new_metrics:
        post_patch = os.path.join(results_dir, 'new-metrics')
    else:
        post_patch = args.detect_regression.pop(0)
    return {constants.PRE_PATCH_FOLDER: pre_patch,
            constants.POST_PATCH_FOLDER: post_patch}
225
226
def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate all test execution modes are not in conflict.

    Exit the program with error code if have device-only and host-only.
    If no conflict and host side, add args.host=True.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = [info.get_supported_exec_mode() for info in test_infos]
    has_device_mode = constants.DEVICE_TEST in all_device_modes
    has_deviceless_mode = constants.DEVICELESS_TEST in all_device_modes
    err_msg = None
    # '$atest <device-only> --host' is a conflict.
    if (host_tests or args.host) and has_device_mode:
        err_msg = ('Test side and option(--host) conflict. Please remove '
                   '--host if the test run on device side.')
    # '$atest <host-only> <device-only>' (with or without --host) is a
    # conflict as well.
    if has_deviceless_mode and has_device_mode:
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and has_deviceless_mode:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # '$atest <host-only>' implicitly turns on --host; only override it when
    # host_tests was left unset by the caller.
    if not args.host and host_tests is None:
        args.host = has_deviceless_mode
Dan Shi08c7b722018-11-29 10:25:59 -0800261
262
def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate execution modes of all tests from TEST_MAPPING files.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host, then validate each group's host
    setting. Exits the program if a device test turns out host-only or a
    deviceless test turns out device-only.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # Device tests need no verification when atest is told to run only host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)
283
kellyhung0625d172018-06-21 16:40:27 +0800284
Mike Ma0126b9b2018-01-11 19:11:16 -0800285def _will_run_tests(args):
286 """Determine if there are tests to run.
287
288 Currently only used by detect_regression to skip the test if just running regression detection.
289
290 Args:
291 args: parsed args object.
292
293 Returns:
294 True if there are tests to run, false otherwise.
295 """
296 return not (args.detect_regression and len(args.detect_regression) == 2)
297
298
299def _has_valid_regression_detection_args(args):
300 """Validate regression detection args.
301
302 Args:
303 args: parsed args object.
304
305 Returns:
306 True if args are valid
307 """
308 if args.generate_baseline and args.generate_new_metrics:
309 logging.error('Cannot collect both baseline and new metrics at the same time.')
310 return False
311 if args.detect_regression is not None:
312 if not args.detect_regression:
313 logging.error('Need to specify at least 1 arg for regression detection.')
314 return False
315 elif len(args.detect_regression) == 1:
316 if args.generate_baseline or args.generate_new_metrics:
317 return True
318 logging.error('Need to specify --generate-baseline or --generate-new-metrics.')
319 return False
320 elif len(args.detect_regression) == 2:
321 if args.generate_baseline:
322 logging.error('Specified 2 metric paths and --generate-baseline, '
323 'either drop --generate-baseline or drop a path')
324 return False
325 if args.generate_new_metrics:
326 logging.error('Specified 2 metric paths and --generate-new-metrics, '
327 'either drop --generate-new-metrics or drop a path')
328 return False
329 return True
330 else:
331 logging.error('Specified more than 2 metric paths.')
332 return False
333 return True
334
335
def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if not atest_utils.is_test_mapping(args):
        return True
    # These options are meaningless for TEST_MAPPING-driven runs.
    for arg_value, arg in ((args.generate_baseline, '--generate-baseline'),
                           (args.detect_regression, '--detect-regression'),
                           (args.generate_new_metrics, '--generate-new-metrics')):
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True
361
362
def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # Remaining validators share the same failure exit code.
    for is_valid in (_has_valid_regression_detection_args,
                     _has_valid_test_mapping_args):
        if not is_valid(args):
            sys.exit(constants.EXIT_CODE_ERROR)
381
nelsonlie3f90de2018-06-22 14:59:39 +0800382
def _print_module_info_from_module_name(mod_info, module_name):
    """Print out the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of module.

    Returns:
        True if the module_info is found.
    """
    title_mapping = {
        constants.MODULE_PATH: "Source code path",
        constants.MODULE_INSTALLED: "Installed path",
        constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        # dict.iterkeys() was removed in Python 3 and raised AttributeError
        # here; iterating the dict directly works on both Python 2 and 3.
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found
408
409
def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return EXIT_CODE_SUCCESS
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(test_info.build_targets)))
        # The test module itself was printed above, so only its remaining
        # build targets are detailed here.
        for build_target in test_info.build_targets:
            if build_target == test_info.test_name:
                continue
            _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS
429
Dan Shi08c7b722018-11-29 10:25:59 -0800430
def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    # All TestInfos of one run share the same origin, so any single element
    # answers the question.
    first_info = list(test_infos)[0]
    return first_info.from_test_mapping
441
442
def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos
459
460
461# pylint: disable=too-many-locals
462def _run_test_mapping_tests(results_dir, test_infos, extra_args):
463 """Run all tests in TEST_MAPPING files.
464
465 Args:
466 results_dir: String directory to store atest results.
467 test_infos: A set of TestInfos.
468 extra_args: Dict of extra args to add to test run.
469
470 Returns:
471 Exit code.
472 """
473 device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
474 # `host` option needs to be set to True to run host side tests.
475 host_extra_args = extra_args.copy()
476 host_extra_args[constants.HOST] = True
477 test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
478 if extra_args.get(constants.HOST):
479 atest_utils.colorful_print(
480 'Option `--host` specified. Skip running device tests.',
481 constants.MAGENTA)
482 else:
483 test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))
484
485 test_results = []
486 for tests, args, test_type in test_runs:
487 if not tests:
488 continue
489 header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
490 atest_utils.colorful_print(header, constants.MAGENTA)
491 logging.debug('\n'.join([str(info) for info in tests]))
492 tests_exit_code, reporter = test_runner_handler.run_all_tests(
493 results_dir, tests, args, delay_print_summary=True)
yelinhsieh4d5917d2019-03-12 17:26:27 +0800494 atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
Dan Shi08c7b722018-11-29 10:25:59 -0800495 test_results.append((tests_exit_code, reporter, test_type))
496
497 all_tests_exit_code = constants.EXIT_CODE_SUCCESS
498 failed_tests = []
499 for tests_exit_code, reporter, test_type in test_results:
500 atest_utils.colorful_print(
501 RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
502 result = tests_exit_code | reporter.print_summary()
503 if result:
504 failed_tests.append(test_type)
505 all_tests_exit_code |= result
506
507 # List failed tests at the end as a reminder.
508 if failed_tests:
509 atest_utils.colorful_print(
510 '\n==============================', constants.YELLOW)
511 atest_utils.colorful_print(
512 '\nFollowing tests failed:', constants.MAGENTA)
513 for failure in failed_tests:
514 atest_utils.colorful_print(failure, constants.RED)
515
516 return all_tests_exit_code
517
518
def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands of the target tests rather than running them in actual.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    grouped = test_runner_handler.group_tests_by_test_runners(test_infos)
    for runner_class, tests in grouped:
        runner = runner_class(results_dir)
        for run_cmd in runner.generate_run_commands(tests, extra_args):
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds
yangbillcc1a21f2018-12-12 20:03:12 +0800539
def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    header = '%s Testable %s modules' % (len(testable_modules), suite)
    print('\n%s' % atest_utils.colorize(header, constants.CYAN))
    print('-------')
    for module in sorted(testable_modules):
        print('\t%s' % module)
yangbillcc1a21f2018-12-12 20:03:12 +0800553
def _is_inside_android_root():
    """Identify whether the cwd is inside of Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
    # The ' ' fallback can never occur inside a real path, so an unset
    # ANDROID_BUILD_TOP always yields False.
    return os.getenv(constants.ANDROID_BUILD_TOP, ' ') in os.getcwd()
562
# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argspace.Namespace class instance holding parsed args.

    Returns:
        Exit code.
    """
    _configure_logging(args.verbose)
    _validate_args(args)
    # Record start time and command metadata for metrics reporting.
    metrics_utils.get_start_time()
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=platform.platform())
    # Early-exit flags: --version, outside-source-tree check, --help,
    # --history. Each returns without building or running anything.
    if args.version:
        if os.path.isfile(constants.VERSION_FILE):
            with open(constants.VERSION_FILE) as version_file:
                print(version_file.read())
        return constants.EXIT_CODE_SUCCESS
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        return constants.EXIT_CODE_OUTSIDE_ROOT
    if args.help:
        atest_arg_parser.print_epilog_text()
        return constants.EXIT_CODE_SUCCESS
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        return constants.EXIT_CODE_SUCCESS
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
    # When module-info is being rebuilt, the extra tasks (e.g. indexing) must
    # finish before translation, hence join=True.
    if args.rebuild_module_info:
        _run_extra_tasks(join=True)
    translator = cli_translator.CLITranslator(module_info=mod_info)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    build_targets = set()
    test_infos = set()
    # Clear cache if user pass -c option
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    if _will_run_tests(args):
        build_targets, test_infos = translator.translate(args)
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    # Both cmd-mapping modes are implemented on top of a dry run.
    if args.update_cmd_mapping or args.verify_cmd_mapping:
        args.dry_run = True
    if args.dry_run:
        args.tests.sort()
        dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
        if args.verify_cmd_mapping:
            try:
                atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                                   dry_run_cmds,
                                                   do_verification=True)
            except atest_error.DryRunVerificationError as e:
                atest_utils.colorful_print(str(e), constants.RED)
                return constants.EXIT_CODE_VERIFY_FAILURE
        if args.update_cmd_mapping:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds)
        return constants.EXIT_CODE_SUCCESS
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps will be None if none of -bit set, else list of params set.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps and not args.rebuild_module_info:
            # Run extra tasks along with build step concurrently. Note that
            # Atest won't index targets when only "-b" is given(without -t).
            _run_extra_tasks(join=False)
        # Add module-info.json target to the list of build targets to keep the
        # file up to date.
        build_targets.add(mod_info.module_info_target)
        # Build the deps-license to generate dependencies data in
        # module-info.json.
        build_targets.add(constants.DEPS_LICENSE)
        # The environment variables PROJ_PATH and DEP_PATH are necessary for the
        # deps-license.
        build_env = dict(constants.DEPS_LICENSE_ENV)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose,
                                    env_vars=build_env)
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(time.time() - build_start),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
    elif constants.TEST_STEP not in steps:
        # NOTE(review): logging.warn is deprecated; logging.warning is the
        # Python 3 spelling — left unchanged here.
        logging.warn('Install step without test step currently not '
                     'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter()
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
    # Any non-zero test result is normalized to the generic test-failure code.
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code
mikehoran63d61b42017-07-28 15:28:50 -0700707
if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    ARGS = _parse_args(sys.argv[1:])
    # AtestExecutionInfo is a context manager that captures the run's result
    # artifacts under RESULTS_DIR.
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR,
                                                 ARGS) as result_file:
        metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, ARGS)
        # Report whether a known bug pattern was detected for this invocation.
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        if result_file:
            print("Run 'atest --history' to review test result history.")
    sys.exit(EXIT_CODE)
easoncyleef0fb2b12019-01-22 15:49:09 +0800722 sys.exit(EXIT_CODE)