Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 1 | #!/usr/bin/env python |
| 2 | # |
| 3 | # Copyright 2017, The Android Open Source Project |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | # you may not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
| 16 | |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 17 | """ |
| 18 | Command line utility for running Android tests through TradeFederation. |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 19 | |
| 20 | atest helps automate the flow of building test modules across the Android |
| 21 | code base and executing the tests via the TradeFederation test harness. |
| 22 | |
atest is designed to support any test types that can be run by TradeFederation.
| 24 | """ |
| 25 | |
nelsonli | edbd745 | 2018-08-27 11:11:11 +0800 | [diff] [blame] | 26 | from __future__ import print_function |
| 27 | |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 28 | import logging |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 29 | import os |
| 30 | import sys |
mikehoran | 95091b2 | 2017-10-31 15:55:26 -0700 | [diff] [blame] | 31 | import tempfile |
| 32 | import time |
kellyhung | 792fbcf | 2018-11-19 16:25:50 +0800 | [diff] [blame] | 33 | import platform |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 34 | |
Jim Tang | 815b889 | 2018-07-11 12:57:30 +0800 | [diff] [blame] | 35 | import atest_arg_parser |
yangbill | bac1dd6 | 2019-06-03 17:06:40 +0800 | [diff] [blame] | 36 | import atest_error |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 37 | import atest_execution_info |
Simran Basi | cf2189b | 2017-11-06 23:40:24 -0800 | [diff] [blame] | 38 | import atest_utils |
kellyhung | 7d004bb | 2019-04-02 11:54:59 +0800 | [diff] [blame] | 39 | import bug_detector |
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 40 | import cli_translator |
Kevin Cheng | 7edb0b9 | 2017-12-14 15:00:25 -0800 | [diff] [blame] | 41 | # pylint: disable=import-error |
| 42 | import constants |
Kevin Cheng | 8b2c94c | 2017-12-18 14:43:26 -0800 | [diff] [blame] | 43 | import module_info |
mikehoran | 9b6b44b | 2018-04-09 15:54:58 -0700 | [diff] [blame] | 44 | import result_reporter |
Kevin Cheng | 7edb0b9 | 2017-12-14 15:00:25 -0800 | [diff] [blame] | 45 | import test_runner_handler |
kellyhung | 924b883 | 2019-03-05 18:35:00 +0800 | [diff] [blame] | 46 | |
| 47 | from metrics import metrics |
kellyhung | e3fa175 | 2019-04-23 11:13:41 +0800 | [diff] [blame] | 48 | from metrics import metrics_base |
kellyhung | 924b883 | 2019-03-05 18:35:00 +0800 | [diff] [blame] | 49 | from metrics import metrics_utils |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 50 | from test_runners import regression_test_runner |
Simran Basi | cf2189b | 2017-11-06 23:40:24 -0800 | [diff] [blame] | 51 | |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 52 | EXPECTED_VARS = frozenset([ |
Kevin Cheng | 8b2c94c | 2017-12-18 14:43:26 -0800 | [diff] [blame] | 53 | constants.ANDROID_BUILD_TOP, |
mikehoran | 43ed32d | 2017-08-18 17:13:36 -0700 | [diff] [blame] | 54 | 'ANDROID_TARGET_OUT_TESTCASES', |
Kevin Cheng | 8b2c94c | 2017-12-18 14:43:26 -0800 | [diff] [blame] | 55 | constants.ANDROID_OUT]) |
mikehoran | 95091b2 | 2017-10-31 15:55:26 -0700 | [diff] [blame] | 56 | TEST_RUN_DIR_PREFIX = 'atest_run_%s_' |
Kevin Cheng | 21ea910 | 2018-02-22 10:52:42 -0800 | [diff] [blame] | 57 | CUSTOM_ARG_FLAG = '--' |
Dan Shi | 0ddd3e4 | 2018-05-30 11:24:30 -0700 | [diff] [blame] | 58 | OPTION_NOT_FOR_TEST_MAPPING = ( |
| 59 | 'Option `%s` does not work for running tests in TEST_MAPPING files') |
mikehoran | c80dc53 | 2017-11-14 14:30:06 -0800 | [diff] [blame] | 60 | |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 61 | DEVICE_TESTS = 'tests that require device' |
| 62 | HOST_TESTS = 'tests that do NOT require device' |
| 63 | RESULT_HEADER_FMT = '\nResults from %(test_type)s:' |
| 64 | RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.' |
| 65 | TEST_COUNT = 'test_count' |
| 66 | TEST_TYPE = 'test_type' |
| 67 | |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 68 | |
def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace class instance holding parsed args.
    """
    # Tokens after '--' bypass the parser entirely and are handed to the
    # test runner verbatim via args.custom_args.
    try:
        split_at = argv.index(CUSTOM_ARG_FLAG)
    except ValueError:
        split_at = None
    to_parse = argv if split_at is None else argv[:split_at]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(to_parse)
    args.custom_args = [] if split_at is None else argv[split_at + 1:]
    return args
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 91 | |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 92 | |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 93 | def _configure_logging(verbose): |
| 94 | """Configure the logger. |
| 95 | |
| 96 | Args: |
| 97 | verbose: A boolean. If true display DEBUG level logs. |
| 98 | """ |
mikehoran | b240182 | 2018-08-16 12:01:40 -0700 | [diff] [blame] | 99 | log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s' |
| 100 | datefmt = '%Y-%m-%d %H:%M:%S' |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 101 | if verbose: |
mikehoran | b240182 | 2018-08-16 12:01:40 -0700 | [diff] [blame] | 102 | logging.basicConfig(level=logging.DEBUG, format=log_format, datefmt=datefmt) |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 103 | else: |
mikehoran | b240182 | 2018-08-16 12:01:40 -0700 | [diff] [blame] | 104 | logging.basicConfig(level=logging.INFO, format=log_format, datefmt=datefmt) |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 105 | |
| 106 | |
def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    # Use a plain list comprehension: filter(None, ...) was redundant (the
    # comprehension never yields falsy values) and on Python 3 it returns a
    # lazy filter object, which is always truthy for callers and would log
    # an exhausted iterator instead of the variable names.
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing
| 119 | |
| 120 | |
def make_test_run_dir():
    """Make the test run dir in tmp.

    Returns:
        A string of the dir path.
    """
    # Stamp the directory name with the current UTC epoch so that
    # successive runs sort chronologically.
    return tempfile.mkdtemp(prefix=TEST_RUN_DIR_PREFIX % int(time.time()))
| 130 | |
| 131 | |
def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: arg parsed object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
    # These two flags map to valueless markers in the extra args dict.
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    if constants.INSTALL_STEP not in (args.steps or constants.ALL_STEPS):
        extra_args[constants.DISABLE_INSTALL] = None
    # Each remaining flag carries its parsed value through when truthy.
    flag_to_key = (
        ('disable_teardown', constants.DISABLE_TEARDOWN),
        ('generate_baseline', constants.PRE_PATCH_ITERATIONS),
        ('serial', constants.SERIAL),
        ('all_abi', constants.ALL_ABI),
        ('generate_new_metrics', constants.POST_PATCH_ITERATIONS),
        ('instant', constants.INSTANT),
        ('host', constants.HOST),
        ('dry_run', constants.DRY_RUN),
        ('custom_args', constants.CUSTOM_ARGS),
    )
    for attr, key in flag_to_key:
        value = getattr(args, attr)
        if value:
            extra_args[key] = value
    return extra_args
| 166 | |
| 167 | |
def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    # When metrics are being generated this run, point at the folder they
    # will land in; otherwise consume the next user-supplied path.
    # NOTE: pop(0) mutates args.detect_regression.
    if args.generate_baseline:
        pre_patch_folder = os.path.join(results_dir, 'baseline-metrics')
    else:
        pre_patch_folder = args.detect_regression.pop(0)
    if args.generate_new_metrics:
        post_patch_folder = os.path.join(results_dir, 'new-metrics')
    else:
        post_patch_folder = args.detect_regression.pop(0)
    return {constants.PRE_PATCH_FOLDER: pre_patch_folder,
            constants.POST_PATCH_FOLDER: post_patch_folder}
| 186 | |
| 187 | |
def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate all test execution modes are not in conflict.

    Exit the program with error code if have device-only and host-only.
    If no conflict and host side, add args.host=True.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
                    should be device tests. Default is set to None, which means
                    tests can be either deviceless or device tests.
    """
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    # Later checks overwrite err_msg, so only the last matching conflict
    # message is reported; any of them still aborts the run.
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        err_msg = ('Test side and option(--host) conflict. Please remove '
                   '--host if the test run on device side.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in command.'
    if err_msg:
        # Report the failure to metrics before exiting so the conflict is
        # recorded even though no tests ran.
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overriden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 222 | |
| 223 | |
def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate all test execution modes are not in conflict.

    Split the tests in Test Mapping files into two groups, device tests and
    deviceless tests running on host. Validate the tests' host setting.
    For device tests, exit the program if any test is found for host-only.
    For deviceless tests, exit the program if any test is found for
    device-only.

    Args:
        args: parsed args object.
        test_infos: TestInfo objects.
    """
    device_infos, host_infos = _split_test_mapping_tests(test_infos)
    # When atest is told to run only host tests, the device group does not
    # need verification.
    if device_infos and not args.host:
        _validate_exec_mode(args, device_infos, host_tests=False)
    if host_infos:
        _validate_exec_mode(args, host_infos, host_tests=True)
| 244 | |
kellyhung | 0625d17 | 2018-06-21 16:40:27 +0800 | [diff] [blame] | 245 | |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 246 | def _will_run_tests(args): |
| 247 | """Determine if there are tests to run. |
| 248 | |
| 249 | Currently only used by detect_regression to skip the test if just running regression detection. |
| 250 | |
| 251 | Args: |
| 252 | args: parsed args object. |
| 253 | |
| 254 | Returns: |
| 255 | True if there are tests to run, false otherwise. |
| 256 | """ |
| 257 | return not (args.detect_regression and len(args.detect_regression) == 2) |
| 258 | |
| 259 | |
| 260 | def _has_valid_regression_detection_args(args): |
| 261 | """Validate regression detection args. |
| 262 | |
| 263 | Args: |
| 264 | args: parsed args object. |
| 265 | |
| 266 | Returns: |
| 267 | True if args are valid |
| 268 | """ |
| 269 | if args.generate_baseline and args.generate_new_metrics: |
| 270 | logging.error('Cannot collect both baseline and new metrics at the same time.') |
| 271 | return False |
| 272 | if args.detect_regression is not None: |
| 273 | if not args.detect_regression: |
| 274 | logging.error('Need to specify at least 1 arg for regression detection.') |
| 275 | return False |
| 276 | elif len(args.detect_regression) == 1: |
| 277 | if args.generate_baseline or args.generate_new_metrics: |
| 278 | return True |
| 279 | logging.error('Need to specify --generate-baseline or --generate-new-metrics.') |
| 280 | return False |
| 281 | elif len(args.detect_regression) == 2: |
| 282 | if args.generate_baseline: |
| 283 | logging.error('Specified 2 metric paths and --generate-baseline, ' |
| 284 | 'either drop --generate-baseline or drop a path') |
| 285 | return False |
| 286 | if args.generate_new_metrics: |
| 287 | logging.error('Specified 2 metric paths and --generate-new-metrics, ' |
| 288 | 'either drop --generate-new-metrics or drop a path') |
| 289 | return False |
| 290 | return True |
| 291 | else: |
| 292 | logging.error('Specified more than 2 metric paths.') |
| 293 | return False |
| 294 | return True |
| 295 | |
| 296 | |
def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid
    """
    if not atest_utils.is_test_mapping(args):
        # Restrictions below only apply to TEST_MAPPING runs.
        return True
    unsupported_options = (
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    )
    for value, option in unsupported_options:
        if value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, option)
            return False
    return True
| 322 | |
| 323 | |
def _validate_args(args):
    """Validate setups and args.

    Exit the program with error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    # Remaining validators log their own error details on failure.
    for is_valid in (_has_valid_regression_detection_args,
                     _has_valid_test_mapping_args):
        if not is_valid(args):
            sys.exit(constants.EXIT_CODE_ERROR)
| 342 | |
nelsonli | e3f90de | 2018-06-22 14:59:39 +0800 | [diff] [blame] | 343 | |
def _print_module_info_from_module_name(mod_info, module_name):
    """Print out the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of module.

    Returns:
        True if the module_info is found.
    """
    title_mapping = {
        constants.MODULE_PATH: "Source code path",
        constants.MODULE_INSTALLED: "Installed path",
        constants.MODULE_COMPATIBILITY_SUITES: "Compatibility suite"}
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        # Iterate items() instead of the Python-2-only dict.iterkeys() so
        # this works on both Python 2 and 3.
        for title_key, title in title_mapping.items():
            atest_utils.colorful_print("\t%s" % title, constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found
| 369 | |
| 370 | |
def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always return EXIT_CODE_SUCCESS
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        print("\t\t{}".format(", ".join(test_info.build_targets)))
        # Show module details for build targets other than the test itself.
        for build_target in test_info.build_targets:
            if build_target == test_info.test_name:
                continue
            _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS
| 390 | |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 391 | |
def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    # All TestInfos in one invocation share the same origin, so inspecting
    # a single element is sufficient.
    infos = list(test_infos)
    return infos[0].from_test_mapping
| 402 | |
| 403 | |
def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    device_test_infos = set()
    host_test_infos = set()
    # Partition by each test's own host flag.
    for info in test_infos:
        if info.host:
            host_test_infos.add(info)
        else:
            device_test_infos.add(info)
    return device_test_infos, host_test_infos
| 420 | |
| 421 | |
# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    # Run each group and collect (exit_code, reporter, type); summaries are
    # delayed so they can be printed together below.
    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    # Exit codes are bitwise OR-ed so a failure in any group survives into
    # the combined result.
    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            '\n==============================', constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code
| 478 | |
| 479 | |
def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands of the target tests rather than running them in actual.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    grouped = test_runner_handler.group_tests_by_test_runners(test_infos)
    for test_runner, tests in grouped:
        runner = test_runner(results_dir)
        # Collect and echo every command the runner would have executed.
        for run_cmd in runner.generate_run_commands(tests, extra_args):
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds
yangbill | cc1a21f | 2018-12-12 20:03:12 +0800 | [diff] [blame] | 500 | |
def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    header = '%s Testable %s modules' % (len(testable_modules), suite)
    print('\n%s' % atest_utils.colorize(header, constants.CYAN))
    print('-------')
    for module in sorted(testable_modules):
        print('\t%s' % module)
yangbill | cc1a21f | 2018-12-12 20:03:12 +0800 | [diff] [blame] | 514 | |
kellyhung | e6a643c | 2018-12-19 11:31:25 +0800 | [diff] [blame] | 515 | # pylint: disable=too-many-statements |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 516 | # pylint: disable=too-many-branches |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 517 | def main(argv, results_dir): |
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 518 | """Entry point of atest script. |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 519 | |
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 520 | Args: |
| 521 | argv: A list of arguments. |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 522 | results_dir: A directory which stores the ATest execution information. |
Kevin Cheng | 09c2a2c | 2017-12-15 12:52:46 -0800 | [diff] [blame] | 523 | |
| 524 | Returns: |
| 525 | Exit code. |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 526 | """ |
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 527 | args = _parse_args(argv) |
mikehoran | be9102f | 2017-08-04 16:04:03 -0700 | [diff] [blame] | 528 | _configure_logging(args.verbose) |
Dan Shi | e4e267f | 2018-06-01 11:31:57 -0700 | [diff] [blame] | 529 | _validate_args(args) |
kellyhung | 924b883 | 2019-03-05 18:35:00 +0800 | [diff] [blame] | 530 | metrics_utils.get_start_time() |
| 531 | metrics.AtestStartEvent( |
| 532 | command_line=' '.join(argv), |
| 533 | test_references=args.tests, |
| 534 | cwd=os.getcwd(), |
| 535 | os=platform.platform()) |
Kevin Cheng | 8b2c94c | 2017-12-18 14:43:26 -0800 | [diff] [blame] | 536 | mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info) |
| 537 | translator = cli_translator.CLITranslator(module_info=mod_info) |
easoncylee | f0fb2b1 | 2019-01-22 15:49:09 +0800 | [diff] [blame] | 538 | if args.list_modules: |
| 539 | _print_testable_modules(mod_info, args.list_modules) |
| 540 | return constants.EXIT_CODE_SUCCESS |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 541 | build_targets = set() |
| 542 | test_infos = set() |
yangbill | 0b35e4b | 2019-06-10 20:36:28 +0800 | [diff] [blame] | 543 | # Clear cache if user pass -c option |
| 544 | if args.clear_cache: |
| 545 | atest_utils.clean_test_info_caches(args.tests) |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 546 | if _will_run_tests(args): |
nelsonli | c4a7145 | 2018-09-13 14:10:30 +0800 | [diff] [blame] | 547 | build_targets, test_infos = translator.translate(args) |
| 548 | if not test_infos: |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 549 | return constants.EXIT_CODE_TEST_NOT_FOUND |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 550 | if not is_from_test_mapping(test_infos): |
| 551 | _validate_exec_mode(args, test_infos) |
| 552 | else: |
| 553 | _validate_tm_tests_exec_mode(args, test_infos) |
nelsonli | e3f90de | 2018-06-22 14:59:39 +0800 | [diff] [blame] | 554 | if args.info: |
| 555 | return _print_test_info(mod_info, test_infos) |
Kevin Cheng | 8b2c94c | 2017-12-18 14:43:26 -0800 | [diff] [blame] | 556 | build_targets |= test_runner_handler.get_test_runner_reqs(mod_info, |
| 557 | test_infos) |
Kevin Cheng | 7edb0b9 | 2017-12-14 15:00:25 -0800 | [diff] [blame] | 558 | extra_args = get_extra_args(args) |
yangbill | bac1dd6 | 2019-06-03 17:06:40 +0800 | [diff] [blame] | 559 | if args.update_cmd_mapping or args.verify_cmd_mapping: |
yangbill | 52c63fa | 2019-05-24 09:55:00 +0800 | [diff] [blame] | 560 | args.dry_run = True |
yangbill | cc1a21f | 2018-12-12 20:03:12 +0800 | [diff] [blame] | 561 | if args.dry_run: |
yangbill | bac1dd6 | 2019-06-03 17:06:40 +0800 | [diff] [blame] | 562 | args.tests.sort() |
yangbill | 52c63fa | 2019-05-24 09:55:00 +0800 | [diff] [blame] | 563 | dry_run_cmds = _dry_run(results_dir, extra_args, test_infos) |
yangbill | bac1dd6 | 2019-06-03 17:06:40 +0800 | [diff] [blame] | 564 | if args.verify_cmd_mapping: |
| 565 | try: |
| 566 | atest_utils.handle_test_runner_cmd(' '.join(args.tests), |
| 567 | dry_run_cmds, |
| 568 | do_verification=True) |
| 569 | except atest_error.DryRunVerificationError as e: |
| 570 | atest_utils.colorful_print(str(e), constants.RED) |
| 571 | return constants.EXIT_CODE_VERIFY_FAILURE |
yangbill | 52c63fa | 2019-05-24 09:55:00 +0800 | [diff] [blame] | 572 | if args.update_cmd_mapping: |
yangbill | bac1dd6 | 2019-06-03 17:06:40 +0800 | [diff] [blame] | 573 | atest_utils.handle_test_runner_cmd(' '.join(args.tests), |
yangbill | 52c63fa | 2019-05-24 09:55:00 +0800 | [diff] [blame] | 574 | dry_run_cmds) |
yangbill | cc1a21f | 2018-12-12 20:03:12 +0800 | [diff] [blame] | 575 | return constants.EXIT_CODE_SUCCESS |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 576 | if args.detect_regression: |
| 577 | build_targets |= (regression_test_runner.RegressionTestRunner('') |
| 578 | .get_test_runner_build_reqs()) |
mikehoran | c327dca | 2017-11-27 16:24:22 -0800 | [diff] [blame] | 579 | # args.steps will be None if none of -bit set, else list of params set. |
Jim Tang | 815b889 | 2018-07-11 12:57:30 +0800 | [diff] [blame] | 580 | steps = args.steps if args.steps else constants.ALL_STEPS |
| 581 | if build_targets and constants.BUILD_STEP in steps: |
Kevin Cheng | 5be930e | 2018-02-20 09:39:22 -0800 | [diff] [blame] | 582 | # Add module-info.json target to the list of build targets to keep the |
| 583 | # file up to date. |
| 584 | build_targets.add(mod_info.module_info_target) |
kellyhung | 23c55b8 | 2019-01-04 16:58:14 +0800 | [diff] [blame] | 585 | build_start = time.time() |
yangbill | 4b618ed | 2019-07-23 16:03:38 +0800 | [diff] [blame] | 586 | success = atest_utils.build(build_targets, args.verbose, |
| 587 | env_vars=constants.ATEST_BUILD_ENV) |
kellyhung | 23c55b8 | 2019-01-04 16:58:14 +0800 | [diff] [blame] | 588 | metrics.BuildFinishEvent( |
| 589 | duration=metrics_utils.convert_duration(time.time() - build_start), |
| 590 | success=success, |
| 591 | targets=build_targets) |
mikehoran | c80dc53 | 2017-11-14 14:30:06 -0800 | [diff] [blame] | 592 | if not success: |
Dan Shi | fa016d1 | 2018-02-02 00:37:19 -0800 | [diff] [blame] | 593 | return constants.EXIT_CODE_BUILD_FAILURE |
Jim Tang | 815b889 | 2018-07-11 12:57:30 +0800 | [diff] [blame] | 594 | elif constants.TEST_STEP not in steps: |
mikehoran | c327dca | 2017-11-27 16:24:22 -0800 | [diff] [blame] | 595 | logging.warn('Install step without test step currently not ' |
| 596 | 'supported, installing AND testing instead.') |
Jim Tang | 815b889 | 2018-07-11 12:57:30 +0800 | [diff] [blame] | 597 | steps.append(constants.TEST_STEP) |
yangbill | 848a7d1 | 2018-09-04 19:12:08 +0800 | [diff] [blame] | 598 | tests_exit_code = constants.EXIT_CODE_SUCCESS |
kellyhung | 23c55b8 | 2019-01-04 16:58:14 +0800 | [diff] [blame] | 599 | test_start = time.time() |
Jim Tang | 815b889 | 2018-07-11 12:57:30 +0800 | [diff] [blame] | 600 | if constants.TEST_STEP in steps: |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 601 | if not is_from_test_mapping(test_infos): |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 602 | tests_exit_code, reporter = test_runner_handler.run_all_tests( |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 603 | results_dir, test_infos, extra_args) |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 604 | atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter) |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 605 | else: |
| 606 | tests_exit_code = _run_test_mapping_tests( |
| 607 | results_dir, test_infos, extra_args) |
Mike Ma | 0126b9b | 2018-01-11 19:11:16 -0800 | [diff] [blame] | 608 | if args.detect_regression: |
| 609 | regression_args = _get_regression_detection_args(args, results_dir) |
mikehoran | 9b6b44b | 2018-04-09 15:54:58 -0700 | [diff] [blame] | 610 | # TODO(b/110485713): Should not call run_tests here. |
| 611 | reporter = result_reporter.ResultReporter() |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 612 | atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter) |
Dan Shi | 08c7b72 | 2018-11-29 10:25:59 -0800 | [diff] [blame] | 613 | tests_exit_code |= regression_test_runner.RegressionTestRunner( |
| 614 | '').run_tests( |
| 615 | None, regression_args, reporter) |
kellyhung | 23c55b8 | 2019-01-04 16:58:14 +0800 | [diff] [blame] | 616 | metrics.RunTestsFinishEvent( |
| 617 | duration=metrics_utils.convert_duration(time.time() - test_start)) |
yangbill | 848a7d1 | 2018-09-04 19:12:08 +0800 | [diff] [blame] | 618 | if tests_exit_code != constants.EXIT_CODE_SUCCESS: |
| 619 | tests_exit_code = constants.EXIT_CODE_TEST_FAILURE |
| 620 | return tests_exit_code |
mikehoran | 63d61b4 | 2017-07-28 15:28:50 -0700 | [diff] [blame] | 621 | |
Simran Basi | 259a2b5 | 2017-06-21 16:14:07 -0700 | [diff] [blame] | 622 | if __name__ == '__main__': |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 623 | RESULTS_DIR = make_test_run_dir() |
| 624 | with atest_execution_info.AtestExecutionInfo(sys.argv[1:], |
| 625 | RESULTS_DIR) as result_file: |
kellyhung | e3fa175 | 2019-04-23 11:13:41 +0800 | [diff] [blame] | 626 | metrics_base.MetricsBase.tool_name = constants.TOOL_NAME |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 627 | EXIT_CODE = main(sys.argv[1:], RESULTS_DIR) |
kellyhung | 7d004bb | 2019-04-02 11:54:59 +0800 | [diff] [blame] | 628 | DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE) |
| 629 | metrics.LocalDetectEvent( |
| 630 | detect_type=constants.DETECT_TYPE_BUG_DETECTED, |
| 631 | result=DETECTOR.caught_result) |
yelinhsieh | 4d5917d | 2019-03-12 17:26:27 +0800 | [diff] [blame] | 632 | metrics_utils.send_exit_event(EXIT_CODE) |
| 633 | if result_file: |
| 634 | print('Execution detail has saved in %s' % result_file.name) |
easoncylee | f0fb2b1 | 2019-01-22 15:49:09 +0800 | [diff] [blame] | 635 | sys.exit(EXIT_CODE) |