# pylint: disable-msg=C0111

# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

__author__ = 'cmasone@chromium.org (Chris Masone)'

# The boto module is only available/used in Moblab for validation of cloud
# storage access. The module is not available in the test lab environment,
# and the import error is handled.
try:
    import boto
except ImportError:
    boto = None
import common
import ConfigParser
import datetime
import logging
import os
import re
import shutil
import socket

from autotest_lib.frontend.afe import models
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib import time_utils
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.client.common_lib.cros.graphite import autotest_stats
from autotest_lib.frontend.afe import rpc_utils
from autotest_lib.server import utils
from autotest_lib.server.cros import provision
from autotest_lib.server.cros.dynamic_suite import constants
from autotest_lib.server.cros.dynamic_suite import control_file_getter
from autotest_lib.server.cros.dynamic_suite import tools
from autotest_lib.server.cros.dynamic_suite.suite import Suite
from autotest_lib.server.hosts import moblab_host
from autotest_lib.site_utils import host_history
from autotest_lib.site_utils import job_history
from autotest_lib.site_utils import server_manager_utils
from autotest_lib.site_utils import stable_version_utils


_CONFIG = global_config.global_config
MOBLAB_BOTO_LOCATION = '/home/moblab/.boto'

# Google Cloud Storage bucket url regex pattern. The pattern is used to extract
# the bucket name from the bucket URL. For example, "gs://image_bucket/google"
# should result in a bucket name "image_bucket".
GOOGLE_STORAGE_BUCKET_URL_PATTERN = re.compile(
        r'gs://(?P<bucket>[a-zA-Z][a-zA-Z0-9-_]*)/?.*')
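# Illustrative sketch (not executed): how the pattern above is meant to be
# used. The URL is the hypothetical example from the comment above.
#
#     match = GOOGLE_STORAGE_BUCKET_URL_PATTERN.match('gs://image_bucket/google')
#     match.group('bucket')  # -> 'image_bucket'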

# Constants used in JSON RPC field names.
_USE_EXISTING_BOTO_FILE = 'use_existing_boto_file'
_GS_ACCESS_KEY_ID = 'gs_access_key_id'
_GS_SECRETE_ACCESS_KEY = 'gs_secret_access_key'
_IMAGE_STORAGE_SERVER = 'image_storage_server'
_RESULT_STORAGE_SERVER = 'results_storage_server'


# Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.


def canonicalize_suite_name(suite_name):
    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    return 'test_suites/control.%s' % suite_name

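# Illustrative sketch (not executed): the naming convention implemented by
# canonicalize_suite_name() above.
#
#     canonicalize_suite_name('bvt')  # -> 'test_suites/control.bvt'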

def formatted_now():
    return datetime.datetime.now().strftime(time_utils.TIME_FMT)


def _get_control_file_contents_by_name(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch the control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    timer = autotest_stats.Timer('control_files.parse.%s.%s' %
                                 (ds.get_server_name(ds.url()
                                  ).replace('.', '_'),
                                  suite_name.rsplit('.')[-1]))
    # Get the control file for the suite.
    try:
        with timer:
            control_file_in = getter.get_control_file_contents_by_name(
                    suite_name)
    except error.CrosDynamicSuiteException as e:
        raise type(e)("%s while testing %s." % (e, build))
    if not control_file_in:
        raise error.ControlFileEmpty(
                "Fetching %s returned no data." % suite_name)
    # Force control files to only contain ascii characters.
    try:
        control_file_in.encode('ascii')
    except UnicodeDecodeError as e:
        raise error.ControlFileMalformed(str(e))

    return control_file_in


def _stage_build_artifacts(build):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build: image we want to stage.

    @raises StageControlFileFailure: if the dev server throws 500 while staging
            suite control files.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    timings = {}
    # Ensure components of |build| necessary for installing images are staged
    # on the dev server. However, set synchronous to False to allow other
    # components to be downloaded in the background.
    ds = dev_server.resolve(build)
    timings[constants.DOWNLOAD_STARTED_TIME] = formatted_now()
    timer = autotest_stats.Timer('control_files.stage.%s' % (
            ds.get_server_name(ds.url()).replace('.', '_')))
    try:
        with timer:
            ds.stage_artifacts(image=build, artifacts=['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
                "Failed to stage %s: %s" % (build, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (ds, timings)


@rpc_utils.route_rpc_to_master
def create_suite_job(name='', board='', build='', pool='', control_file='',
                     check_hosts=True, num=None, file_bugs=False, timeout=24,
                     timeout_mins=None, priority=priorities.Priority.DEFAULT,
                     suite_args=None, wait_for_results=True, job_retry=False,
                     max_retries=None, max_runtime_mins=None, suite_min_duts=0,
                     offload_failures_only=False, builds={},
                     test_source_build=None, run_prod_code=False,
                     delay_minutes=0, **kwargs):
    """
    Create a job to run a test suite on the given device with the given image.

    When the timeout specified in the control file is reached, the
    job is guaranteed to have completed and results will be available.

    @param name: The test name if control_file is supplied, otherwise the name
                 of the test suite to run, e.g. 'bvt'.
    @param board: the kind of device to run the tests on.
    @param build: unique name by which to refer to the image from now on.
    @param builds: the builds to install e.g.
                   {'cros-version:': 'x86-alex-release/R18-1655.0.0',
                    'fw-version:': 'x86-alex-firmware/R36-5771.50.0',
                    'fwro-version:': 'x86-alex-firmware/R36-5771.49.0'}
                   If builds is given a value, it overrides argument build.
    @param test_source_build: Build that contains the server-side test code.
    @param pool: Specify the pool of machines to use for scheduling
                 purposes.
    @param check_hosts: require appropriate live hosts to exist in the lab.
    @param num: Specify the number of machines to schedule across (integer).
                Leave unspecified or use None to use default sharding factor.
    @param file_bugs: File a bug on each test failure in this suite.
    @param timeout: The max lifetime of this suite, in hours.
    @param timeout_mins: The max lifetime of this suite, in minutes. Takes
                         priority over timeout.
    @param priority: Integer denoting priority. Higher is more important.
    @param suite_args: Optional arguments which will be parsed by the suite
                       control file. Used by control.test_that_wrapper to
                       determine which tests to run.
    @param wait_for_results: Set to False to run the suite job without waiting
                             for test jobs to finish. Default is True.
    @param job_retry: Set to True to enable job-level retry. Default is False.
    @param max_retries: Integer, maximum job retries allowed at suite level.
                        None for no max.
    @param max_runtime_mins: Maximum amount of time a job can be running in
                             minutes.
    @param suite_min_duts: Integer. Scheduler will prioritize getting the
                           minimum number of machines for the suite when it is
                           competing with another suite that has a higher
                           priority but already got minimum machines it needs.
    @param offload_failures_only: Only enable gs_offloading for failed jobs.
    @param run_prod_code: If True, the suite will run the test code that
                          lives in prod aka the test code currently on the
                          lab servers. If False, the control files and test
                          code for this suite run will be retrieved from the
                          build artifacts.
    @param delay_minutes: Delay the creation of test jobs for a given number of
                          minutes.
    @param kwargs: extra keyword args. NOT USED.

    @raises ControlFileNotFound: if a unique suite control file doesn't exist.
    @raises NoControlFileList: if we can't list the control files at all.
    @raises StageControlFileFailure: If the dev server throws 500 while
                                     staging test_suites.
    @raises ControlFileEmpty: if the control file exists on the server, but
                              can't be read.

    @return: the job ID of the suite; -1 on error.
    """
    if type(num) is not int and num is not None:
        raise error.SuiteArgumentException('Ill specified num argument %r. '
                                           'Must be an integer or None.' % num)
    if num == 0:
        logging.warning("Can't run on 0 hosts; using default.")
        num = None

    # TODO(dshi): crbug.com/496782 Remove argument build and its reference after
    # R45 falls out of stable channel.
    if build and not builds:
        builds = {provision.CROS_VERSION_PREFIX: build}

    # Default test source build to CrOS build if it's not specified and
    # run_prod_code is set to False.
    if not run_prod_code:
        test_source_build = Suite.get_test_source_build(
                builds, test_source_build=test_source_build)

    suite_name = canonicalize_suite_name(name)
    if run_prod_code:
        ds = dev_server.resolve(build)
        keyvals = {}
        getter = control_file_getter.FileSystemGetter(
                [_CONFIG.get_config_value('SCHEDULER',
                                          'drone_installation_directory')])
        control_file = getter.get_control_file_contents_by_name(suite_name)
    else:
        (ds, keyvals) = _stage_build_artifacts(test_source_build)
    keyvals[constants.SUITE_MIN_DUTS_KEY] = suite_min_duts

    if not control_file:
        # No control file was supplied so look it up from the build artifacts.
        suite_name = canonicalize_suite_name(name)
        control_file = _get_control_file_contents_by_name(test_source_build,
                                                          ds, suite_name)
    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    if not run_prod_code:
        name = '%s-%s' % (test_source_build, suite_name)
    else:
        # If run_prod_code is True, test_source_build is not set; use the
        # first build in the builds list for the suite job name.
        name = '%s-%s' % (builds.values()[0], suite_name)

    timeout_mins = timeout_mins or timeout * 60
    max_runtime_mins = max_runtime_mins or timeout * 60

    if not board:
        board = utils.ParseBuildName(builds[provision.CROS_VERSION_PREFIX])[0]

    # TODO(dshi): crbug.com/496782 Remove argument build and its reference after
    # R45 falls out of stable channel.
    # Prepend build and board to the control file.
    inject_dict = {'board': board,
                   'build': builds.get(provision.CROS_VERSION_PREFIX),
                   'builds': builds,
                   'check_hosts': check_hosts,
                   'pool': pool,
                   'num': num,
                   'file_bugs': file_bugs,
                   'timeout': timeout,
                   'timeout_mins': timeout_mins,
                   'devserver_url': ds.url(),
                   'priority': priority,
                   'suite_args': suite_args,
                   'wait_for_results': wait_for_results,
                   'job_retry': job_retry,
                   'max_retries': max_retries,
                   'max_runtime_mins': max_runtime_mins,
                   'offload_failures_only': offload_failures_only,
                   'test_source_build': test_source_build,
                   'run_prod_code': run_prod_code,
                   'delay_minutes': delay_minutes,
                   }

    control_file = tools.inject_vars(inject_dict, control_file)

    return rpc_utils.create_job_common(name,
                                       priority=priority,
                                       timeout_mins=timeout_mins,
                                       max_runtime_mins=max_runtime_mins,
                                       control_type='Server',
                                       control_file=control_file,
                                       hostless=True,
                                       keyvals=keyvals)

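# Illustrative sketch (not executed): a create_suite_job() call with
# hypothetical arguments, mirroring the docstring examples above (board,
# build and pool names are placeholders).
#
#     create_suite_job(name='bvt', board='x86-alex',
#                      builds={'cros-version:': 'x86-alex-release/R18-1655.0.0'},
#                      pool='bvt', priority=priorities.Priority.DEFAULT)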

# TODO: hide the following rpcs under is_moblab
def moblab_only(func):
    """Ensure moblab specific functions only run on Moblab devices."""
    def verify(*args, **kwargs):
        if not utils.is_moblab():
            raise error.RPCException('RPC: %s can only run on Moblab Systems!' %
                                     func.__name__)
        return func(*args, **kwargs)
    return verify


@moblab_only
def get_config_values():
    """Returns all config values parsed from global and shadow configs.

    Config values are grouped by sections, and each section is composed of
    a list of name value pairs.
    """
    sections = _CONFIG.get_sections()
    config_values = {}
    for section in sections:
        config_values[section] = _CONFIG.config.items(section)
    return rpc_utils.prepare_for_serialization(config_values)


def _write_config_file(config_file, config_values, overwrite=False):
    """Writes out a configuration file.

    @param config_file: The name of the configuration file.
    @param config_values: The ConfigParser object.
    @param overwrite: Flag on whether overwriting is allowed.
    """
    if not config_file:
        raise error.RPCException('Empty config file name.')
    if not overwrite and os.path.exists(config_file):
        raise error.RPCException('Config file already exists.')

    if config_values:
        with open(config_file, 'w') as config_file:
            config_values.write(config_file)


def _read_original_config():
    """Reads the original configuration without the shadow overrides.

    @return: A configuration object, see global_config_class.
    """
    original_config = global_config.global_config_class()
    original_config.set_config_files(shadow_file='')
    return original_config


def _read_raw_config(config_file):
    """Reads the raw configuration from a configuration file.

    @param: config_file: The path of the configuration file.

    @return: A ConfigParser object.
    """
    shadow_config = ConfigParser.RawConfigParser()
    shadow_config.read(config_file)
    return shadow_config


def _get_shadow_config_from_partial_update(config_values):
    """Finds out the new shadow configuration based on a partial update.

    Since the input is only a partial config, we should not lose the config
    data inside the existing shadow config file. We also need to distinguish
    if the input config info overrides with a new value or reverts back to
    an original value.

    @param config_values: See get_moblab_settings().

    @return: The new shadow configuration as ConfigParser object.
    """
    original_config = _read_original_config()
    existing_shadow = _read_raw_config(_CONFIG.shadow_file)
    for section, config_value_list in config_values.iteritems():
        for key, value in config_value_list:
            if original_config.get_config_value(section, key,
                                                default='',
                                                allow_blank=True) != value:
                if not existing_shadow.has_section(section):
                    existing_shadow.add_section(section)
                existing_shadow.set(section, key, value)
            elif existing_shadow.has_option(section, key):
                existing_shadow.remove_option(section, key)
    return existing_shadow

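# Illustrative sketch (not executed): a hypothetical partial update in the
# {'SECTION': [(key, value), ...]} shape used by the config RPCs in this file.
# Values that differ from the original config are written into the returned
# shadow ConfigParser; values that match the original are dropped from it.
#
#     _get_shadow_config_from_partial_update(
#             {'CROS': [('image_storage_server', 'gs://image_bucket/')]})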

def _update_partial_config(config_values):
    """Updates the shadow configuration file with a partial config update.

    @param config_values: See get_moblab_settings().
    """
    existing_config = _get_shadow_config_from_partial_update(config_values)
    _write_config_file(_CONFIG.shadow_file, existing_config, True)


@moblab_only
def update_config_handler(config_values):
    """Update config values and override shadow config.

    @param config_values: See get_moblab_settings().
    """
    original_config = _read_original_config()
    new_shadow = ConfigParser.RawConfigParser()
    for section, config_value_list in config_values.iteritems():
        for key, value in config_value_list:
            if original_config.get_config_value(section, key,
                                                default='',
                                                allow_blank=True) != value:
                if not new_shadow.has_section(section):
                    new_shadow.add_section(section)
                new_shadow.set(section, key, value)

    if not _CONFIG.shadow_file or not os.path.exists(_CONFIG.shadow_file):
        raise error.RPCException('Shadow config file does not exist.')
    _write_config_file(_CONFIG.shadow_file, new_shadow, True)

    # TODO (sbasi) crbug.com/403916 - Remove the reboot command and
    # instead restart the services that rely on the config values.
    os.system('sudo reboot')


@moblab_only
def reset_config_settings():
    with open(_CONFIG.shadow_file, 'w') as config_file:
        pass
    os.system('sudo reboot')


@moblab_only
def set_boto_key(boto_key):
    """Update the boto_key file.

    @param boto_key: File name of boto_key uploaded through handle_file_upload.
    """
    if not os.path.exists(boto_key):
        raise error.RPCException('Boto key: %s does not exist!' % boto_key)
    shutil.copyfile(boto_key, moblab_host.MOBLAB_BOTO_LOCATION)


@moblab_only
def set_launch_control_key(launch_control_key):
    """Update the launch_control_key file.

    @param launch_control_key: File name of launch_control_key uploaded through
            handle_file_upload.
    """
    if not os.path.exists(launch_control_key):
        raise error.RPCException('Launch Control key: %s does not exist!' %
                                 launch_control_key)
    shutil.copyfile(launch_control_key,
                    moblab_host.MOBLAB_LAUNCH_CONTROL_KEY_LOCATION)
    # Restart the devserver service.
    os.system('sudo restart moblab-devserver-init')


########### Moblab Config Wizard RPCs #######################
def _get_public_ip_address(socket_handle):
    """Gets the public IP address.

    Connects to Google DNS server using a socket and gets the preferred IP
    address from the connection.

    @param: socket_handle: a unix socket.

    @return: public ip address as string.
    """
    try:
        socket_handle.settimeout(1)
        socket_handle.connect(('8.8.8.8', 53))
        socket_name = socket_handle.getsockname()
        if socket_name is not None:
            logging.info('Got socket name from UDP socket.')
            return socket_name[0]
        logging.warn('Created UDP socket but with no socket_name.')
    except socket.error:
        logging.warn('Could not get socket name from UDP socket.')
    return None


def _get_network_info():
    """Gets the network information.

    A TCP socket is used to test connectivity. If there is no connectivity,
    try to get the public IP with a UDP socket.

    @return: a tuple as (public_ip_address, connected_to_internet).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    ip = _get_public_ip_address(s)
    if ip is not None:
        logging.info('Established TCP connection with well known server.')
        return (ip, True)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return (_get_public_ip_address(s), False)


@moblab_only
def get_network_info():
    """Returns the server IP addresses and whether the server is connected.

    The server IP addresses are returned as an array of strings, and the
    connectivity as a flag.
    """
    network_info = {}
    info = _get_network_info()
    if info[0] is not None:
        network_info['server_ips'] = [info[0]]
    network_info['is_connected'] = info[1]

    return rpc_utils.prepare_for_serialization(network_info)


# Gets the boto configuration.
def _get_boto_config():
    """Reads the boto configuration from the boto file.

    @return: Boto configuration as ConfigParser object.
    """
    boto_config = ConfigParser.ConfigParser()
    boto_config.read(MOBLAB_BOTO_LOCATION)
    return boto_config


@moblab_only
def get_cloud_storage_info():
    """RPC handler to get the cloud storage access information.
    """
    cloud_storage_info = {}
    value = _CONFIG.get_config_value('CROS', _IMAGE_STORAGE_SERVER)
    if value is not None:
        cloud_storage_info[_IMAGE_STORAGE_SERVER] = value
    value = _CONFIG.get_config_value('CROS', _RESULT_STORAGE_SERVER)
    if value is not None:
        cloud_storage_info[_RESULT_STORAGE_SERVER] = value

    boto_config = _get_boto_config()
    sections = boto_config.sections()

    if sections:
        cloud_storage_info[_USE_EXISTING_BOTO_FILE] = True
    else:
        cloud_storage_info[_USE_EXISTING_BOTO_FILE] = False
    if 'Credentials' in sections:
        options = boto_config.options('Credentials')
        if _GS_ACCESS_KEY_ID in options:
            value = boto_config.get('Credentials', _GS_ACCESS_KEY_ID)
            cloud_storage_info[_GS_ACCESS_KEY_ID] = value
        if _GS_SECRETE_ACCESS_KEY in options:
            value = boto_config.get('Credentials', _GS_SECRETE_ACCESS_KEY)
            cloud_storage_info[_GS_SECRETE_ACCESS_KEY] = value

    return rpc_utils.prepare_for_serialization(cloud_storage_info)


def _get_bucket_name_from_url(bucket_url):
    """Gets the bucket name from a bucket url.

    @param: bucket_url: the bucket url string.
    """
    if bucket_url:
        match = GOOGLE_STORAGE_BUCKET_URL_PATTERN.match(bucket_url)
        if match:
            return match.group('bucket')
    return None


def _is_valid_boto_key(key_id, key_secret):
    """Checks if the boto key is valid.

    @param: key_id: The boto key id string.
    @param: key_secret: The boto key string.

    @return: A tuple as (valid_boolean, details_string).
    """
    if not key_id or not key_secret:
        return (False, "Empty key id or secret.")
    conn = boto.connect_gs(key_id, key_secret)
    try:
        buckets = conn.get_all_buckets()
        return (True, None)
    except boto.exception.GSResponseError:
        details = "The boto access key is not valid"
        return (False, details)
    finally:
        conn.close()


def _is_valid_bucket(key_id, key_secret, bucket_name):
    """Checks if a bucket is valid and accessible.

    @param: key_id: The boto key id string.
    @param: key_secret: The boto key string.
    @param: bucket_name: The bucket name string.

    @return: A tuple as (valid_boolean, details_string).
    """
    if not key_id or not key_secret or not bucket_name:
        return (False, "Server error: invalid argument")
    conn = boto.connect_gs(key_id, key_secret)
    bucket = conn.lookup(bucket_name)
    conn.close()
    if bucket:
        return (True, None)
    return (False, "Bucket %s does not exist." % bucket_name)


def _is_valid_bucket_url(key_id, key_secret, bucket_url):
    """Validates that the bucket url is accessible.

    @param: key_id: The boto key id string.
    @param: key_secret: The boto key string.
    @param: bucket_url: The bucket url string.

    @return: A tuple as (valid_boolean, details_string).
    """
    bucket_name = _get_bucket_name_from_url(bucket_url)
    if bucket_name:
        return _is_valid_bucket(key_id, key_secret, bucket_name)
    return (False, "Bucket url %s is not valid" % bucket_url)


def _validate_cloud_storage_info(cloud_storage_info):
    """Checks if the cloud storage information is valid.

    @param: cloud_storage_info: The JSON RPC object for cloud storage info.

    @return: A tuple as (valid_boolean, details_string).
    """
    valid = True
    details = None
    if not cloud_storage_info[_USE_EXISTING_BOTO_FILE]:
        key_id = cloud_storage_info[_GS_ACCESS_KEY_ID]
        key_secret = cloud_storage_info[_GS_SECRETE_ACCESS_KEY]
        valid, details = _is_valid_boto_key(key_id, key_secret)

        if valid:
            valid, details = _is_valid_bucket_url(
                key_id, key_secret, cloud_storage_info[_IMAGE_STORAGE_SERVER])

        if valid:
            valid, details = _is_valid_bucket_url(
                key_id, key_secret, cloud_storage_info[_RESULT_STORAGE_SERVER])
    return (valid, details)


def _create_operation_status_response(is_ok, details):
    """Helper method to create an operation status response.

    @param: is_ok: Boolean for if the operation is ok.
    @param: details: A detailed string.

    @return: A serialized JSON RPC object.
    """
    status_response = {'status_ok': is_ok}
    if details:
        status_response['status_details'] = details
    return rpc_utils.prepare_for_serialization(status_response)


@moblab_only
def validate_cloud_storage_info(cloud_storage_info):
    """RPC handler to check if the cloud storage info is valid.
    """
    valid, details = _validate_cloud_storage_info(cloud_storage_info)
    return _create_operation_status_response(valid, details)


@moblab_only
def submit_wizard_config_info(cloud_storage_info):
    """RPC handler to submit the cloud storage info.
    """
    valid, details = _validate_cloud_storage_info(cloud_storage_info)
    if not valid:
        return _create_operation_status_response(valid, details)
    config_update = {}
    config_update['CROS'] = [
        (_IMAGE_STORAGE_SERVER, cloud_storage_info[_IMAGE_STORAGE_SERVER]),
        (_RESULT_STORAGE_SERVER, cloud_storage_info[_RESULT_STORAGE_SERVER])
    ]
    _update_partial_config(config_update)

    if not cloud_storage_info[_USE_EXISTING_BOTO_FILE]:
        boto_config = ConfigParser.RawConfigParser()
        boto_config.add_section('Credentials')
        boto_config.set('Credentials', _GS_ACCESS_KEY_ID,
                        cloud_storage_info[_GS_ACCESS_KEY_ID])
        boto_config.set('Credentials', _GS_SECRETE_ACCESS_KEY,
                        cloud_storage_info[_GS_SECRETE_ACCESS_KEY])
        _write_config_file(MOBLAB_BOTO_LOCATION, boto_config, True)

    _CONFIG.parse_config_file()

    return _create_operation_status_response(True, None)


def get_job_history(**filter_data):
    """Get history of the job, including the special tasks executed for it.

    @param filter_data: filter for the call, should at least include
                        {'job_id': [job id]}
    @returns: JSON string of the job's history, including information such
              as the hosts that ran the job and the special tasks executed
              before and after the job.
    """
    job_id = filter_data['job_id']
    job_info = job_history.get_job_info(job_id)
    return rpc_utils.prepare_for_serialization(job_info.get_history())

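# Illustrative sketch (not executed): fetching the history of a hypothetical
# job id through the RPC above.
#
#     get_job_history(job_id=14995562)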

def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Get history of a list of hosts.

    The return is a JSON string of host history for each host, for example,
    {'172.22.33.51': [{'status': 'Resetting',
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                      {'status': 'Running',
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                      ]
    }
    @param start_time: start time to search for history, can be string value or
                       epoch time.
    @param end_time: end time to search for history, can be string value or
                     epoch time.
    @param hosts: A list of hosts to search for history. Default is None.
    @param board: board type of hosts. Default is None.
    @param pool: pool type of hosts. Default is None.
    @returns: JSON string of the host history.
    """
    return rpc_utils.prepare_for_serialization(
            host_history.get_history_details(
                    start_time=start_time, end_time=end_time,
                    hosts=hosts, board=board, pool=pool,
                    process_pool_size=4))

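# Illustrative sketch (not executed): one day of history for the hypothetical
# host used in the docstring above (times follow time_utils.TIME_FMT).
#
#     get_host_history('2014-08-07 00:00:00', '2014-08-08 00:00:00',
#                      hosts=['172.22.33.51'])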

def shard_heartbeat(shard_hostname, jobs=(), hqes=(), known_job_ids=(),
                    known_host_ids=(), known_host_statuses=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated with
                 newer status from a shard. Note that for every hostqueueentry
                 the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.
    @param known_host_statuses: List of statuses of hosts the shard already has.

    @returns: Serialized representations of hosts, jobs, suite job keyvals
              and their dependencies to be inserted into a shard's database.
    """
    # The following alternatives to sending host and job ids in every heartbeat
    # have been considered:
    # 1. Sending the highest known job and host ids. This would work for jobs:
    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
    #    particular shard during a heartbeat, it never will be assigned to this
    #    shard later.
    #    This is not true for hosts though: A host that is leased won't be sent
    #    to the shard now, but might be sent in a future heartbeat. This means
    #    sometimes hosts should be transferred that have a lower id than the
    #    maximum host id the shard knows.
    # 2. Send the number of jobs/hosts the shard knows to the master in each
    #    heartbeat. Compare these to the number of records that already have
    #    the shard_id set to this shard. In the normal case, they should match.
    #    In case they don't, resend all entities of that type.
    #    This would work well for hosts, because there aren't that many.
    #    Resending all jobs is quite a big overhead though.
    #    Also, this approach might run into edge cases when entities are
    #    ever deleted.
    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
    #    Using two different approaches isn't consistent and might cause
    #    confusion. Also the issues with the case of deletions might still
    #    occur.
    #
    # The overhead of sending all job and host ids in every heartbeat is low:
    # At peaks one board has about 1200 created but unfinished jobs.
    # See the numbers here: http://goo.gl/gQCGWH
    # Assuming that job ids have 6 digits and that json serialization takes a
    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
    # A NOT IN query with 5000 ids took about 30ms in the tests that were made.
    # These numbers seem low enough to outweigh the disadvantages of the
    # solutions described above.
    timer = autotest_stats.Timer('shard_heartbeat')
    with timer:
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        assert len(known_host_ids) == len(known_host_statuses)
        for i in range(len(known_host_ids)):
            host_model = models.Host.objects.get(pk=known_host_ids[i])
            if host_model.status != known_host_statuses[i]:
                host_model.status = known_host_statuses[i]
                host_model.save()

        hosts, jobs, suite_keyvals = rpc_utils.find_records_for_shard(
                shard_obj, known_job_ids=known_job_ids,
                known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
            'suite_keyvals': [kv.serialize() for kv in suite_keyvals],
        }

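# Illustrative sketch (not executed): a heartbeat from a hypothetical shard
# reporting the ids and statuses it already knows (all values are
# placeholders).
#
#     shard_heartbeat('shard1.example.com', jobs=[], hqes=[],
#                     known_job_ids=[14995562], known_host_ids=[42],
#                     known_host_statuses=['Ready'])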

def get_shards(**filter_data):
    """Return a list of all shards.

    @returns A sequence of nested dictionaries of shard information.
    """
    shards = models.Shard.query_objects(filter_data)
    serialized_shards = rpc_utils.prepare_rows_as_nested_dicts(shards, ())
    for serialized, shard in zip(serialized_shards, shards):
        serialized['labels'] = [label.name for label in shard.labels.all()]

    return serialized_shards


def add_shard(hostname, labels):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param labels: Board labels separated by a comma. Jobs of one of the labels
                   will be assigned to the shard.

    @raises error.RPCException: If a label provided doesn't start with `board:`.
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exists.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.
    """
    labels = labels.split(',')
    label_models = []
    for label in labels:
        if not label.startswith('board:'):
            raise error.RPCException('Sharding only supports `board:.*` '
                                     'labels.')
        # Fetch the label first, so the shard isn't created when the label
        # doesn't exist.
        label_models.append(models.Label.smart_get(label))

    shard = models.Shard.add_object(hostname=hostname)
    for label in label_models:
        shard.labels.add(label)
    return shard.id

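# Illustrative sketch (not executed): registering a shard for a single board
# label (hostname and board are placeholders).
#
#     add_shard('shard1.example.com', 'board:x86-alex')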

def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Repair task is scheduled for them. This reboots the DUTs
    and therefore clears all running processes that might be left.

    The shard_id of jobs of that shard will be set to None.

    The status of jobs that haven't been reported to be finished yet will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)

    # TODO(beeps): Power off shard

    # For ChromeOS hosts, repair reboots the DUT.
    # Repair will escalate through multiple repair steps and will verify the
    # success after each of them. Anyway, it will always run at least the first
    # one, which includes a reboot.
    # After a reboot we can be sure no processes from prior tests that were run
    # by a shard are still running on the DUT.
    # Important: Don't just set the status to Repair Failed, as that would run
    # Verify first, before doing any repair measures. Verify would probably
    # succeed, so this wouldn't change anything on the DUT.
    for host in models.Host.objects.filter(shard=shard):
        models.SpecialTask.objects.create(
                task=models.SpecialTask.Task.REPAIR,
                host=host,
                requested_by=models.User.current_user())
    models.Host.objects.filter(shard=shard).update(shard=None)

    models.Job.objects.filter(shard=shard).update(shard=None)

    shard.labels.clear()

    shard.delete()


def get_servers(hostname=None, role=None, status=None):
    """Get a list of servers with matching role and status.

    @param hostname: FQDN of the server.
    @param role: Name of the server role, e.g., drone, scheduler. Default to
                 None to match any role.
    @param status: Status of the server, e.g., primary, backup, repair_required.
                   Default to None to match any server status.

    @raises error.RPCException: If server database is not used.
    @return: A list of server names for servers with matching role and status.
    """
    if not server_manager_utils.use_server_db():
        raise error.RPCException('Server database is not enabled. Please '
                                 'retrieve servers from the global config.')
    servers = server_manager_utils.get_servers(hostname=hostname, role=role,
                                               status=status)
    return [s.get_details() for s in servers]


@rpc_utils.route_rpc_to_master
def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
    """Get the stable version for the given board.

    @param board: Name of the board.
    @param android: If True, the given board is an Android-based device. If
                    False, assume it's a Chrome OS-based device.

    @return: Stable version of the given board. Returns the global config value
             of CROS.stable_cros_version if the stable_versions table does not
             have an entry for board DEFAULT.
    """
    return stable_version_utils.get(board=board, android=android)


@rpc_utils.route_rpc_to_master
def get_all_stable_versions():
    """Get stable versions for all boards.

    @return: A dictionary of board:version.
    """
    return stable_version_utils.get_all()


@rpc_utils.route_rpc_to_master
def set_stable_version(version, board=stable_version_utils.DEFAULT):
    """Modify stable version for the given board.

    @param version: The new value of stable version for given board.
    @param board: Name of the board, default to value `DEFAULT`.
    """
    stable_version_utils.set(version=version, board=board)


@rpc_utils.route_rpc_to_master
def delete_stable_version(board):
    """Delete the stable version for the given board.

    Delete a stable version entry in the afe_stable_versions table for a given
    board, so the default stable version will be used.

    @param board: Name of the board.
    """
    stable_version_utils.delete(board=board)


def get_tests_by_build(build):
    """Get the tests that are available for the specified build.

    @param build: unique name by which to refer to the image.

    @return: A sorted list of all tests that are in the build specified.
    """
    # Stage the test artifacts.
    try:
        ds = dev_server.ImageServer.resolve(build)
        build = ds.translate(build)
    except dev_server.DevServerException as e:
        raise ValueError('Could not resolve build %s: %s' % (build, e))

    try:
        ds.stage_artifacts(image=build, artifacts=['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
                'Failed to stage %s: %s' % (build, e))

    # Collect the control files specified in this build.
    cfile_getter = control_file_getter.DevServerGetter.create(build, ds)
    control_file_list = cfile_getter.get_control_file_list()

    test_objects = []
    _id = 0
    for control_file_path in control_file_list:
        # Read and parse the control file.
        control_file = cfile_getter.get_control_file_contents(
                control_file_path)
        control_obj = control_data.parse_control_string(control_file)

        # Extract the values needed for the AFE from the control_obj.
        # The keys list represents attributes in the control_obj that
        # are required by the AFE.
        keys = ['author', 'doc', 'name', 'time', 'test_type', 'experimental',
                'test_category', 'test_class', 'dependencies', 'run_verify',
                'sync_count', 'job_retries', 'retries', 'path']

        test_object = {}
        for key in keys:
            test_object[key] = getattr(control_obj, key) if hasattr(
                    control_obj, key) else ''

        # Unfortunately, the AFE expects different key names for certain
        # values; these must be corrected to avoid the risk of tests
        # being omitted by the AFE.
        # The 'id' is an additional value used in the AFE.
        # The control_data parsing does not reference 'run_reset', but it
        # is also used in the AFE and defaults to True.
        test_object['id'] = _id
        test_object['run_reset'] = True
        test_object['description'] = test_object.get('doc', '')
        test_object['test_time'] = test_object.get('time', 0)
        test_object['test_retry'] = test_object.get('retries', 0)

        # Fix the test name to be consistent with the current presentation
        # of test names in the AFE.
        testpath, subname = os.path.split(control_file_path)
        testname = os.path.basename(testpath)
        subname = subname.split('.')[1:]
        if subname:
            testname = '%s:%s' % (testname, ':'.join(subname))

        test_object['name'] = testname

        # Correct the test path as parse_control_string sets an empty string.
        test_object['path'] = control_file_path

        _id += 1
        test_objects.append(test_object)

    test_objects = sorted(test_objects, key=lambda x: x.get('name'))
    return rpc_utils.prepare_for_serialization(test_objects)