Dan Shi | 4df3925 | 2013-03-19 13:19:45 -0700 | [diff] [blame] | 1 | # pylint: disable-msg=C0111 |
| 2 | |
Chris Masone | 859fdec | 2012-01-30 08:38:09 -0800 | [diff] [blame] | 3 | # Copyright (c) 2012 The Chromium OS Authors. All rights reserved. |
| 4 | # Use of this source code is governed by a BSD-style license that can be |
| 5 | # found in the LICENSE file. |
| 6 | |
| 7 | __author__ = 'cmasone@chromium.org (Chris Masone)' |
| 8 | |
| 9 | import common |
Chris Masone | a8066a9 | 2012-05-01 16:52:31 -0700 | [diff] [blame] | 10 | import datetime |
Chris Masone | 859fdec | 2012-01-30 08:38:09 -0800 | [diff] [blame] | 11 | import logging |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 12 | import os |
| 13 | import shutil |
Aviv Keshet | d83ef44 | 2013-01-16 16:19:35 -0800 | [diff] [blame] | 14 | |
Jakob Juelich | 82b7d1c | 2014-09-15 16:10:57 -0700 | [diff] [blame] | 15 | from autotest_lib.frontend.afe import models |
Aviv Keshet | d83ef44 | 2013-01-16 16:19:35 -0800 | [diff] [blame] | 16 | from autotest_lib.client.common_lib import error |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 17 | from autotest_lib.client.common_lib import global_config |
Alex Miller | 7d658cf | 2013-09-04 16:00:35 -0700 | [diff] [blame] | 18 | from autotest_lib.client.common_lib import priorities |
Dan Shi | dfea368 | 2014-08-10 23:38:40 -0700 | [diff] [blame] | 19 | from autotest_lib.client.common_lib import time_utils |
Chris Masone | 859fdec | 2012-01-30 08:38:09 -0800 | [diff] [blame] | 20 | from autotest_lib.client.common_lib.cros import dev_server |
Jakob Juelich | 59cfe54 | 2014-09-02 16:37:46 -0700 | [diff] [blame] | 21 | from autotest_lib.client.common_lib.cros.graphite import stats |
Jakob Juelich | 9fffe4f | 2014-08-14 18:07:05 -0700 | [diff] [blame] | 22 | from autotest_lib.frontend.afe import rpc_utils |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 23 | from autotest_lib.server import utils |
Chris Masone | 44e4d6c | 2012-08-15 14:25:53 -0700 | [diff] [blame] | 24 | from autotest_lib.server.cros.dynamic_suite import constants |
Chris Masone | b493555 | 2012-08-14 12:05:54 -0700 | [diff] [blame] | 25 | from autotest_lib.server.cros.dynamic_suite import control_file_getter |
Chris Masone | 44e4d6c | 2012-08-15 14:25:53 -0700 | [diff] [blame] | 26 | from autotest_lib.server.cros.dynamic_suite import tools |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 27 | from autotest_lib.server.hosts import moblab_host |
Dan Shi | dfea368 | 2014-08-10 23:38:40 -0700 | [diff] [blame] | 28 | from autotest_lib.site_utils import host_history |
Dan Shi | 193905e | 2014-07-25 23:33:09 -0700 | [diff] [blame] | 29 | from autotest_lib.site_utils import job_history |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 30 | |
| 31 | |
| 32 | _CONFIG = global_config.global_config |
| 33 | MOBLAB_BOTO_LOCATION = '/home/moblab/.boto' |
Chris Masone | 859fdec | 2012-01-30 08:38:09 -0800 | [diff] [blame] | 34 | |
Chris Masone | f8b5306 | 2012-05-08 22:14:18 -0700 | [diff] [blame] | 35 | # Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py. |
Chris Masone | 859fdec | 2012-01-30 08:38:09 -0800 | [diff] [blame] | 36 | |
| 37 | |
def canonicalize_suite_name(suite_name):
    """Turn a bare suite name into its control file path.

    @param suite_name: name of the suite, e.g. 'bvt'.
    @return: control file path, e.g. 'test_suites/control.bvt'.
    """
    return 'test_suites/control.' + suite_name
| 40 | |
| 41 | |
def formatted_now():
    """Return the current local time rendered with time_utils.TIME_FMT."""
    current = datetime.datetime.now()
    return current.strftime(time_utils.TIME_FMT)
Chris Masone | aa10f8e | 2012-05-15 13:34:21 -0700 | [diff] [blame] | 44 | |
| 45 | |
def _get_control_file_contents_by_name(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build| for |board|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.
    @raises ControlFileMalformed if the control file contains non-ascii
                                 characters.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    # Get the control file for the suite.
    try:
        control_file_in = getter.get_control_file_contents_by_name(suite_name)
    except error.CrosDynamicSuiteException as e:
        # Re-raise the same exception type with the build name appended for
        # easier triage.
        raise type(e)("%s while testing %s." % (e, build))
    if not control_file_in:
        raise error.ControlFileEmpty(
            "Fetching %s returned no data." % suite_name)
    # Force control files to only contain ascii characters.
    try:
        control_file_in.encode('ascii')
    except UnicodeError as e:
        # UnicodeError covers both UnicodeDecodeError (a byte string with
        # non-ascii bytes being implicitly decoded) and UnicodeEncodeError
        # (a unicode string that can't be encoded to ascii). Catching only
        # UnicodeDecodeError, as before, let the unicode case escape as an
        # unhandled exception instead of ControlFileMalformed.
        raise error.ControlFileMalformed(str(e))

    return control_file_in
| 78 | |
| 79 | |
def _stage_build_artifacts(build):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build image we want to stage.

    @raises StageBuildFailure: if the dev server throws 500 while staging
                               build.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    # Pick a devserver for this build; only 'test_suites' is staged
    # synchronously here so remaining artifacts can download in the
    # background.
    ds = dev_server.ImageServer.resolve(build)
    timings = {constants.DOWNLOAD_STARTED_TIME: formatted_now()}
    try:
        ds.stage_artifacts(build, ['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageBuildFailure(
                "Failed to stage %s: %s" % (build, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (ds, timings)
| 104 | |
| 105 | |
def create_suite_job(name='', board='', build='', pool='', control_file='',
                     check_hosts=True, num=None, file_bugs=False, timeout=24,
                     timeout_mins=None, priority=priorities.Priority.DEFAULT,
                     suite_args=None, wait_for_results=True, job_retry=False,
                     max_runtime_mins=None, **kwargs):
    """
    Create a job to run a test suite on the given device with the given image.

    When the timeout specified in the control file is reached, the
    job is guaranteed to have completed and results will be available.

    @param name: The test name if control_file is supplied, otherwise the name
                 of the test suite to run, e.g. 'bvt'.
    @param board: the kind of device to run the tests on.
    @param build: unique name by which to refer to the image from now on.
    @param pool: Specify the pool of machines to use for scheduling purposes.
    @param control_file: the control file to run; looked up from the build
                         artifacts when empty.
    @param check_hosts: require appropriate live hosts to exist in the lab.
    @param num: Specify the number of machines to schedule across (integer).
                Leave unspecified or use None to use default sharding factor.
    @param file_bugs: File a bug on each test failure in this suite.
    @param timeout: The max lifetime of this suite, in hours.
    @param timeout_mins: The max lifetime of this suite, in minutes. Takes
                         priority over timeout.
    @param priority: Integer denoting priority. Higher is more important.
    @param suite_args: Optional arguments which will be parsed by the suite
                       control file. Used by control.test_that_wrapper to
                       determine which tests to run.
    @param wait_for_results: Set to False to run the suite job without waiting
                             for test jobs to finish. Default is True.
    @param job_retry: Set to True to enable job-level retry. Default is False.
    @param max_runtime_mins: Maximum amount of time a job can be running in
                             minutes.
    @param kwargs: extra keyword args. NOT USED.

    @raises ControlFileNotFound: if a unique suite control file doesn't exist.
    @raises NoControlFileList: if we can't list the control files at all.
    @raises StageBuildFailure: if the dev server throws 500 while staging build.
    @raises ControlFileEmpty: if the control file exists on the server, but
                              can't be read.

    @return: the job ID of the suite; -1 on error.
    """
    # Validate the sharding factor before doing any expensive staging work.
    if num is not None and type(num) is not int:
        raise error.SuiteArgumentException('Ill specified num argument %r. '
                                           'Must be an integer or None.' % num)
    if num == 0:
        logging.warning("Can't run on 0 hosts; using default.")
        num = None

    # Staging timings are recorded as job keyvals so suite overhead can be
    # tracked later.
    (ds, timings) = _stage_build_artifacts(build)

    if not control_file:
        # No control file was supplied so look it up from the build artifacts.
        suite_name = canonicalize_suite_name(name)
        control_file = _get_control_file_contents_by_name(build, ds,
                                                          suite_name)
        name = '%s-%s' % (build, suite_name)

    # Both minute-based limits fall back to the hour-based |timeout|.
    timeout_mins = timeout_mins or timeout * 60
    max_runtime_mins = max_runtime_mins or timeout * 60

    if not board:
        board = utils.ParseBuildName(build)[0]

    # Prepend build and board to the control file.
    suite_vars = {'board': board,
                  'build': build,
                  'check_hosts': check_hosts,
                  'pool': pool,
                  'num': num,
                  'file_bugs': file_bugs,
                  'timeout': timeout,
                  'timeout_mins': timeout_mins,
                  'devserver_url': ds.url(),
                  'priority': priority,
                  'suite_args' : suite_args,
                  'wait_for_results': wait_for_results,
                  'job_retry': job_retry,
                  'max_runtime_mins': max_runtime_mins
                  }
    control_file = tools.inject_vars(suite_vars, control_file)

    return rpc_utils.create_job_common(name,
                                       priority=priority,
                                       timeout_mins=timeout_mins,
                                       max_runtime_mins=max_runtime_mins,
                                       control_type='Server',
                                       control_file=control_file,
                                       hostless=True,
                                       keyvals=timings)
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 197 | |
| 198 | |
# TODO: hide the following rpcs under is_moblab
def moblab_only(func):
    """Ensure moblab specific functions only run on Moblab devices.

    @param func: RPC function to guard.
    @returns: Wrapped function that raises error.RPCException when called
              on a system that is not a Moblab.
    """
    def verify(*args, **kwargs):
        if not utils.is_moblab():
            # Interpolate the function name into the message with %.
            # Previously the name was passed as a second exception argument,
            # which made str(e) render as a tuple instead of a message.
            raise error.RPCException('RPC: %s can only run on Moblab Systems!'
                                     % func.__name__)
        return func(*args, **kwargs)
    return verify
| 208 | |
| 209 | |
@moblab_only
def get_config_values():
    """Returns all config values parsed from global and shadow configs.

    Config values are grouped by sections, and each section is composed of
    a list of name value pairs.

    @returns: Serializable dict mapping each section name to its list of
              (name, value) pairs.
    """
    config_values = dict((section, _CONFIG.config.items(section))
                         for section in _CONFIG.get_sections())
    return rpc_utils.prepare_for_serialization(config_values)
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 222 | |
| 223 | |
@moblab_only
def update_config_handler(config_values):
    """
    Update config values and override shadow config.

    @param config_values: See get_moblab_settings().

    @raises error.RPCException: if the shadow config file is missing. The
                                check happens before any value is applied,
                                so a failure leaves the config untouched.
    """
    # Validate the precondition first: previously the in-memory config was
    # overridden before this check, so a missing shadow file raised an error
    # while leaving the process with half-applied settings.
    if not _CONFIG.shadow_file or not os.path.exists(_CONFIG.shadow_file):
        raise error.RPCException('Shadow config file does not exist.')

    for section, config_value_list in config_values.iteritems():
        for key, value in config_value_list:
            _CONFIG.override_config_value(section, key, value)

    # Persist the merged configuration into the shadow file.
    with open(_CONFIG.shadow_file, 'w') as config_file:
        _CONFIG.config.write(config_file)
    # TODO (sbasi) crbug.com/403916 - Remove the reboot command and
    # instead restart the services that rely on the config values.
    os.system('sudo reboot')
| 242 | |
| 243 | |
@moblab_only
def reset_config_settings():
    """Reset all shadow config overrides and reboot.

    Truncates the shadow config file so only default (global) config values
    remain, then reboots so all services pick up the defaults.
    """
    # Opening with 'w' truncates the shadow file; nothing is written.
    with open(_CONFIG.shadow_file, 'w') as config_file:
        pass
    os.system('sudo reboot')
| 249 | |
| 250 | |
@moblab_only
def set_boto_key(boto_key):
    """Update the boto_key file.

    @param boto_key: File name of boto_key uploaded through handle_file_upload.

    @raises error.RPCException: if the uploaded key file does not exist.
    """
    if os.path.exists(boto_key):
        # Install the uploaded key into the moblab user's home directory.
        shutil.copyfile(boto_key, moblab_host.MOBLAB_BOTO_LOCATION)
    else:
        raise error.RPCException('Boto key: %s does not exist!' % boto_key)
Dan Shi | 193905e | 2014-07-25 23:33:09 -0700 | [diff] [blame] | 260 | |
| 261 | |
def get_job_history(**filter_data):
    """Get history of the job, including the special tasks executed for the job

    @param filter_data: filter for the call, should at least include
                        {'job_id': [job id]}
    @returns: JSON string of the job's history, including the information such
              as the hosts run the job and the special tasks executed before
              and after the job.
    """
    job_info = job_history.get_job_info(filter_data['job_id'])
    return rpc_utils.prepare_for_serialization(job_info.get_history())
| 274 | |
| 275 | |
def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Get history of a list of host.

    The return is a JSON string of host history for each host, for example,
    {'172.22.33.51': [{'status': 'Resetting'
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                       {'status': 'Running'
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                     ]
    }
    @param start_time: start time to search for history, can be string value or
                       epoch time.
    @param end_time: end time to search for history, can be string value or
                     epoch time.
    @param hosts: A list of hosts to search for history. Default is None.
    @param board: board type of hosts. Default is None.
    @param pool: pool type of hosts. Default is None.
    @returns: JSON string of the host history.
    """
    # Fetch the per-host histories in parallel (4 worker processes) and
    # convert the result into a serializable form for the RPC layer.
    details = host_history.get_history_details(
            start_time=start_time, end_time=end_time,
            hosts=hosts, board=board, pool=pool,
            process_pool_size=4)
    return rpc_utils.prepare_for_serialization(details)
Jakob Juelich | 59cfe54 | 2014-09-02 16:37:46 -0700 | [diff] [blame] | 306 | |
| 307 | |
def shard_heartbeat(shard_hostname, jobs=(), hqes=(),
                    known_job_ids=(), known_host_ids=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated
                 with newer status from a shard. Note that for every
                 hostqueueentry the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.

    @returns: Serialized representations of hosts, jobs and their dependencies
              to be inserted into a shard's database.
    """
    # Design note: why does every heartbeat carry the full lists of known job
    # and host ids? Alternatives that were considered and rejected:
    # 1. Send only the highest known job and host id. Works for jobs (newer
    #    jobs always have larger ids, and a job never gets assigned to a
    #    shard after the heartbeat that could have carried it), but not for
    #    hosts: a currently-leased host is skipped now yet may need to be
    #    sent in a later heartbeat, i.e. with an id BELOW the maximum the
    #    shard already knows.
    # 2. Send only the counts of jobs/hosts the shard knows and compare them
    #    with the number of records whose shard_id points at this shard;
    #    resend everything of a type on mismatch. Fine for hosts (few of
    #    them), but resending all jobs is a large overhead, and deletions
    #    create tricky edge cases.
    # 3. Mix 1 for jobs with 2 for hosts. Inconsistent, confusing, and the
    #    deletion edge cases remain.
    # The cost of the chosen approach is small: at peak one board has about
    # 1200 created-but-unfinished jobs (numbers: http://goo.gl/gQCGWH).
    # With ~6-digit ids plus JSON comma/space overhead, each id costs about
    # 8 bytes, so even 5000 ids are ~40KB of traffic, and a NOT IN query
    # with 5000 ids measured around 30ms. That outweighs the drawbacks of
    # the alternatives above.
    with stats.Timer('shard_heartbeat'):
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        hosts, jobs = rpc_utils.find_records_for_shard(
                shard_obj,
                known_job_ids=known_job_ids, known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
        }
Jakob Juelich | 82b7d1c | 2014-09-15 16:10:57 -0700 | [diff] [blame] | 367 | |
| 368 | |
def get_shards(**filter_data):
    """Return a list of all shards.

    @returns A sequence of nested dictionaries of shard information.
    """
    shards = models.Shard.query_objects(filter_data)
    shard_dicts = rpc_utils.prepare_rows_as_nested_dicts(shards, ())
    # Attach the label names of each shard to its serialized form.
    for shard_dict, shard in zip(shard_dicts, shards):
        shard_dict['labels'] = [label.name for label in shard.labels.all()]
    return shard_dicts
| 380 | |
| 381 | |
def add_shard(hostname, label):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param label: A platform label. Jobs of this label will be assigned to the
                  shard.

    @raises error.RPCException: If label provided doesn't start with `board:`
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exists.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: Id of the newly created shard.
    """
    if not label.startswith('board:'):
        raise error.RPCException('Sharding only supported for `board:.*` '
                                 'labels.')

    # Resolve the label before creating the shard so that a bad label name
    # never leaves an orphaned shard record behind.
    label_object = models.Label.smart_get(label)
    shard = models.Shard.add_object(hostname=hostname)
    shard.labels.add(label_object)
    return shard.id
| 403 | |
| 404 | |
def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Repair task is scheduled for them. This reboots the DUTs
    and therefore clears all running processes that might be left.

    The shard_id of jobs of that shard will be set to None.

    The status of jobs that haven't been reported to be finished yet, will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)

    # TODO(beeps): Power off shard

    # Schedule a Repair for every DUT that was assigned to the shard. For
    # ChromeOS hosts Repair always includes at least one reboot (escalating
    # through further repair steps, verifying after each), which guarantees
    # that no processes started by the shard's tests survive on the DUT.
    # Deliberately NOT set to Repair Failed: that would run Verify first,
    # Verify would likely pass, and nothing would change on the DUT.
    shard_hosts = models.Host.objects.filter(shard=shard)
    for host in shard_hosts:
        models.SpecialTask.objects.create(
            task=models.SpecialTask.Task.REPAIR,
            host=host,
            requested_by=models.User.current_user())
    shard_hosts.update(shard=None)

    # Detach the shard's jobs so the master scheduler picks them up again.
    models.Job.objects.filter(shard=shard).update(shard=None)

    shard.labels.clear()
    shard.delete()