Richard Barnette | 6c2b70a | 2017-01-26 13:40:51 -0800 | [diff] [blame] | 1 | # pylint: disable=missing-docstring |
Don Garrett | a06ea08 | 2017-01-13 00:04:26 +0000 | [diff] [blame] | 2 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 3 | """\ |
| 4 | Functions to expose over the RPC interface. |
| 5 | |
| 6 | For all modify* and delete* functions that ask for an 'id' parameter to |
| 7 | identify the object to operate on, the id may be either |
| 8 | * the database row ID |
| 9 | * the name of the object (label name, hostname, user login, etc.) |
| 10 | * a dictionary containing uniquely identifying field (this option should seldom |
| 11 | be used) |
| 12 | |
| 13 | When specifying foreign key fields (i.e. adding hosts to a label, or adding |
| 14 | users to an ACL group), the given value may be either the database row ID or the |
| 15 | name of the object. |
| 16 | |
| 17 | All get* functions return lists of dictionaries. Each dictionary represents one |
| 18 | object and maps field names to values. |
| 19 | |
| 20 | Some examples: |
| 21 | modify_host(2, hostname='myhost') # modify hostname of host with database ID 2 |
| 22 | modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2' |
| 23 | modify_test('sleeptest', test_type='Client', params=', seconds=60') |
| 24 | delete_acl_group(1) # delete by ID |
| 25 | delete_acl_group('Everyone') # delete by name |
| 26 | acl_group_add_users('Everyone', ['mbligh', 'showard']) |
| 27 | get_jobs(owner='showard', status='Queued') |
| 28 | |
mbligh | 93c80e6 | 2009-02-03 17:48:30 +0000 | [diff] [blame] | 29 | See doctests/001_rpc_test.txt for (lots) more examples. |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 30 | """ |
| 31 | |
| 32 | __author__ = 'showard@google.com (Steve Howard)' |
| 33 | |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 34 | import ast |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 35 | import datetime |
Shuqian Zhao | 4c0d290 | 2016-01-12 17:03:15 -0800 | [diff] [blame] | 36 | import logging |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 37 | import os |
Dan Shi | 4a3deb8 | 2016-10-27 21:32:30 -0700 | [diff] [blame] | 38 | import sys |
MK Ryu | 9c5fbbe | 2015-02-11 15:46:22 -0800 | [diff] [blame] | 39 | |
Moises Osorio | 2dc7a10 | 2014-12-02 18:24:02 -0800 | [diff] [blame] | 40 | from django.db.models import Count |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 41 | |
showard | cafd16e | 2009-05-29 18:37:49 +0000 | [diff] [blame] | 42 | import common |
Aviv Keshet | 14cac44 | 2016-11-20 21:44:11 -0800 | [diff] [blame] | 43 | # TODO(akeshet): Replace with monarch stats once we know how to instrument rpc |
| 44 | # server with ts_mon. |
Gabe Black | 1e1c41b | 2015-02-04 23:55:15 -0800 | [diff] [blame] | 45 | from autotest_lib.client.common_lib.cros.graphite import autotest_stats |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 46 | from autotest_lib.client.common_lib import control_data |
| 47 | from autotest_lib.client.common_lib import error |
| 48 | from autotest_lib.client.common_lib import global_config |
| 49 | from autotest_lib.client.common_lib import priorities |
| 50 | from autotest_lib.client.common_lib import time_utils |
| 51 | from autotest_lib.client.common_lib.cros import dev_server |
Allen Li | a59b126 | 2016-12-14 12:53:51 -0800 | [diff] [blame] | 52 | from autotest_lib.frontend.afe import control_file as control_file_lib |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 53 | from autotest_lib.frontend.afe import model_attributes |
| 54 | from autotest_lib.frontend.afe import model_logic |
| 55 | from autotest_lib.frontend.afe import models |
Allen Li | a59b126 | 2016-12-14 12:53:51 -0800 | [diff] [blame] | 56 | from autotest_lib.frontend.afe import rpc_utils |
Moises Osorio | 2dc7a10 | 2014-12-02 18:24:02 -0800 | [diff] [blame] | 57 | from autotest_lib.frontend.tko import models as tko_models |
Jiaxi Luo | aac5457 | 2014-06-04 13:57:02 -0700 | [diff] [blame] | 58 | from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 59 | from autotest_lib.server import frontend |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 60 | from autotest_lib.server import utils |
Dan Shi | d215dbe | 2015-06-18 16:14:59 -0700 | [diff] [blame] | 61 | from autotest_lib.server.cros import provision |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 62 | from autotest_lib.server.cros.dynamic_suite import constants |
| 63 | from autotest_lib.server.cros.dynamic_suite import control_file_getter |
| 64 | from autotest_lib.server.cros.dynamic_suite import suite as SuiteBase |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 65 | from autotest_lib.server.cros.dynamic_suite import tools |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 66 | from autotest_lib.server.cros.dynamic_suite.suite import Suite |
Aviv Keshet | 7ee9586 | 2016-08-30 15:18:27 -0700 | [diff] [blame] | 67 | from autotest_lib.server.lib import status_history |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 68 | from autotest_lib.site_utils import host_history |
| 69 | from autotest_lib.site_utils import job_history |
| 70 | from autotest_lib.site_utils import server_manager_utils |
| 71 | from autotest_lib.site_utils import stable_version_utils |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 72 | |
Moises Osorio | 2dc7a10 | 2014-12-02 18:24:02 -0800 | [diff] [blame] | 73 | |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 74 | _CONFIG = global_config.global_config |
| 75 | |
| 76 | # Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py. |
| 77 | |
def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job.

    @param job: A job whose `parameterized_job` carries an 'image'
        parameter for the autoupdate_ParameterizedJob test.
    @return: The string value of that 'image' parameter.
    """
    autoupdate_test = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_parameter = autoupdate_test.testparameter_set.get(
            test=autoupdate_test, name='image')
    job_parameters = job.parameterized_job.parameterizedjobparameter_set
    return job_parameters.get(test_parameter=image_parameter).parameter_value
| 86 | |
| 87 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 88 | # labels |
| 89 | |
def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.
    """
    label_model = models.Label.smart_get(id)
    label_model.update_object(data)

    # When running on the master, propagate the modification to the
    # shards that serve hosts carrying this label.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'modify_label',
                             False, id=id, **data)
| 103 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 104 | |
def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.
    """
    label_model = models.Label.smart_get(id)
    # Hosts that have the label to be deleted. Save this info before
    # the label is deleted to use it later.  host_set.all() already
    # yields models.Host instances, so re-fetching each host with
    # models.Host.smart_get(h.id) (one extra query per host) is not
    # needed; materialize the queryset once instead.
    hosts = list(label_model.host_set.all())
    label_model.delete()

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'delete_label', False, id=id)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 121 | |
Prashanth Balasubramanian | 744898f | 2015-01-13 05:04:16 -0800 | [diff] [blame] | 122 | |
MK Ryu | 9c5fbbe | 2015-02-11 15:46:22 -0800 | [diff] [blame] | 123 | def add_label(name, ignore_exception_if_exists=False, **kwargs): |
MK Ryu | cf027c6 | 2015-03-04 12:00:50 -0800 | [diff] [blame] | 124 | """Adds a new label of a given name. |
MK Ryu | 9c5fbbe | 2015-02-11 15:46:22 -0800 | [diff] [blame] | 125 | |
| 126 | @param name: label name. |
| 127 | @param ignore_exception_if_exists: If True and the exception was |
| 128 | thrown due to the duplicated label name when adding a label, |
| 129 | then suppress the exception. Default is False. |
| 130 | @param kwargs: keyword args that store more info about a label |
| 131 | other than the name. |
| 132 | @return: int/long id of a new label. |
| 133 | """ |
| 134 | # models.Label.add_object() throws model_logic.ValidationError |
| 135 | # when it is given a label name that already exists. |
| 136 | # However, ValidationError can be thrown with different errors, |
| 137 | # and those errors should be thrown up to the call chain. |
| 138 | try: |
| 139 | label = models.Label.add_object(name=name, **kwargs) |
| 140 | except: |
| 141 | exc_info = sys.exc_info() |
| 142 | if ignore_exception_if_exists: |
| 143 | label = rpc_utils.get_label(name) |
| 144 | # If the exception is raised not because of duplicated |
| 145 | # "name", then raise the original exception. |
| 146 | if label is None: |
| 147 | raise exc_info[0], exc_info[1], exc_info[2] |
| 148 | else: |
| 149 | raise exc_info[0], exc_info[1], exc_info[2] |
| 150 | return label.id |
| 151 | |
| 152 | |
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label_obj = models.Label.smart_get(id)
    host_list = models.Host.smart_get_bulk(hosts)
    if label_obj.platform:
        models.Host.check_no_platform(host_list)
    if label_obj.name.startswith('board:'):
        # A host may carry at most one board label.
        models.Host.check_board_labels_allowed(host_list, [label_obj.name])
    label_obj.host_set.add(*host_list)
| 169 | |
| 170 | |
def _create_label_everywhere(id, hosts):
    """
    Ensure a label exists on the master and on the shards serving `hosts`.

    ALERT! This method should be run only on master not shards!
    DO NOT RUN THIS ON A SHARD!!! Deputies will hate you if you do!!!

    This method exists primarily to serve label_add_hosts() and
    host_add_labels(): it holds the label check/add logic that creates
    the label locally (when missing) and then tells the shards that
    service the given hosts to create the same label.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # smart_get only treats strings as label names (a hack in and of
        # itself), so a missing label can only be created when the caller
        # supplied a name, not a numeric id.
        if not isinstance(id, basestring):
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)
        label = models.Label.smart_get(add_label(id))

    # Replicate the label to the shards with the same id it has on the
    # master.  A shard may already have the label (e.g. an earlier call
    # added it there for another host): for example, label L1 added to a
    # host in shard S1 exists on master and S1 but not S2; adding L1 later
    # to hosts in S1 and S2 must still succeed on both.  The shard is
    # therefore told to ignore the duplicate-name error.
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
| 216 | |
| 217 | |
@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on master not shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
        while the label does not exist.
    """
    # Make sure the label exists on the master and the relevant shards.
    _create_label_everywhere(id, hosts)

    # Attach the label locally on the master...
    add_label_to_hosts(id, hosts)

    # ...then mirror the attachment on each shard serving these hosts.
    rpc_utils.fanout_rpc(models.Host.smart_get_bulk(hosts),
                         'add_label_to_hosts', id=id)
showard | bbabf50 | 2008-06-06 00:02:02 +0000 | [diff] [blame] | 241 | |
| 242 | |
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts that need to remove the label from.
    """
    # Resolve the hosts first so a bad hostname fails before the label
    # lookup, matching the original evaluation order.
    host_objs = models.Host.smart_get_bulk(hosts)
    label_obj = models.Label.smart_get(id)
    label_obj.host_set.remove(*host_objs)
showard | bbabf50 | 2008-06-06 00:02:02 +0000 | [diff] [blame] | 251 | |
| 252 | |
@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on master not shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    # Resolve the host objects up front so the same set is used for the
    # shard fanout after the local removal.
    host_objs = models.Host.smart_get_bulk(hosts)
    remove_label_from_hosts(id, hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)
| 266 | |
MK Ryu | cf027c6 | 2015-03-04 12:00:50 -0800 | [diff] [blame] | 267 | |
def get_labels(exclude_filters=(), **filter_data):
    """\
    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    label_query = models.Label.query_objects(filter_data)
    for exclusion in exclude_filters:
        label_query = label_query.exclude(**exclusion)
    return rpc_utils.prepare_rows_as_nested_dicts(label_query,
                                                  ('atomic_group',))
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 278 | |
| 279 | |
| 280 | # atomic groups |
| 281 | |
def add_atomic_group(name, max_number_of_machines=None, description=None):
    """Creates a new atomic group.

    @param name: Name of the atomic group.
    @param max_number_of_machines: Optional machine-count cap.
    @param description: Optional free-form description.
    @return: id of the newly created atomic group.
    """
    group = models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description)
    return group.id
| 286 | |
| 287 | |
def modify_atomic_group(id, **data):
    """Updates fields of the atomic group identified by `id`."""
    group = models.AtomicGroup.smart_get(id)
    group.update_object(data)
| 290 | |
| 291 | |
def delete_atomic_group(id):
    """Deletes the atomic group identified by `id`."""
    group = models.AtomicGroup.smart_get(id)
    group.delete()
| 294 | |
| 295 | |
def atomic_group_add_labels(id, labels):
    """Associates the given labels with an atomic group."""
    resolved_labels = models.Label.smart_get_bulk(labels)
    group = models.AtomicGroup.smart_get(id)
    group.label_set.add(*resolved_labels)
| 299 | |
| 300 | |
def atomic_group_remove_labels(id, labels):
    """Dissociates the given labels from an atomic group."""
    resolved_labels = models.Label.smart_get_bulk(labels)
    group = models.AtomicGroup.smart_get(id)
    group.label_set.remove(*resolved_labels)
| 304 | |
| 305 | |
def get_atomic_groups(**filter_data):
    """Returns serialized atomic groups matching `filter_data`."""
    groups = models.AtomicGroup.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(groups)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 309 | |
| 310 | |
| 311 | # hosts |
| 312 | |
def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    """Creates a new host.

    @param hostname: Name of the host.
    @param status: Optional initial status.
    @param locked: Optional; if truthy, `lock_reason` must be provided.
    @param lock_reason: Reason for locking the host.
    @param protection: Optional protection level.
    @return: id of the newly created host.
    @raises model_logic.ValidationError: When locking without a reason.
    """
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    host = models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection)
    return host.id
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 321 | |
| 322 | |
@rpc_utils.route_rpc_to_master
def modify_host(id, **kwargs):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard,
    this will call `modify_host_local` RPC to the responsible shard. This
    means if a host is being locked using this function, this change will
    also propagate to shards.
    When this is called on a shard, the shard just routes the RPC to the
    master and does nothing.

    @param id: id of the host to modify.
    @param kwargs: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(kwargs)
    host = models.Host.smart_get(id)
    try:
        rpc_utils.check_modify_host_locking(host, kwargs)
    except model_logic.ValidationError as e:
        # force_modify_locking lets the caller override lock-modification
        # errors; anything else propagates.
        if not kwargs.get('force_modify_locking', False):
            raise
        logging.exception('The following exception will be ignored and lock '
                          'modification will be enforced. %s', e)

    # Stamp the lock time here so the master and the shard record the
    # exact same `lock_time` value for this host.
    if kwargs.get('locked', None) and 'lock_time' not in kwargs:
        kwargs['lock_time'] = datetime.datetime.now()
    host.update_object(kwargs)

    # force_modify_locking is not a database field; strip it before
    # forwarding the update to the shard.
    kwargs.pop('force_modify_locking', None)
    rpc_utils.fanout_rpc([host], 'modify_host_local',
                         include_hostnames=False, id=id, **kwargs)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 357 | |
| 358 | |
def modify_host_local(id, **kwargs):
    """Modify host attributes in local DB.

    @param id: Host id.
    @param kwargs: key=value pairs of values to set on the host.
    """
    host = models.Host.smart_get(id)
    host.update_object(kwargs)
| 366 | |
| 367 | |
@rpc_utils.route_rpc_to_master
def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts in that match the
    filters is assigned to a shard, this will call `modify_hosts_local` RPC
    to the responsible shard.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    The filters are always applied on the master, not on the shards. This means
    if the states of a host differ on the master and a shard, the state on the
    master will be used. I.e. this means:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
      update the host (both on the shard and on the master), because the state
      of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair failed'}
      will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    # Copy so that mutations below (lock_time insertion, pop) never leak
    # into the caller's dictionary.
    update_data = update_data.copy()
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        try:
            rpc_utils.check_modify_host_locking(host, update_data)
        except model_logic.ValidationError as e:
            # 'force_modify_locking' lets the caller override
            # lock-modification errors; other errors propagate.
            if not update_data.get('force_modify_locking', False):
                raise
            logging.exception('The following exception will be ignored and '
                              'lock modification will be enforced. %s', e)

        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    # This is required to make `lock_time` for a host be exactly same
    # between the master and a shard.
    if update_data.get('locked', None) and 'lock_time' not in update_data:
        update_data['lock_time'] = datetime.datetime.now()
    for host in hosts:
        host.update_object(update_data)

    # force_modify_locking is not a database field; strip it before the
    # update is forwarded to the shards.
    update_data.pop('force_modify_locking', None)
    # Caution: Changing the filter from the original here. See docstring.
    rpc_utils.run_rpc_on_multiple_hostnames(
        'modify_hosts_local', affected_shard_hostnames,
        host_filter_data={'id__in': affected_host_ids},
        update_data=update_data)
| 426 | |
showard | 276f944 | 2009-05-20 00:33:16 +0000 | [diff] [blame] | 427 | |
def modify_hosts_local(host_filter_data, update_data):
    """Modify attributes of hosts in local DB.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    matching_hosts = models.Host.query_objects(host_filter_data)
    for host in matching_hosts:
        host.update_object(update_data)
| 436 | |
| 437 | |
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    resolved_labels = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.add(*resolved_labels)
| 446 | |
| 447 | |
@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform/board label.
    """
    # Make sure every label exists on the master and the relevant shards.
    for label in labels:
        _create_label_everywhere(label, [id])

    label_objs = models.Label.smart_get_bulk(labels)
    platforms = []
    boards = []
    for label in label_objs:
        if label.platform:
            platforms.append(label.name)
        if label.name.startswith('board:'):
            boards.append(label.name)
    if len(platforms) > 1 or not utils.board_labels_allowed(boards):
        raise model_logic.ValidationError(
            {'labels': ('Adding more than one platform label, or a list of '
                        'non-compatible board labels.: %s %s' %
                        (', '.join(platforms), ', '.join(boards)))})

    host_obj = models.Host.smart_get(id)
    if platforms:
        # A platform label is being added; the host must not have one yet.
        models.Host.check_no_platform([host_obj])
    if boards:
        models.Host.check_board_labels_allowed([host_obj], labels)
    add_labels_to_host(id, labels)

    # Mirror the attachment on the host's shard.
    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 480 | |
| 481 | |
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    resolved_labels = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.remove(*resolved_labels)
| 490 | |
| 491 | |
@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    # Detach locally first, then mirror the removal on the host's shard.
    remove_labels_from_host(id, labels)

    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 504 | |
| 505 | |
MK Ryu | acf3592 | 2014-10-03 14:56:49 -0700 | [diff] [blame] | 506 | def get_host_attribute(attribute, **host_filter_data): |
| 507 | """ |
| 508 | @param attribute: string name of attribute |
| 509 | @param host_filter_data: filter data to apply to Hosts to choose hosts to |
| 510 | act upon |
| 511 | """ |
| 512 | hosts = rpc_utils.get_host_query((), False, False, True, host_filter_data) |
| 513 | hosts = list(hosts) |
| 514 | models.Host.objects.populate_relationships(hosts, models.HostAttribute, |
| 515 | 'attribute_list') |
| 516 | host_attr_dicts = [] |
| 517 | for host_obj in hosts: |
| 518 | for attr_obj in host_obj.attribute_list: |
| 519 | if attr_obj.attribute == attribute: |
| 520 | host_attr_dicts.append(attr_obj.get_object_dict()) |
| 521 | return rpc_utils.prepare_for_serialization(host_attr_dicts) |
| 522 | |
| 523 | |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 524 | def set_host_attribute(attribute, value, **host_filter_data): |
| 525 | """ |
MK Ryu | 26f0c93 | 2015-05-28 18:14:33 -0700 | [diff] [blame] | 526 | @param attribute: string name of attribute |
| 527 | @param value: string, or None to delete an attribute |
| 528 | @param host_filter_data: filter data to apply to Hosts to choose hosts to |
| 529 | act upon |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 530 | """ |
| 531 | assert host_filter_data # disallow accidental actions on all hosts |
| 532 | hosts = models.Host.query_objects(host_filter_data) |
| 533 | models.AclGroup.check_for_acl_violation_hosts(hosts) |
MK Ryu | 8e2c2d0 | 2016-01-06 15:24:38 -0800 | [diff] [blame] | 534 | for host in hosts: |
| 535 | host.set_or_delete_attribute(attribute, value) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 536 | |
MK Ryu | 26f0c93 | 2015-05-28 18:14:33 -0700 | [diff] [blame] | 537 | # Master forwards this RPC to shards. |
| 538 | if not utils.is_shard(): |
| 539 | rpc_utils.fanout_rpc(hosts, 'set_host_attribute', False, |
| 540 | attribute=attribute, value=value, **host_filter_data) |
| 541 | |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 542 | |
Jakob Juelich | 50e91f7 | 2014-10-01 12:43:23 -0700 | [diff] [blame] | 543 | @rpc_utils.forward_single_host_rpc_to_shard |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 544 | def delete_host(id): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 545 | models.Host.smart_get(id).delete() |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 546 | |
| 547 | |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 548 | def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False, |
Dan Shi | 37df54d | 2015-12-14 11:16:28 -0800 | [diff] [blame] | 549 | exclude_atomic_group_hosts=False, valid_only=True, |
| 550 | include_current_job=False, **filter_data): |
| 551 | """Get a list of dictionaries which contains the information of hosts. |
| 552 | |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 553 | @param multiple_labels: match hosts in all of the labels given. Should |
| 554 | be a list of label names. |
| 555 | @param exclude_only_if_needed_labels: Exclude hosts with at least one |
| 556 | "only_if_needed" label applied. |
| 557 | @param exclude_atomic_group_hosts: Exclude hosts that have one or more |
| 558 | atomic group labels associated with them. |
Dan Shi | 37df54d | 2015-12-14 11:16:28 -0800 | [diff] [blame] | 559 | @param include_current_job: Set to True to include ids of currently running |
| 560 | job and special task. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 561 | """ |
showard | 43a3d26 | 2008-11-12 18:17:05 +0000 | [diff] [blame] | 562 | hosts = rpc_utils.get_host_query(multiple_labels, |
| 563 | exclude_only_if_needed_labels, |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 564 | exclude_atomic_group_hosts, |
showard | 8aa84fc | 2009-09-16 17:17:55 +0000 | [diff] [blame] | 565 | valid_only, filter_data) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 566 | hosts = list(hosts) |
| 567 | models.Host.objects.populate_relationships(hosts, models.Label, |
| 568 | 'label_list') |
| 569 | models.Host.objects.populate_relationships(hosts, models.AclGroup, |
| 570 | 'acl_list') |
| 571 | models.Host.objects.populate_relationships(hosts, models.HostAttribute, |
| 572 | 'attribute_list') |
showard | 43a3d26 | 2008-11-12 18:17:05 +0000 | [diff] [blame] | 573 | host_dicts = [] |
| 574 | for host_obj in hosts: |
| 575 | host_dict = host_obj.get_object_dict() |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 576 | host_dict['labels'] = [label.name for label in host_obj.label_list] |
showard | 909c914 | 2009-07-07 20:54:42 +0000 | [diff] [blame] | 577 | host_dict['platform'], host_dict['atomic_group'] = (rpc_utils. |
| 578 | find_platform_and_atomic_group(host_obj)) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 579 | host_dict['acls'] = [acl.name for acl in host_obj.acl_list] |
| 580 | host_dict['attributes'] = dict((attribute.attribute, attribute.value) |
| 581 | for attribute in host_obj.attribute_list) |
Dan Shi | 37df54d | 2015-12-14 11:16:28 -0800 | [diff] [blame] | 582 | if include_current_job: |
| 583 | host_dict['current_job'] = None |
| 584 | host_dict['current_special_task'] = None |
| 585 | entries = models.HostQueueEntry.objects.filter( |
| 586 | host_id=host_dict['id'], active=True, complete=False) |
| 587 | if entries: |
| 588 | host_dict['current_job'] = ( |
| 589 | entries[0].get_object_dict()['job']) |
| 590 | tasks = models.SpecialTask.objects.filter( |
| 591 | host_id=host_dict['id'], is_active=True, is_complete=False) |
| 592 | if tasks: |
| 593 | host_dict['current_special_task'] = ( |
| 594 | '%d-%s' % (tasks[0].get_object_dict()['id'], |
| 595 | tasks[0].get_object_dict()['task'].lower())) |
showard | 43a3d26 | 2008-11-12 18:17:05 +0000 | [diff] [blame] | 596 | host_dicts.append(host_dict) |
| 597 | return rpc_utils.prepare_for_serialization(host_dicts) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 598 | |
| 599 | |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 600 | def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False, |
showard | 8aa84fc | 2009-09-16 17:17:55 +0000 | [diff] [blame] | 601 | exclude_atomic_group_hosts=False, valid_only=True, |
| 602 | **filter_data): |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 603 | """ |
| 604 | Same parameters as get_hosts(). |
| 605 | |
| 606 | @returns The number of matching hosts. |
| 607 | """ |
showard | 43a3d26 | 2008-11-12 18:17:05 +0000 | [diff] [blame] | 608 | hosts = rpc_utils.get_host_query(multiple_labels, |
| 609 | exclude_only_if_needed_labels, |
showard | 87cc38f | 2009-08-20 23:37:04 +0000 | [diff] [blame] | 610 | exclude_atomic_group_hosts, |
showard | 8aa84fc | 2009-09-16 17:17:55 +0000 | [diff] [blame] | 611 | valid_only, filter_data) |
showard | 43a3d26 | 2008-11-12 18:17:05 +0000 | [diff] [blame] | 612 | return hosts.count() |
showard | 1385b16 | 2008-03-13 15:59:40 +0000 | [diff] [blame] | 613 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 614 | |
| 615 | # tests |
| 616 | |
showard | 909c7a6 | 2008-07-15 21:52:38 +0000 | [diff] [blame] | 617 | def add_test(name, test_type, path, author=None, dependencies=None, |
showard | 3d9899a | 2008-07-31 02:11:58 +0000 | [diff] [blame] | 618 | experimental=True, run_verify=None, test_class=None, |
showard | 909c7a6 | 2008-07-15 21:52:38 +0000 | [diff] [blame] | 619 | test_time=None, test_category=None, description=None, |
| 620 | sync_count=1): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 621 | return models.Test.add_object(name=name, test_type=test_type, path=path, |
showard | 909c7a6 | 2008-07-15 21:52:38 +0000 | [diff] [blame] | 622 | author=author, dependencies=dependencies, |
| 623 | experimental=experimental, |
| 624 | run_verify=run_verify, test_time=test_time, |
| 625 | test_category=test_category, |
| 626 | sync_count=sync_count, |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 627 | test_class=test_class, |
| 628 | description=description).id |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 629 | |
| 630 | |
def modify_test(id, **data):
    """Update fields of the test identified by id (or name)."""
    test = models.Test.smart_get(id)
    test.update_object(data)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 633 | |
| 634 | |
def delete_test(id):
    """Delete the test identified by id (or name)."""
    test = models.Test.smart_get(id)
    test.delete()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 637 | |
| 638 | |
def get_tests(**filter_data):
    """Return serialized dicts for all tests matching filter_data."""
    tests = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(tests)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 642 | |
| 643 | |
Moises Osorio | 2dc7a10 | 2014-12-02 18:24:02 -0800 | [diff] [blame] | 644 | def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name): |
| 645 | """Gets the counts of all passed and failed tests from the matching jobs. |
| 646 | |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 647 | @param job_name_prefix: Name prefix of the jobs to get the summary |
| 648 | from, e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'. |
Moises Osorio | 2dc7a10 | 2014-12-02 18:24:02 -0800 | [diff] [blame] | 649 | @param label_name: Label that must be set in the jobs, e.g., |
| 650 | 'cros-version:butterfly-release/R40-6457.21.0'. |
| 651 | |
| 652 | @returns A summary of the counts of all the passed and failed tests. |
| 653 | """ |
| 654 | job_ids = list(models.Job.objects.filter( |
| 655 | name__startswith=job_name_prefix, |
| 656 | dependency_labels__name=label_name).values_list( |
| 657 | 'pk', flat=True)) |
| 658 | summary = {'passed': 0, 'failed': 0} |
| 659 | if not job_ids: |
| 660 | return summary |
| 661 | |
| 662 | counts = (tko_models.TestView.objects.filter( |
| 663 | afe_job_id__in=job_ids).exclude( |
| 664 | test_name='SERVER_JOB').exclude( |
| 665 | test_name__startswith='CLIENT_JOB').values( |
| 666 | 'status').annotate( |
| 667 | count=Count('status'))) |
| 668 | for status in counts: |
| 669 | if status['status'] == 'GOOD': |
| 670 | summary['passed'] += status['count'] |
| 671 | else: |
| 672 | summary['failed'] += status['count'] |
| 673 | return summary |
| 674 | |
| 675 | |
showard | 2b9a88b | 2008-06-13 20:55:03 +0000 | [diff] [blame] | 676 | # profilers |
| 677 | |
def add_profiler(name, description=None):
    """Create a new profiler; returns the new profiler's id."""
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
| 680 | |
| 681 | |
def modify_profiler(id, **data):
    """Update fields of the profiler identified by id (or name)."""
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
| 684 | |
| 685 | |
def delete_profiler(id):
    """Delete the profiler identified by id (or name)."""
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
| 688 | |
| 689 | |
def get_profilers(**filter_data):
    """Return serialized dicts for all profilers matching filter_data."""
    profilers = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profilers)
| 693 | |
| 694 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 695 | # users |
| 696 | |
def add_user(login, access_level=None):
    """Create a new user; returns the new user's id."""
    user = models.User.add_object(login=login, access_level=access_level)
    return user.id
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 699 | |
| 700 | |
def modify_user(id, **data):
    """Update fields of the user identified by id (or login)."""
    user = models.User.smart_get(id)
    user.update_object(data)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 703 | |
| 704 | |
def delete_user(id):
    """Delete the user identified by id (or login)."""
    user = models.User.smart_get(id)
    user.delete()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 707 | |
| 708 | |
def get_users(**filter_data):
    """Return serialized dicts for all users matching filter_data."""
    users = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(users)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 712 | |
| 713 | |
| 714 | # acl groups |
| 715 | |
def add_acl_group(name, description=None):
    """Create a new ACL group seeded with the current user; returns its id."""
    group = models.AclGroup.add_object(name=name, description=description)
    # Groups must never be empty, so membership starts with the creator.
    group.users.add(models.User.current_user())
    return group.id
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 720 | |
| 721 | |
def modify_acl_group(id, **data):
    """Update an ACL group's fields after an access check."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    # Never leave a group without at least one member.
    acl_group.add_current_user_if_empty()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 727 | |
| 728 | |
def acl_group_add_users(id, users):
    """Add the given users (ids or logins) to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objs = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objs)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 734 | |
| 735 | |
def acl_group_remove_users(id, users):
    """Remove the given users (ids or logins) from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objs = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objs)
    # Never leave a group without at least one member.
    acl_group.add_current_user_if_empty()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 742 | |
| 743 | |
def acl_group_add_hosts(id, hosts):
    """Add the given hosts (ids or hostnames) to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objs = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objs)
    acl_group.on_host_membership_change()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 750 | |
| 751 | |
def acl_group_remove_hosts(id, hosts):
    """Remove the given hosts (ids or hostnames) from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objs = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objs)
    acl_group.on_host_membership_change()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 758 | |
| 759 | |
def delete_acl_group(id):
    """Delete the ACL group identified by id (or name)."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 762 | |
| 763 | |
def get_acl_groups(**filter_data):
    """Return ACL group dicts annotated with member logins and hostnames."""
    acl_groups = models.AclGroup.list_objects(filter_data)
    for group_dict in acl_groups:
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [user.login for user in group.users.all()]
        group_dict['hosts'] = [host.hostname for host in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(acl_groups)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 773 | |
| 774 | |
| 775 | # jobs |
| 776 | |
Richard Barnette | 8e33b4e | 2016-05-21 12:12:26 -0700 | [diff] [blame] | 777 | def generate_control_file(tests=(), profilers=(), |
showard | 91f8510 | 2009-10-12 20:34:52 +0000 | [diff] [blame] | 778 | client_control_file='', use_container=False, |
Richard Barnette | 8e33b4e | 2016-05-21 12:12:26 -0700 | [diff] [blame] | 779 | profile_only=None, db_tests=True, |
| 780 | test_source_build=None): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 781 | """ |
Richard Barnette | 8e33b4e | 2016-05-21 12:12:26 -0700 | [diff] [blame] | 782 | Generates a client-side control file to run tests. |
mbligh | 120351e | 2009-01-24 01:40:45 +0000 | [diff] [blame] | 783 | |
Matthew Sartori | 1043809 | 2015-06-24 14:30:18 -0700 | [diff] [blame] | 784 | @param tests List of tests to run. See db_tests for more information. |
mbligh | 120351e | 2009-01-24 01:40:45 +0000 | [diff] [blame] | 785 | @param profilers List of profilers to activate during the job. |
| 786 | @param client_control_file The contents of a client-side control file to |
| 787 | run at the end of all tests. If this is supplied, all tests must be |
| 788 | client side. |
| 789 | TODO: in the future we should support server control files directly |
| 790 | to wrap with a kernel. That'll require changing the parameter |
| 791 | name and adding a boolean to indicate if it is a client or server |
| 792 | control file. |
| 793 | @param use_container unused argument today. TODO: Enable containers |
| 794 | on the host during a client side test. |
showard | 91f8510 | 2009-10-12 20:34:52 +0000 | [diff] [blame] | 795 | @param profile_only A boolean that indicates what default profile_only |
| 796 | mode to use in the control file. Passing None will generate a |
| 797 | control file that does not explcitly set the default mode at all. |
Matthew Sartori | 1043809 | 2015-06-24 14:30:18 -0700 | [diff] [blame] | 798 | @param db_tests: if True, the test object can be found in the database |
| 799 | backing the test model. In this case, tests is a tuple |
| 800 | of test IDs which are used to retrieve the test objects |
| 801 | from the database. If False, tests is a tuple of test |
| 802 | dictionaries stored client-side in the AFE. |
Michael Tang | 84a2ecf | 2016-06-07 15:10:53 -0700 | [diff] [blame] | 803 | @param test_source_build: Build to be used to retrieve test code. Default |
| 804 | to None. |
mbligh | 120351e | 2009-01-24 01:40:45 +0000 | [diff] [blame] | 805 | |
| 806 | @returns a dict with the following keys: |
| 807 | control_file: str, The control file text. |
| 808 | is_server: bool, is the control file a server-side control file? |
| 809 | synch_count: How many machines the job uses per autoserv execution. |
| 810 | synch_count == 1 means the job is asynchronous. |
| 811 | dependencies: A list of the names of labels on which the job depends. |
| 812 | """ |
showard | d86debe | 2009-06-10 17:37:56 +0000 | [diff] [blame] | 813 | if not tests and not client_control_file: |
showard | 2bab8f4 | 2008-11-12 18:15:22 +0000 | [diff] [blame] | 814 | return dict(control_file='', is_server=False, synch_count=1, |
showard | 989f25d | 2008-10-01 11:38:11 +0000 | [diff] [blame] | 815 | dependencies=[]) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 816 | |
Richard Barnette | 8e33b4e | 2016-05-21 12:12:26 -0700 | [diff] [blame] | 817 | cf_info, test_objects, profiler_objects = ( |
| 818 | rpc_utils.prepare_generate_control_file(tests, profilers, |
| 819 | db_tests)) |
Allen Li | a59b126 | 2016-12-14 12:53:51 -0800 | [diff] [blame] | 820 | cf_info['control_file'] = control_file_lib.generate_control( |
Richard Barnette | 8e33b4e | 2016-05-21 12:12:26 -0700 | [diff] [blame] | 821 | tests=test_objects, profilers=profiler_objects, |
| 822 | is_server=cf_info['is_server'], |
showard | 232b7ae | 2009-11-10 00:46:48 +0000 | [diff] [blame] | 823 | client_control_file=client_control_file, profile_only=profile_only, |
Michael Tang | 84a2ecf | 2016-06-07 15:10:53 -0700 | [diff] [blame] | 824 | test_source_build=test_source_build) |
showard | 989f25d | 2008-10-01 11:38:11 +0000 | [diff] [blame] | 825 | return cf_info |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 826 | |
| 827 | |
Allen Li | 41e47c1 | 2016-12-14 12:43:44 -0800 | [diff] [blame] | 828 | def create_parameterized_job( |
| 829 | name, |
| 830 | priority, |
| 831 | test, |
| 832 | parameters, |
| 833 | kernel=None, |
| 834 | label=None, |
| 835 | profilers=(), |
| 836 | profiler_parameters=None, |
| 837 | use_container=False, |
| 838 | profile_only=None, |
| 839 | upload_kernel_config=False, |
| 840 | hosts=(), |
| 841 | meta_hosts=(), |
| 842 | one_time_hosts=(), |
| 843 | atomic_group_name=None, |
| 844 | synch_count=None, |
| 845 | is_template=False, |
| 846 | timeout=None, |
| 847 | timeout_mins=None, |
| 848 | max_runtime_mins=None, |
| 849 | run_verify=False, |
| 850 | email_list='', |
| 851 | dependencies=(), |
| 852 | reboot_before=None, |
| 853 | reboot_after=None, |
| 854 | parse_failed_repair=None, |
| 855 | hostless=False, |
| 856 | keyvals=None, |
| 857 | drone_set=None, |
| 858 | run_reset=True, |
| 859 | require_ssp=None): |
Shuqian Zhao | 54a5b67 | 2016-05-11 22:12:17 +0000 | [diff] [blame] | 860 | """ |
| 861 | Creates and enqueues a parameterized job. |
| 862 | |
| 863 | Most parameters a combination of the parameters for generate_control_file() |
| 864 | and create_job(), with the exception of: |
| 865 | |
| 866 | @param test name or ID of the test to run |
| 867 | @param parameters a map of parameter name -> |
| 868 | tuple of (param value, param type) |
| 869 | @param profiler_parameters a dictionary of parameters for the profilers: |
| 870 | key: profiler name |
| 871 | value: dict of param name -> tuple of |
| 872 | (param value, |
| 873 | param type) |
| 874 | """ |
Shuqian Zhao | 54a5b67 | 2016-05-11 22:12:17 +0000 | [diff] [blame] | 875 | # Set up the parameterized job configs |
| 876 | test_obj = models.Test.smart_get(test) |
| 877 | control_type = test_obj.test_type |
| 878 | |
| 879 | try: |
| 880 | label = models.Label.smart_get(label) |
| 881 | except models.Label.DoesNotExist: |
| 882 | label = None |
| 883 | |
| 884 | kernel_objs = models.Kernel.create_kernels(kernel) |
| 885 | profiler_objs = [models.Profiler.smart_get(profiler) |
| 886 | for profiler in profilers] |
| 887 | |
| 888 | parameterized_job = models.ParameterizedJob.objects.create( |
| 889 | test=test_obj, label=label, use_container=use_container, |
| 890 | profile_only=profile_only, |
| 891 | upload_kernel_config=upload_kernel_config) |
| 892 | parameterized_job.kernels.add(*kernel_objs) |
| 893 | |
| 894 | for profiler in profiler_objs: |
| 895 | parameterized_profiler = models.ParameterizedJobProfiler.objects.create( |
| 896 | parameterized_job=parameterized_job, |
| 897 | profiler=profiler) |
| 898 | profiler_params = profiler_parameters.get(profiler.name, {}) |
| 899 | for name, (value, param_type) in profiler_params.iteritems(): |
| 900 | models.ParameterizedJobProfilerParameter.objects.create( |
| 901 | parameterized_job_profiler=parameterized_profiler, |
| 902 | parameter_name=name, |
| 903 | parameter_value=value, |
| 904 | parameter_type=param_type) |
| 905 | |
| 906 | try: |
| 907 | for parameter in test_obj.testparameter_set.all(): |
| 908 | if parameter.name in parameters: |
| 909 | param_value, param_type = parameters.pop(parameter.name) |
| 910 | parameterized_job.parameterizedjobparameter_set.create( |
| 911 | test_parameter=parameter, parameter_value=param_value, |
| 912 | parameter_type=param_type) |
| 913 | |
| 914 | if parameters: |
| 915 | raise Exception('Extra parameters remain: %r' % parameters) |
| 916 | |
| 917 | return rpc_utils.create_job_common( |
Allen Li | 81996a8 | 2016-12-14 13:01:37 -0800 | [diff] [blame] | 918 | name=name, |
| 919 | priority=priority, |
| 920 | control_type=control_type, |
| 921 | hosts=hosts, |
| 922 | meta_hosts=meta_hosts, |
| 923 | one_time_hosts=one_time_hosts, |
| 924 | atomic_group_name=atomic_group_name, |
| 925 | synch_count=synch_count, |
| 926 | is_template=is_template, |
| 927 | timeout=timeout, |
| 928 | timeout_mins=timeout_mins, |
| 929 | max_runtime_mins=max_runtime_mins, |
| 930 | run_verify=run_verify, |
| 931 | email_list=email_list, |
| 932 | dependencies=dependencies, |
| 933 | reboot_before=reboot_before, |
| 934 | reboot_after=reboot_after, |
| 935 | parse_failed_repair=parse_failed_repair, |
| 936 | hostless=hostless, |
| 937 | keyvals=keyvals, |
| 938 | drone_set=drone_set, |
| 939 | parameterized_job=parameterized_job.id, |
| 940 | run_reset=run_reset, |
| 941 | require_ssp=require_ssp) |
Shuqian Zhao | 54a5b67 | 2016-05-11 22:12:17 +0000 | [diff] [blame] | 942 | except: |
| 943 | parameterized_job.delete() |
| 944 | raise |
| 945 | |
| 946 | |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 947 | def create_job_page_handler(name, priority, control_file, control_type, |
Dan Shi | d215dbe | 2015-06-18 16:14:59 -0700 | [diff] [blame] | 948 | image=None, hostless=False, firmware_rw_build=None, |
| 949 | firmware_ro_build=None, test_source_build=None, |
Michael Tang | 84a2ecf | 2016-06-07 15:10:53 -0700 | [diff] [blame] | 950 | is_cloning=False, **kwargs): |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 951 | """\ |
| 952 | Create and enqueue a job. |
| 953 | |
| 954 | @param name name of this job |
| 955 | @param priority Integer priority of this job. Higher is more important. |
| 956 | @param control_file String contents of the control file. |
| 957 | @param control_type Type of control file, Client or Server. |
Dan Shi | d215dbe | 2015-06-18 16:14:59 -0700 | [diff] [blame] | 958 | @param image: ChromeOS build to be installed in the dut. Default to None. |
| 959 | @param firmware_rw_build: Firmware build to update RW firmware. Default to |
| 960 | None, i.e., RW firmware will not be updated. |
| 961 | @param firmware_ro_build: Firmware build to update RO firmware. Default to |
| 962 | None, i.e., RO firmware will not be updated. |
| 963 | @param test_source_build: Build to be used to retrieve test code. Default |
| 964 | to None. |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 965 | @param is_cloning: True if creating a cloning job. |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 966 | @param kwargs extra args that will be required by create_suite_job or |
| 967 | create_job. |
| 968 | |
| 969 | @returns The created Job id number. |
| 970 | """ |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 971 | if is_cloning: |
| 972 | logging.info('Start to clone a new job') |
Shuqian Zhao | 61f5d31 | 2016-08-05 17:15:23 -0700 | [diff] [blame] | 973 | # When cloning a job, hosts and meta_hosts should not exist together, |
| 974 | # which would cause host-scheduler to schedule two hqe jobs to one host |
| 975 | # at the same time, and crash itself. Clear meta_hosts for this case. |
| 976 | if kwargs.get('hosts') and kwargs.get('meta_hosts'): |
| 977 | kwargs['meta_hosts'] = [] |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 978 | else: |
| 979 | logging.info('Start to create a new job') |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 980 | control_file = rpc_utils.encode_ascii(control_file) |
Jiaxi Luo | dd67beb | 2014-07-18 16:28:31 -0700 | [diff] [blame] | 981 | if not control_file: |
| 982 | raise model_logic.ValidationError({ |
| 983 | 'control_file' : "Control file cannot be empty"}) |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 984 | |
| 985 | if image and hostless: |
Dan Shi | d215dbe | 2015-06-18 16:14:59 -0700 | [diff] [blame] | 986 | builds = {} |
| 987 | builds[provision.CROS_VERSION_PREFIX] = image |
| 988 | if firmware_rw_build: |
Dan Shi | 0723bf5 | 2015-06-24 10:52:38 -0700 | [diff] [blame] | 989 | builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build |
Dan Shi | d215dbe | 2015-06-18 16:14:59 -0700 | [diff] [blame] | 990 | if firmware_ro_build: |
| 991 | builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build |
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 992 | return create_suite_job( |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 993 | name=name, control_file=control_file, priority=priority, |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 994 | builds=builds, test_source_build=test_source_build, |
| 995 | is_cloning=is_cloning, **kwargs) |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 996 | return create_job(name, priority, control_file, control_type, image=image, |
Allen Li | ac199b6 | 2016-12-14 12:56:02 -0800 | [diff] [blame] | 997 | hostless=hostless, **kwargs) |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 998 | |
| 999 | |
MK Ryu | e301eb7 | 2015-06-25 12:51:02 -0700 | [diff] [blame] | 1000 | @rpc_utils.route_rpc_to_master |
Allen Li | 8af9da0 | 2016-12-12 17:32:39 -0800 | [diff] [blame] | 1001 | def create_job( |
| 1002 | name, |
| 1003 | priority, |
| 1004 | control_file, |
| 1005 | control_type, |
| 1006 | hosts=(), |
| 1007 | meta_hosts=(), |
| 1008 | one_time_hosts=(), |
| 1009 | atomic_group_name=None, |
| 1010 | synch_count=None, |
| 1011 | is_template=False, |
| 1012 | timeout=None, |
| 1013 | timeout_mins=None, |
| 1014 | max_runtime_mins=None, |
| 1015 | run_verify=False, |
| 1016 | email_list='', |
| 1017 | dependencies=(), |
| 1018 | reboot_before=None, |
| 1019 | reboot_after=None, |
| 1020 | parse_failed_repair=None, |
| 1021 | hostless=False, |
| 1022 | keyvals=None, |
| 1023 | drone_set=None, |
| 1024 | image=None, |
| 1025 | parent_job_id=None, |
| 1026 | test_retry=0, |
| 1027 | run_reset=True, |
| 1028 | require_ssp=None, |
| 1029 | args=(), |
Allen Li | 8af9da0 | 2016-12-12 17:32:39 -0800 | [diff] [blame] | 1030 | **kwargs): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1031 | """\ |
| 1032 | Create and enqueue a job. |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1033 | |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1034 | @param name name of this job |
Alex Miller | 7d658cf | 2013-09-04 16:00:35 -0700 | [diff] [blame] | 1035 | @param priority Integer priority of this job. Higher is more important. |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1036 | @param control_file String contents of the control file. |
| 1037 | @param control_type Type of control file, Client or Server. |
| 1038 | @param synch_count How many machines the job uses per autoserv execution. |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1039 | synch_count == 1 means the job is asynchronous. If an atomic group is |
| 1040 | given this value is treated as a minimum. |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1041 | @param is_template If true then create a template job. |
| 1042 | @param timeout Hours after this call returns until the job times out. |
Simran Basi | 7e60574 | 2013-11-12 13:43:36 -0800 | [diff] [blame] | 1043 | @param timeout_mins Minutes after this call returns until the job times |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1044 | out. |
Simran Basi | 3421702 | 2012-11-06 13:43:15 -0800 | [diff] [blame] | 1045 | @param max_runtime_mins Minutes from job starting time until job times out |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1046 | @param run_verify Should the host be verified before running the test? |
| 1047 | @param email_list String containing emails to mail when the job is done |
| 1048 | @param dependencies List of label names on which this job depends |
| 1049 | @param reboot_before Never, If dirty, or Always |
| 1050 | @param reboot_after Never, If all tests passed, or Always |
| 1051 | @param parse_failed_repair if true, results of failed repairs launched by |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1052 | this job will be parsed as part of the job. |
showard | a9545c0 | 2009-12-18 22:44:26 +0000 | [diff] [blame] | 1053 | @param hostless if true, create a hostless job |
showard | c1a98d1 | 2010-01-15 00:22:22 +0000 | [diff] [blame] | 1054 | @param keyvals dict of keyvals to associate with the job |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1055 | @param hosts List of hosts to run job on. |
| 1056 | @param meta_hosts List where each entry is a label name, and for each entry |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1057 | one host will be chosen from that label to run the job on. |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1058 | @param one_time_hosts List of hosts not in the database to run the job on. |
| 1059 | @param atomic_group_name The name of an atomic group to schedule the job on. |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1060 | @param drone_set The name of the drone set to run this test on. |
Paul Pendlebury | 5a8c6ad | 2011-02-01 07:20:17 -0800 | [diff] [blame] | 1061 | @param image OS image to install before running job. |
Aviv Keshet | 0b9cfc9 | 2013-02-05 11:36:02 -0800 | [diff] [blame] | 1062 | @param parent_job_id id of a job considered to be parent of created job. |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 1063 | @param test_retry Number of times to retry test if the test did not |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1064 | complete successfully. (optional, default: 0) |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 1065 | @param run_reset Should the host be reset before running the test? |
Dan Shi | ec1d47d | 2015-02-13 11:38:13 -0800 | [diff] [blame] | 1066 | @param require_ssp Set to True to require server-side packaging to run the |
| 1067 | test. If it's set to None, drone will still try to run |
| 1068 | the server side with server-side packaging. If the |
| 1069 | autotest-server package doesn't exist for the build or |
| 1070 | image is not set, drone will run the test without server- |
| 1071 | side packaging. Default is None. |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1072 | @param args A list of args to be injected into control file. |
Simran Basi | b6ec8ae | 2014-04-23 12:05:08 -0700 | [diff] [blame] | 1073 | @param kwargs extra keyword args. NOT USED. |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1074 | |
| 1075 | @returns The created Job id number. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1076 | """ |
Jiaxi Luo | 90190c9 | 2014-06-18 12:35:57 -0700 | [diff] [blame] | 1077 | if args: |
| 1078 | control_file = tools.inject_vars({'args': args}, control_file) |
Richard Barnette | 6c2b70a | 2017-01-26 13:40:51 -0800 | [diff] [blame] | 1079 | if image: |
| 1080 | dependencies += (provision.image_version_to_label(image),) |
jamesren | 4a41e01 | 2010-07-16 22:33:48 +0000 | [diff] [blame] | 1081 | return rpc_utils.create_job_common( |
Allen Li | 81996a8 | 2016-12-14 13:01:37 -0800 | [diff] [blame] | 1082 | name=name, |
| 1083 | priority=priority, |
| 1084 | control_type=control_type, |
| 1085 | control_file=control_file, |
| 1086 | hosts=hosts, |
| 1087 | meta_hosts=meta_hosts, |
| 1088 | one_time_hosts=one_time_hosts, |
| 1089 | atomic_group_name=atomic_group_name, |
| 1090 | synch_count=synch_count, |
| 1091 | is_template=is_template, |
| 1092 | timeout=timeout, |
| 1093 | timeout_mins=timeout_mins, |
| 1094 | max_runtime_mins=max_runtime_mins, |
| 1095 | run_verify=run_verify, |
| 1096 | email_list=email_list, |
| 1097 | dependencies=dependencies, |
| 1098 | reboot_before=reboot_before, |
| 1099 | reboot_after=reboot_after, |
| 1100 | parse_failed_repair=parse_failed_repair, |
| 1101 | hostless=hostless, |
| 1102 | keyvals=keyvals, |
| 1103 | drone_set=drone_set, |
Allen Li | 81996a8 | 2016-12-14 13:01:37 -0800 | [diff] [blame] | 1104 | parent_job_id=parent_job_id, |
| 1105 | test_retry=test_retry, |
| 1106 | run_reset=run_reset, |
| 1107 | require_ssp=require_ssp) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1108 | |
| 1109 | |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 1110 | def abort_host_queue_entries(**filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1111 | """\ |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 1112 | Abort a set of host queue entries. |
Fang Deng | 63b0e45 | 2014-12-19 14:38:15 -0800 | [diff] [blame] | 1113 | |
| 1114 | @return: A list of dictionaries, each contains information |
| 1115 | about an aborted HQE. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1116 | """ |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 1117 | query = models.HostQueueEntry.query_objects(filter_data) |
beeps | faecbce | 2013-10-29 11:35:10 -0700 | [diff] [blame] | 1118 | |
| 1119 | # Dont allow aborts on: |
| 1120 | # 1. Jobs that have already completed (whether or not they were aborted) |
| 1121 | # 2. Jobs that we have already been aborted (but may not have completed) |
| 1122 | query = query.filter(complete=False).filter(aborted=False) |
showard | dc81751 | 2008-11-12 18:16:41 +0000 | [diff] [blame] | 1123 | models.AclGroup.check_abort_permissions(query) |
showard | 9dbdcda | 2008-10-14 17:34:36 +0000 | [diff] [blame] | 1124 | host_queue_entries = list(query.select_related()) |
showard | 2bab8f4 | 2008-11-12 18:15:22 +0000 | [diff] [blame] | 1125 | rpc_utils.check_abort_synchronous_jobs(host_queue_entries) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1126 | |
Simran Basi | c1b2676 | 2013-06-26 14:23:21 -0700 | [diff] [blame] | 1127 | models.HostQueueEntry.abort_host_queue_entries(host_queue_entries) |
Fang Deng | 63b0e45 | 2014-12-19 14:38:15 -0800 | [diff] [blame] | 1128 | hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id, |
| 1129 | 'Job name': hqe.job.name} for hqe in host_queue_entries] |
| 1130 | return hqe_info |
showard | 9d821ab | 2008-07-11 16:54:29 +0000 | [diff] [blame] | 1131 | |
| 1132 | |
beeps | 8bb1f7d | 2013-08-05 01:30:09 -0700 | [diff] [blame] | 1133 | def abort_special_tasks(**filter_data): |
| 1134 | """\ |
| 1135 | Abort the special task, or tasks, specified in the filter. |
| 1136 | """ |
| 1137 | query = models.SpecialTask.query_objects(filter_data) |
| 1138 | special_tasks = query.filter(is_active=True) |
| 1139 | for task in special_tasks: |
| 1140 | task.abort() |
| 1141 | |
| 1142 | |
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 1143 | def _call_special_tasks_on_hosts(task, hosts): |
| 1144 | """\ |
| 1145 | Schedules a set of hosts for a special task. |
| 1146 | |
| 1147 | @returns A list of hostnames that a special task was created for. |
| 1148 | """ |
| 1149 | models.AclGroup.check_for_acl_violation_hosts(hosts) |
Prashanth Balasubramanian | 6edaaf9 | 2014-11-24 16:36:25 -0800 | [diff] [blame] | 1150 | shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts) |
Prashanth Balasubramanian | 8c98ac1 | 2014-12-23 11:26:44 -0800 | [diff] [blame] | 1151 | if shard_host_map and not utils.is_shard(): |
Prashanth Balasubramanian | 6edaaf9 | 2014-11-24 16:36:25 -0800 | [diff] [blame] | 1152 | raise ValueError('The following hosts are on shards, please ' |
| 1153 | 'follow the link to the shards and create jobs ' |
| 1154 | 'there instead. %s.' % shard_host_map) |
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 1155 | for host in hosts: |
| 1156 | models.SpecialTask.schedule_special_task(host, task) |
| 1157 | return list(sorted(host.hostname for host in hosts)) |
| 1158 | |
| 1159 | |
MK Ryu | 5aa2504 | 2015-07-28 16:08:04 -0700 | [diff] [blame] | 1160 | def _forward_special_tasks_on_hosts(task, rpc, **filter_data): |
| 1161 | """Forward special tasks to corresponding shards. |
mbligh | 4e545a5 | 2009-12-19 05:30:39 +0000 | [diff] [blame] | 1162 | |
MK Ryu | 5aa2504 | 2015-07-28 16:08:04 -0700 | [diff] [blame] | 1163 | For master, when special tasks are fired on hosts that are sharded, |
| 1164 | forward the RPC to corresponding shards. |
| 1165 | |
| 1166 | For shard, create special task records in local DB. |
| 1167 | |
| 1168 | @param task: Enum value of frontend.afe.models.SpecialTask.Task |
| 1169 | @param rpc: RPC name to forward. |
| 1170 | @param filter_data: Filter keywords to be used for DB query. |
| 1171 | |
| 1172 | @return: A list of hostnames that a special task was created for. |
showard | 1ff7b2e | 2009-05-15 23:17:18 +0000 | [diff] [blame] | 1173 | """ |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 1174 | hosts = models.Host.query_objects(filter_data) |
| 1175 | shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True) |
| 1176 | |
| 1177 | # Filter out hosts on a shard from those on the master, forward |
| 1178 | # rpcs to the shard with an additional hostname__in filter, and |
| 1179 | # create a local SpecialTask for each remaining host. |
Prashanth Balasubramanian | 8c98ac1 | 2014-12-23 11:26:44 -0800 | [diff] [blame] | 1180 | if shard_host_map and not utils.is_shard(): |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 1181 | hosts = [h for h in hosts if h.shard is None] |
| 1182 | for shard, hostnames in shard_host_map.iteritems(): |
| 1183 | |
| 1184 | # The main client of this module is the frontend website, and |
| 1185 | # it invokes it with an 'id' or an 'id__in' filter. Regardless, |
| 1186 | # the 'hostname' filter should narrow down the list of hosts on |
| 1187 | # each shard even though we supply all the ids in filter_data. |
| 1188 | # This method uses hostname instead of id because it fits better |
MK Ryu | 5aa2504 | 2015-07-28 16:08:04 -0700 | [diff] [blame] | 1189 | # with the overall architecture of redirection functions in |
| 1190 | # rpc_utils. |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 1191 | shard_filter = filter_data.copy() |
| 1192 | shard_filter['hostname__in'] = hostnames |
| 1193 | rpc_utils.run_rpc_on_multiple_hostnames( |
MK Ryu | 5aa2504 | 2015-07-28 16:08:04 -0700 | [diff] [blame] | 1194 | rpc, [shard], **shard_filter) |
Prashanth Balasubramanian | 4098123 | 2014-12-16 19:01:58 -0800 | [diff] [blame] | 1195 | |
| 1196 | # There is a race condition here if someone assigns a shard to one of these |
| 1197 | # hosts before we create the task. The host will stay on the master if: |
| 1198 | # 1. The host is not Ready |
| 1199 | # 2. The host is Ready but has a task |
| 1200 | # But if the host is Ready and doesn't have a task yet, it will get sent |
| 1201 | # to the shard as we're creating a task here. |
| 1202 | |
| 1203 | # Given that we only rarely verify Ready hosts it isn't worth putting this |
| 1204 | # entire method in a transaction. The worst case scenario is that we have |
MK Ryu | 5aa2504 | 2015-07-28 16:08:04 -0700 | [diff] [blame] | 1205 | # a verify running on a Ready host while the shard is using it, if the |
| 1206 | # verify fails no subsequent tasks will be created against the host on the |
| 1207 | # master, and verifies are safe enough that this is OK. |
| 1208 | return _call_special_tasks_on_hosts(task, hosts) |
| 1209 | |
| 1210 | |
def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    return _forward_special_tasks_on_hosts(models.SpecialTask.Task.VERIFY,
                                           'reverify_hosts', **filter_data)
Simran Basi | 73dae55 | 2013-02-25 14:57:46 -0800 | [diff] [blame] | 1219 | |
| 1220 | |
def repair_hosts(**filter_data):
    """\
    Schedules a set of hosts for repair.

    @returns A list of hostnames that a repair task was created for.
    """
    return _forward_special_tasks_on_hosts(models.SpecialTask.Task.REPAIR,
                                           'repair_hosts', **filter_data)
showard | 1ff7b2e | 2009-05-15 23:17:18 +0000 | [diff] [blame] | 1229 | |
| 1230 | |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1231 | def get_jobs(not_yet_run=False, running=False, finished=False, |
| 1232 | suite=False, sub=False, standalone=False, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1233 | """\ |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1234 | Extra status filter args for get_jobs: |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1235 | -not_yet_run: Include only jobs that have not yet started running. |
| 1236 | -running: Include only jobs that have start running but for which not |
| 1237 | all hosts have completed. |
| 1238 | -finished: Include only jobs for which all hosts have completed (or |
| 1239 | aborted). |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1240 | |
| 1241 | Extra type filter args for get_jobs: |
| 1242 | -suite: Include only jobs with child jobs. |
| 1243 | -sub: Include only jobs with a parent job. |
| 1244 | -standalone: Inlcude only jobs with no child or parent jobs. |
| 1245 | At most one of these three fields should be specified. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1246 | """ |
Jiaxi Luo | 15cbf37 | 2014-07-01 19:20:20 -0700 | [diff] [blame] | 1247 | extra_args = rpc_utils.extra_job_status_filters(not_yet_run, |
| 1248 | running, |
| 1249 | finished) |
| 1250 | filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args, |
| 1251 | suite, |
| 1252 | sub, |
| 1253 | standalone) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1254 | job_dicts = [] |
| 1255 | jobs = list(models.Job.query_objects(filter_data)) |
| 1256 | models.Job.objects.populate_relationships(jobs, models.Label, |
| 1257 | 'dependencies') |
showard | c1a98d1 | 2010-01-15 00:22:22 +0000 | [diff] [blame] | 1258 | models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals') |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1259 | for job in jobs: |
| 1260 | job_dict = job.get_object_dict() |
| 1261 | job_dict['dependencies'] = ','.join(label.name |
| 1262 | for label in job.dependencies) |
showard | c1a98d1 | 2010-01-15 00:22:22 +0000 | [diff] [blame] | 1263 | job_dict['keyvals'] = dict((keyval.key, keyval.value) |
| 1264 | for keyval in job.keyvals) |
Eric Li | d23bc19 | 2011-02-09 14:38:57 -0800 | [diff] [blame] | 1265 | if job.parameterized_job: |
| 1266 | job_dict['image'] = get_parameterized_autoupdate_image_url(job) |
showard | 0957a84 | 2009-05-11 19:25:08 +0000 | [diff] [blame] | 1267 | job_dicts.append(job_dict) |
| 1268 | return rpc_utils.prepare_for_serialization(job_dicts) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1269 | |
| 1270 | |
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 suite=False, sub=False, standalone=False,
                 **filter_data):
    """\
    See get_jobs() for documentation of extra filter parameters.
    """
    status_args = rpc_utils.extra_job_status_filters(not_yet_run, running,
                                                     finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(
            status_args, suite, sub, standalone)
    return models.Job.query_count(filter_data)
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1285 | |
| 1286 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1287 | def get_jobs_summary(**filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1288 | """\ |
Jiaxi Luo | aac5457 | 2014-06-04 13:57:02 -0700 | [diff] [blame] | 1289 | Like get_jobs(), but adds 'status_counts' and 'result_counts' field. |
| 1290 | |
| 1291 | 'status_counts' filed is a dictionary mapping status strings to the number |
| 1292 | of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}. |
| 1293 | |
| 1294 | 'result_counts' field is piped to tko's rpc_interface and has the return |
| 1295 | format specified under get_group_counts. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1296 | """ |
| 1297 | jobs = get_jobs(**filter_data) |
| 1298 | ids = [job['id'] for job in jobs] |
| 1299 | all_status_counts = models.Job.objects.get_status_counts(ids) |
| 1300 | for job in jobs: |
| 1301 | job['status_counts'] = all_status_counts[job['id']] |
Jiaxi Luo | aac5457 | 2014-06-04 13:57:02 -0700 | [diff] [blame] | 1302 | job['result_counts'] = tko_rpc_interface.get_status_counts( |
| 1303 | ['afe_job_id', 'afe_job_id'], |
| 1304 | header_groups=[['afe_job_id'], ['afe_job_id']], |
| 1305 | **{'afe_job_id': job['id']}) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1306 | return rpc_utils.prepare_for_serialization(jobs) |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1307 | |
| 1308 | |
showard | a965cef | 2009-05-15 23:17:41 +0000 | [diff] [blame] | 1309 | def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None): |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1310 | """\ |
| 1311 | Retrieves all the information needed to clone a job. |
| 1312 | """ |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1313 | job = models.Job.objects.get(id=id) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1314 | job_info = rpc_utils.get_job_info(job, |
showard | a965cef | 2009-05-15 23:17:41 +0000 | [diff] [blame] | 1315 | preserve_metahosts, |
| 1316 | queue_entry_filter_data) |
showard | 945072f | 2008-09-03 20:34:59 +0000 | [diff] [blame] | 1317 | |
showard | d9992fe | 2008-07-31 02:15:03 +0000 | [diff] [blame] | 1318 | host_dicts = [] |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1319 | for host in job_info['hosts']: |
| 1320 | host_dict = get_hosts(id=host.id)[0] |
| 1321 | other_labels = host_dict['labels'] |
| 1322 | if host_dict['platform']: |
| 1323 | other_labels.remove(host_dict['platform']) |
| 1324 | host_dict['other_labels'] = ', '.join(other_labels) |
showard | d9992fe | 2008-07-31 02:15:03 +0000 | [diff] [blame] | 1325 | host_dicts.append(host_dict) |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1326 | |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1327 | for host in job_info['one_time_hosts']: |
| 1328 | host_dict = dict(hostname=host.hostname, |
| 1329 | id=host.id, |
| 1330 | platform='(one-time host)', |
| 1331 | locked_text='') |
| 1332 | host_dicts.append(host_dict) |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1333 | |
showard | 4d07756 | 2009-05-08 18:24:36 +0000 | [diff] [blame] | 1334 | # convert keys from Label objects to strings (names of labels) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1335 | meta_host_counts = dict((meta_host.name, count) for meta_host, count |
showard | 4d07756 | 2009-05-08 18:24:36 +0000 | [diff] [blame] | 1336 | in job_info['meta_host_counts'].iteritems()) |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1337 | |
| 1338 | info = dict(job=job.get_object_dict(), |
| 1339 | meta_host_counts=meta_host_counts, |
| 1340 | hosts=host_dicts) |
| 1341 | info['job']['dependencies'] = job_info['dependencies'] |
| 1342 | if job_info['atomic_group']: |
| 1343 | info['atomic_group_name'] = (job_info['atomic_group']).name |
| 1344 | else: |
| 1345 | info['atomic_group_name'] = None |
jamesren | 2275ef1 | 2010-04-12 18:25:06 +0000 | [diff] [blame] | 1346 | info['hostless'] = job_info['hostless'] |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1347 | info['drone_set'] = job.drone_set and job.drone_set.name |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1348 | |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 1349 | image = _get_image_for_job(job, job_info['hostless']) |
| 1350 | if image: |
| 1351 | info['job']['image'] = image |
Eric Li | d23bc19 | 2011-02-09 14:38:57 -0800 | [diff] [blame] | 1352 | |
showard | a8709c5 | 2008-07-03 19:44:54 +0000 | [diff] [blame] | 1353 | return rpc_utils.prepare_for_serialization(info) |
| 1354 | |
| 1355 | |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 1356 | def _get_image_for_job(job, hostless): |
| 1357 | """ Gets the image used for a job. |
| 1358 | |
| 1359 | Gets the image used for an AFE job. If the job is a parameterized job, get |
| 1360 | the image from the job parameter; otherwise, tries to get the image from |
| 1361 | the job's keyvals 'build' or 'builds'. As a last resort, if the job is a |
| 1362 | hostless job, tries to get the image from its control file attributes |
| 1363 | 'build' or 'builds'. |
| 1364 | |
| 1365 | TODO(ntang): Needs to handle FAFT with two builds for ro/rw. |
| 1366 | |
| 1367 | @param job An AFE job object. |
| 1368 | @param hostless Boolean on of the job is hostless. |
| 1369 | |
| 1370 | @returns The image build used for the job. |
| 1371 | """ |
| 1372 | image = None |
| 1373 | if job.parameterized_job: |
| 1374 | image = get_parameterized_autoupdate_image_url(job) |
| 1375 | else: |
| 1376 | keyvals = job.keyval_dict() |
Michael Tang | 84a2ecf | 2016-06-07 15:10:53 -0700 | [diff] [blame] | 1377 | image = keyvals.get('build') |
Michael Tang | 6dc174e | 2016-05-31 23:13:42 -0700 | [diff] [blame] | 1378 | if not image: |
| 1379 | value = keyvals.get('builds') |
| 1380 | builds = None |
| 1381 | if isinstance(value, dict): |
| 1382 | builds = value |
| 1383 | elif isinstance(value, basestring): |
| 1384 | builds = ast.literal_eval(value) |
| 1385 | if builds: |
| 1386 | image = builds.get('cros-version') |
| 1387 | if not image and hostless and job.control_file: |
| 1388 | try: |
| 1389 | control_obj = control_data.parse_control_string( |
| 1390 | job.control_file) |
| 1391 | if hasattr(control_obj, 'build'): |
| 1392 | image = getattr(control_obj, 'build') |
| 1393 | if not image and hasattr(control_obj, 'builds'): |
| 1394 | builds = getattr(control_obj, 'builds') |
| 1395 | image = builds.get('cros-version') |
| 1396 | except: |
| 1397 | logging.warning('Failed to parse control file for job: %s', |
| 1398 | job.name) |
| 1399 | return image |
| 1400 | |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1401 | |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1402 | def get_host_queue_entries(start_time=None, end_time=None, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1403 | """\ |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1404 | @returns A sequence of nested dictionaries of host and job information. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1405 | """ |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1406 | filter_data = rpc_utils.inject_times_to_filter('started_on__gte', |
| 1407 | 'started_on__lte', |
| 1408 | start_time, |
| 1409 | end_time, |
| 1410 | **filter_data) |
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1411 | return rpc_utils.prepare_rows_as_nested_dicts( |
| 1412 | models.HostQueueEntry.query_objects(filter_data), |
| 1413 | ('host', 'atomic_group', 'job')) |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1414 | |
| 1415 | |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1416 | def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1417 | """\ |
| 1418 | Get the number of host queue entries associated with this job. |
| 1419 | """ |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1420 | filter_data = rpc_utils.inject_times_to_filter('started_on__gte', |
| 1421 | 'started_on__lte', |
| 1422 | start_time, |
| 1423 | end_time, |
| 1424 | **filter_data) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1425 | return models.HostQueueEntry.query_count(filter_data) |
showard | 34dc5fa | 2008-04-24 20:58:40 +0000 | [diff] [blame] | 1426 | |
| 1427 | |
showard | 1e935f1 | 2008-07-11 00:11:36 +0000 | [diff] [blame] | 1428 | def get_hqe_percentage_complete(**filter_data): |
| 1429 | """ |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1430 | Computes the fraction of host queue entries matching the given filter data |
showard | 1e935f1 | 2008-07-11 00:11:36 +0000 | [diff] [blame] | 1431 | that are complete. |
| 1432 | """ |
| 1433 | query = models.HostQueueEntry.query_objects(filter_data) |
| 1434 | complete_count = query.filter(complete=True).count() |
| 1435 | total_count = query.count() |
| 1436 | if total_count == 0: |
| 1437 | return 1 |
| 1438 | return float(complete_count) / total_count |
| 1439 | |
| 1440 | |
showard | 1a5a408 | 2009-07-28 20:01:37 +0000 | [diff] [blame] | 1441 | # special tasks |
| 1442 | |
def get_special_tasks(**filter_data):
    """Get special task entries from the local database.

    Query the special tasks table for tasks matching the given
    `filter_data`, and return a list of the results.  No attempt is
    made to forward the call to shards; the buck will stop here.
    The caller is expected to know the target shard for such reasons
    as:
      * The caller is a service (such as gs_offloader) configured
        to operate on behalf of one specific shard, and no other.
      * The caller has a host as a parameter, and knows that this is
        the shard assigned to that host.

    @param filter_data  Filter keywords to pass to the underlying
                        database query.

    """
    tasks = models.SpecialTask.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
            tasks, ('host', 'queue_entry'))
J. Richard Barnette | b5164d6 | 2015-04-13 12:59:31 -0700 | [diff] [blame] | 1463 | |
| 1464 | |
def get_host_special_tasks(host_id, **filter_data):
    """Get special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by `host_id` and matching the given `filter_data`.
    Return a list of the results.  If the host is assigned to a
    shard, forward this call to that shard.

    @param host_id     Id in the database of the target host.
    @param filter_data Filter keywords to pass to the underlying
                       database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host = models.Host.smart_get(host_id, False)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
        return shard_afe.run('get_special_tasks',
                             host_id=host_id, **filter_data)
    return get_special_tasks(host_id=host_id, **filter_data)
showard | 1a5a408 | 2009-07-28 20:01:37 +0000 | [diff] [blame] | 1490 | |
| 1491 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1492 | def get_num_special_tasks(**kwargs): |
| 1493 | """Get the number of special task entries from the local database. |
| 1494 | |
| 1495 | Query the special tasks table for tasks matching the given 'kwargs', |
| 1496 | and return the number of the results. No attempt is made to forward |
| 1497 | the call to shards; the buck will stop here. |
| 1498 | |
| 1499 | @param kwargs Filter keywords to pass to the underlying database query. |
| 1500 | |
| 1501 | """ |
| 1502 | return models.SpecialTask.query_count(kwargs) |
| 1503 | |
| 1504 | |
def get_host_num_special_tasks(host, **kwargs):
    """Get special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by 'host' and matching the given 'kwargs'.
    Return a list of the results. If the host is assigned to a
    shard, forward this call to that shard.

    @param host      id or name of a host. More often a hostname.
    @param kwargs    Filter keywords to pass to the underlying database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host_model = models.Host.smart_get(host, False)
    if host_model.shard:
        shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname())
        return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
    return get_num_special_tasks(host=host, **kwargs)
| 1524 | |
| 1525 | |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1526 | def get_status_task(host_id, end_time): |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1527 | """Get the "status task" for a host from the local shard. |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1528 | |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1529 | Returns a single special task representing the given host's |
| 1530 | "status task". The status task is a completed special task that |
| 1531 | identifies whether the corresponding host was working or broken |
| 1532 | when it completed. A successful task indicates a working host; |
| 1533 | a failed task indicates broken. |
J. Richard Barnette | 39255fa | 2015-04-14 17:23:41 -0700 | [diff] [blame] | 1534 | |
J. Richard Barnette | 4d7e6e6 | 2015-05-01 10:47:34 -0700 | [diff] [blame] | 1535 | This call will not be forward to a shard; the receiving server |
| 1536 | must be the shard that owns the host. |
| 1537 | |
| 1538 | @param host_id Id in the database of the target host. |
| 1539 | @param end_time Time reference for the host's status. |
| 1540 | |
| 1541 | @return A single task; its status (successful or not) |
| 1542 | corresponds to the status of the host (working or |
| 1543 | broken) at the given time. If no task is found, return |
| 1544 | `None`. |
| 1545 | |
| 1546 | """ |
| 1547 | tasklist = rpc_utils.prepare_rows_as_nested_dicts( |
| 1548 | status_history.get_status_task(host_id, end_time), |
| 1549 | ('host', 'queue_entry')) |
| 1550 | return tasklist[0] if tasklist else None |
| 1551 | |
| 1552 | |
def get_host_status_task(host_id, end_time):
    """Get the "status task" for a host from its owning shard.

    Finds the given host's owning shard, and forwards to it a call
    to `get_status_task()` (see above).

    @param host_id Id in the database of the target host.
    @param end_time Time reference for the host's status.

    @return A single task; its status (successful or not)
            corresponds to the status of the host (working or
            broken) at the given time.  If no task is found, return
            `None`.

    """
    host = models.Host.smart_get(host_id)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        remote = frontend.AFE(server=host.shard.rpc_hostname())
        return remote.run('get_status_task',
                          host_id=host_id, end_time=end_time)
    return get_status_task(host_id, end_time)
| 1579 | |
| 1580 | |
J. Richard Barnette | 8abbfd6 | 2015-06-23 12:46:54 -0700 | [diff] [blame] | 1581 | def get_host_diagnosis_interval(host_id, end_time, success): |
| 1582 | """Find a "diagnosis interval" for a given host. |
| 1583 | |
| 1584 | A "diagnosis interval" identifies a start and end time where |
| 1585 | the host went from "working" to "broken", or vice versa. The |
| 1586 | interval's starting time is the starting time of the last status |
| 1587 | task with the old status; the end time is the finish time of the |
| 1588 | first status task with the new status. |
| 1589 | |
| 1590 | This routine finds the most recent diagnosis interval for the |
| 1591 | given host prior to `end_time`, with a starting status matching |
| 1592 | `success`. If `success` is true, the interval will start with a |
| 1593 | successful status task; if false the interval will start with a |
| 1594 | failed status task. |
| 1595 | |
| 1596 | @param host_id Id in the database of the target host. |
| 1597 | @param end_time Time reference for the diagnosis interval. |
| 1598 | @param success Whether the diagnosis interval should start |
| 1599 | with a successful or failed status task. |
| 1600 | |
| 1601 | @return A list of two strings. The first is the timestamp for |
| 1602 | the beginning of the interval; the second is the |
| 1603 | timestamp for the end. If the host has never changed |
| 1604 | state, the list is empty. |
| 1605 | |
| 1606 | """ |
| 1607 | host = models.Host.smart_get(host_id) |
J. Richard Barnette | 78f281a | 2015-06-29 13:24:51 -0700 | [diff] [blame] | 1608 | if not host.shard or utils.is_shard(): |
J. Richard Barnette | 8abbfd6 | 2015-06-23 12:46:54 -0700 | [diff] [blame] | 1609 | return status_history.get_diagnosis_interval( |
| 1610 | host_id, end_time, success) |
| 1611 | else: |
| 1612 | shard_afe = frontend.AFE(server=host.shard.rpc_hostname()) |
| 1613 | return shard_afe.get_host_diagnosis_interval( |
| 1614 | host_id, end_time, success) |
| 1615 | |
| 1616 | |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1617 | # support for host detail view |
| 1618 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1619 | def get_host_queue_entries_and_special_tasks(host, query_start=None, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1620 | query_limit=None, start_time=None, |
| 1621 | end_time=None): |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1622 | """ |
| 1623 | @returns an interleaved list of HostQueueEntries and SpecialTasks, |
| 1624 | in approximate run order. each dict contains keys for type, host, |
| 1625 | job, status, started_on, execution_path, and ID. |
| 1626 | """ |
| 1627 | total_limit = None |
| 1628 | if query_limit is not None: |
| 1629 | total_limit = query_start + query_limit |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1630 | filter_data_common = {'host': host, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1631 | 'query_limit': total_limit, |
| 1632 | 'sort_by': ['-id']} |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1633 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1634 | filter_data_special_tasks = rpc_utils.inject_times_to_filter( |
| 1635 | 'time_started__gte', 'time_started__lte', start_time, end_time, |
| 1636 | **filter_data_common) |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1637 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1638 | queue_entries = get_host_queue_entries( |
| 1639 | start_time, end_time, **filter_data_common) |
| 1640 | special_tasks = get_host_special_tasks(host, **filter_data_special_tasks) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1641 | |
| 1642 | interleaved_entries = rpc_utils.interleave_entries(queue_entries, |
| 1643 | special_tasks) |
| 1644 | if query_start is not None: |
| 1645 | interleaved_entries = interleaved_entries[query_start:] |
| 1646 | if query_limit is not None: |
| 1647 | interleaved_entries = interleaved_entries[:query_limit] |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1648 | return rpc_utils.prepare_host_queue_entries_and_special_tasks( |
| 1649 | interleaved_entries, queue_entries) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1650 | |
| 1651 | |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1652 | def get_num_host_queue_entries_and_special_tasks(host, start_time=None, |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1653 | end_time=None): |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1654 | filter_data_common = {'host': host} |
Jiaxi Luo | 57bc195 | 2014-07-22 15:27:30 -0700 | [diff] [blame] | 1655 | |
| 1656 | filter_data_queue_entries, filter_data_special_tasks = ( |
| 1657 | rpc_utils.inject_times_to_hqe_special_tasks_filters( |
| 1658 | filter_data_common, start_time, end_time)) |
| 1659 | |
| 1660 | return (models.HostQueueEntry.query_count(filter_data_queue_entries) |
MK Ryu | 0c1a37d | 2015-04-30 12:00:55 -0700 | [diff] [blame] | 1661 | + get_host_num_special_tasks(**filter_data_special_tasks)) |
showard | c0ac3a7 | 2009-07-08 21:14:45 +0000 | [diff] [blame] | 1662 | |
| 1663 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1664 | # other |
| 1665 | |
showard | e0b6362 | 2008-08-04 20:58:47 +0000 | [diff] [blame] | 1666 | def echo(data=""): |
| 1667 | """\ |
| 1668 | Returns a passed in string. For doing a basic test to see if RPC calls |
| 1669 | can successfully be made. |
| 1670 | """ |
| 1671 | return data |
| 1672 | |
| 1673 | |
showard | b7a52fd | 2009-04-27 20:10:56 +0000 | [diff] [blame] | 1674 | def get_motd(): |
| 1675 | """\ |
| 1676 | Returns the message of the day as a string. |
| 1677 | """ |
| 1678 | return rpc_utils.get_motd() |
| 1679 | |
| 1680 | |
mbligh | e8819cd | 2008-02-15 16:48:40 +0000 | [diff] [blame] | 1681 | def get_static_data(): |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1682 | """\ |
| 1683 | Returns a dictionary containing a bunch of data that shouldn't change |
| 1684 | often and is otherwise inaccessible. This includes: |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1685 | |
| 1686 | priorities: List of job priority choices. |
| 1687 | default_priority: Default priority value for new jobs. |
| 1688 | users: Sorted list of all users. |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1689 | labels: Sorted list of labels not start with 'cros-version' and |
| 1690 | 'fw-version'. |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1691 | atomic_groups: Sorted list of all atomic groups. |
| 1692 | tests: Sorted list of all tests. |
| 1693 | profilers: Sorted list of all profilers. |
| 1694 | current_user: Logged-in username. |
| 1695 | host_statuses: Sorted list of possible Host statuses. |
| 1696 | job_statuses: Sorted list of possible HostQueueEntry statuses. |
Simran Basi | 7e60574 | 2013-11-12 13:43:36 -0800 | [diff] [blame] | 1697 | job_timeout_default: The default job timeout length in minutes. |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1698 | parse_failed_repair_default: Default value for the parse_failed_repair job |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1699 | option. |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1700 | reboot_before_options: A list of valid RebootBefore string enums. |
| 1701 | reboot_after_options: A list of valid RebootAfter string enums. |
| 1702 | motd: Server's message of the day. |
| 1703 | status_dictionary: A mapping from one word job status names to a more |
| 1704 | informative description. |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1705 | """ |
showard | 21baa45 | 2008-10-21 00:08:39 +0000 | [diff] [blame] | 1706 | |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1707 | default_drone_set_name = models.DroneSet.default_drone_set_name() |
| 1708 | drone_sets = ([default_drone_set_name] + |
| 1709 | sorted(drone_set.name for drone_set in |
| 1710 | models.DroneSet.objects.exclude( |
| 1711 | name=default_drone_set_name))) |
showard | 21baa45 | 2008-10-21 00:08:39 +0000 | [diff] [blame] | 1712 | |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1713 | result = {} |
Alex Miller | 7d658cf | 2013-09-04 16:00:35 -0700 | [diff] [blame] | 1714 | result['priorities'] = priorities.Priority.choices() |
Alex Miller | 7d658cf | 2013-09-04 16:00:35 -0700 | [diff] [blame] | 1715 | result['default_priority'] = 'Default' |
| 1716 | result['max_schedulable_priority'] = priorities.Priority.DEFAULT |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1717 | result['users'] = get_users(sort_by=['login']) |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1718 | |
| 1719 | label_exclude_filters = [{'name__startswith': 'cros-version'}, |
Dan Shi | 65351d6 | 2015-08-03 12:03:23 -0700 | [diff] [blame] | 1720 | {'name__startswith': 'fw-version'}, |
| 1721 | {'name__startswith': 'fwrw-version'}, |
Dan Shi | 2751697 | 2016-03-16 14:03:41 -0700 | [diff] [blame] | 1722 | {'name__startswith': 'fwro-version'}, |
| 1723 | {'name__startswith': 'ab-version'}, |
| 1724 | {'name__startswith': 'testbed-version'}] |
Jiaxi Luo | 3187459 | 2014-06-11 10:36:35 -0700 | [diff] [blame] | 1725 | result['labels'] = get_labels( |
| 1726 | label_exclude_filters, |
| 1727 | sort_by=['-platform', 'name']) |
| 1728 | |
showard | c92da83 | 2009-04-07 18:14:34 +0000 | [diff] [blame] | 1729 | result['atomic_groups'] = get_atomic_groups(sort_by=['name']) |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1730 | result['tests'] = get_tests(sort_by=['name']) |
showard | 2b9a88b | 2008-06-13 20:55:03 +0000 | [diff] [blame] | 1731 | result['profilers'] = get_profilers(sort_by=['name']) |
showard | 0fc3830 | 2008-10-23 00:44:07 +0000 | [diff] [blame] | 1732 | result['current_user'] = rpc_utils.prepare_for_serialization( |
showard | 64a9595 | 2010-01-13 21:27:16 +0000 | [diff] [blame] | 1733 | models.User.current_user().get_object_dict()) |
showard | 2b9a88b | 2008-06-13 20:55:03 +0000 | [diff] [blame] | 1734 | result['host_statuses'] = sorted(models.Host.Status.names) |
mbligh | 5a198b9 | 2008-12-11 19:33:29 +0000 | [diff] [blame] | 1735 | result['job_statuses'] = sorted(models.HostQueueEntry.Status.names) |
Simran Basi | 7e60574 | 2013-11-12 13:43:36 -0800 | [diff] [blame] | 1736 | result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS |
Simran Basi | 3421702 | 2012-11-06 13:43:15 -0800 | [diff] [blame] | 1737 | result['job_max_runtime_mins_default'] = ( |
| 1738 | models.Job.DEFAULT_MAX_RUNTIME_MINS) |
showard | a1e74b3 | 2009-05-12 17:32:04 +0000 | [diff] [blame] | 1739 | result['parse_failed_repair_default'] = bool( |
| 1740 | models.Job.DEFAULT_PARSE_FAILED_REPAIR) |
jamesren | dd85524 | 2010-03-02 22:23:44 +0000 | [diff] [blame] | 1741 | result['reboot_before_options'] = model_attributes.RebootBefore.names |
| 1742 | result['reboot_after_options'] = model_attributes.RebootAfter.names |
showard | 8fbae65 | 2009-01-20 23:23:10 +0000 | [diff] [blame] | 1743 | result['motd'] = rpc_utils.get_motd() |
jamesren | 76fcf19 | 2010-04-21 20:39:50 +0000 | [diff] [blame] | 1744 | result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled() |
| 1745 | result['drone_sets'] = drone_sets |
jamesren | 4a41e01 | 2010-07-16 22:33:48 +0000 | [diff] [blame] | 1746 | result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled() |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1747 | |
showard | d3dc199 | 2009-04-22 21:01:40 +0000 | [diff] [blame] | 1748 | result['status_dictionary'] = {"Aborted": "Aborted", |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1749 | "Verifying": "Verifying Host", |
Alex Miller | dfff2fd | 2013-05-28 13:05:06 -0700 | [diff] [blame] | 1750 | "Provisioning": "Provisioning Host", |
showard | 8ac29b4 | 2008-07-17 17:01:55 +0000 | [diff] [blame] | 1751 | "Pending": "Waiting on other hosts", |
| 1752 | "Running": "Running autoserv", |
| 1753 | "Completed": "Autoserv completed", |
| 1754 | "Failed": "Failed to complete", |
showard | d823b36 | 2008-07-24 16:35:46 +0000 | [diff] [blame] | 1755 | "Queued": "Queued", |
showard | 5deb677 | 2008-11-04 21:54:33 +0000 | [diff] [blame] | 1756 | "Starting": "Next in host's queue", |
| 1757 | "Stopped": "Other host(s) failed verify", |
showard | d3dc199 | 2009-04-22 21:01:40 +0000 | [diff] [blame] | 1758 | "Parsing": "Awaiting parse of final results", |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1759 | "Gathering": "Gathering log files", |
mbligh | 4608b00 | 2010-01-05 18:22:35 +0000 | [diff] [blame] | 1760 | "Waiting": "Waiting for scheduler action", |
Dan Shi | 07e09af | 2013-04-12 09:31:29 -0700 | [diff] [blame] | 1761 | "Archiving": "Archiving results", |
| 1762 | "Resetting": "Resetting hosts"} |
Jiaxi Luo | 421608e | 2014-07-07 14:38:00 -0700 | [diff] [blame] | 1763 | |
| 1764 | result['wmatrix_url'] = rpc_utils.get_wmatrix_url() |
Simran Basi | 71206ef | 2014-08-13 13:51:18 -0700 | [diff] [blame] | 1765 | result['is_moblab'] = bool(utils.is_moblab()) |
Jiaxi Luo | 421608e | 2014-07-07 14:38:00 -0700 | [diff] [blame] | 1766 | |
jadmanski | 0afbb63 | 2008-06-06 21:10:57 +0000 | [diff] [blame] | 1767 | return result |
showard | 29f7cd2 | 2009-04-29 21:16:24 +0000 | [diff] [blame] | 1768 | |
| 1769 | |
def get_server_time():
    """Return the current server time as a 'YYYY-MM-DD HH:MM' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
Kevin Cheng | 1952198 | 2016-09-22 12:27:23 -0700 | [diff] [blame] | 1772 | |
| 1773 | |
def get_hosts_by_attribute(attribute, value):
    """
    Get the list of valid hosts that share the same host attribute value.

    @param attribute: String of the host attribute to check.
    @param value: String of the value that is shared between hosts.

    @returns List of hostnames that all have the same host attribute and
             value.
    """
    rows = models.HostAttribute.query_objects({'attribute': attribute,
                                               'value': value})
    hostnames = []
    for row in rows:
        # Skip hosts flagged invalid (e.g. deleted hosts).
        if row.host.invalid == 0:
            hostnames.append(row.host.hostname)
    return hostnames
Allen Li | cdd00f2 | 2017-02-01 18:01:52 -0800 | [diff] [blame] | 1787 | |
| 1788 | |
def canonicalize_suite_name(suite_name):
    """Canonicalize the suite's name into its control file path.

    @param suite_name: the name of the suite.
    @returns The canonical 'test_suites/control.<name>' form.
    """
    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    template = 'test_suites/control.%s'
    return template % suite_name
| 1797 | |
| 1798 | |
def formatted_now():
    """Return the current datetime formatted with time_utils.TIME_FMT."""
    now = datetime.datetime.now()
    return now.strftime(time_utils.TIME_FMT)
| 1802 | |
| 1803 | |
def _get_control_file_by_build(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build| for |board|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.
    @raises ControlFileMalformed if the control file contains non-ascii
                                 characters.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    devserver_name = ds.hostname
    timer = autotest_stats.Timer('control_files.parse.%s.%s' %
                                 (devserver_name.replace('.', '_'),
                                  suite_name.rsplit('.')[-1]))
    # Get the control file for the suite.
    try:
        with timer:
            control_file_in = getter.get_control_file_contents_by_name(
                    suite_name)
    except error.CrosDynamicSuiteException as e:
        # Re-raise with the same exception type but an augmented message.
        raise type(e)('Failed to get control file for %s '
                      '(devserver: %s) (error: %s)' %
                      (build, devserver_name, e))
    if not control_file_in:
        raise error.ControlFileEmpty(
            "Fetching %s returned no data. (devserver: %s)" %
            (suite_name, devserver_name))
    # Force control files to only contain ascii characters.
    try:
        control_file_in.encode('ascii')
    except (UnicodeDecodeError, UnicodeEncodeError) as e:
        # Bug fix: on Python 2, a byte string with non-ascii bytes raises
        # UnicodeDecodeError here (implicit ascii decode), but a unicode
        # object raises UnicodeEncodeError, which the old code let escape.
        raise error.ControlFileMalformed(str(e))

    return control_file_in
| 1845 | |
| 1846 | |
def _get_control_file_by_suite(suite_name):
    """Get control file contents by suite name.

    Looks the suite control file up on the local filesystem, under the
    drone installation directory configured for the scheduler.

    @param suite_name: Suite name as string.
    @returns: Control file contents as string.
    """
    drone_dir = _CONFIG.get_config_value('SCHEDULER',
                                         'drone_installation_directory')
    getter = control_file_getter.FileSystemGetter([drone_dir])
    return getter.get_control_file_contents_by_name(suite_name)
| 1857 | |
| 1858 | |
def _stage_build_artifacts(build, hostname=None):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build image we want to stage.
    @param hostname hostname of a dut may run test on. This is to help to
                    locate a devserver closer to duts if needed. Default is
                    None.

    @raises StageControlFileFailure: if the dev server throws 500 while staging
                                     suite control files.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    timings = {}
    # Stage the components of |build| needed for installing images on the
    # dev server; other artifacts continue downloading in the background.
    devserver = dev_server.resolve(build, hostname=hostname)
    devserver_label = devserver.hostname
    timings[constants.DOWNLOAD_STARTED_TIME] = formatted_now()
    stage_timer = autotest_stats.Timer('control_files.stage.%s' % (
            devserver_label.replace('.', '_')))
    try:
        with stage_timer:
            devserver.stage_artifacts(image=build, artifacts=['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
                "Failed to stage %s on %s: %s" % (build, devserver_label, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (devserver, timings)
| 1890 | |
| 1891 | |
@rpc_utils.route_rpc_to_master
def create_suite_job(
        name='',
        board='',
        pool='',
        control_file='',
        check_hosts=True,
        num=None,
        file_bugs=False,
        timeout=24,
        timeout_mins=None,
        priority=priorities.Priority.DEFAULT,
        suite_args=None,
        wait_for_results=True,
        job_retry=False,
        max_retries=None,
        max_runtime_mins=None,
        suite_min_duts=0,
        offload_failures_only=False,
        builds=None,
        test_source_build=None,
        run_prod_code=False,
        delay_minutes=0,
        is_cloning=False,
        **kwargs
):
    """
    Create a job to run a test suite on the given device with the given image.

    When the timeout specified in the control file is reached, the
    job is guaranteed to have completed and results will be available.

    @param name: The test name if control_file is supplied, otherwise the name
                 of the test suite to run, e.g. 'bvt'.
    @param board: the kind of device to run the tests on.
    @param builds: the builds to install e.g.
                   {'cros-version:': 'x86-alex-release/R18-1655.0.0',
                    'fwrw-version:': 'x86-alex-firmware/R36-5771.50.0',
                    'fwro-version:': 'x86-alex-firmware/R36-5771.49.0'}
                   If builds is given a value, it overrides argument build.
    @param test_source_build: Build that contains the server-side test code.
    @param pool: Specify the pool of machines to use for scheduling
                 purposes.
    @param control_file: the control file of the job.
    @param check_hosts: require appropriate live hosts to exist in the lab.
    @param num: Specify the number of machines to schedule across (integer).
                Leave unspecified or use None to use default sharding factor.
    @param file_bugs: File a bug on each test failure in this suite.
    @param timeout: The max lifetime of this suite, in hours.
    @param timeout_mins: The max lifetime of this suite, in minutes. Takes
                         priority over timeout.
    @param priority: Integer denoting priority. Higher is more important.
    @param suite_args: Optional arguments which will be parsed by the suite
                       control file. Used by control.test_that_wrapper to
                       determine which tests to run.
    @param wait_for_results: Set to False to run the suite job without waiting
                             for test jobs to finish. Default is True.
    @param job_retry: Set to True to enable job-level retry. Default is False.
    @param max_retries: Integer, maximum job retries allowed at suite level.
                        None for no max.
    @param max_runtime_mins: Maximum amount of time a job can be running in
                             minutes.
    @param suite_min_duts: Integer. Scheduler will prioritize getting the
                           minimum number of machines for the suite when it is
                           competing with another suite that has a higher
                           priority but already got minimum machines it needs.
    @param offload_failures_only: Only enable gs_offloading for failed jobs.
    @param run_prod_code: If True, the suite will run the test code that
                          lives in prod aka the test code currently on the
                          lab servers. If False, the control files and test
                          code for this suite run will be retrieved from the
                          build artifacts.
    @param delay_minutes: Delay the creation of test jobs for a given number of
                          minutes.
    @param is_cloning: True if creating a cloning job.
    @param kwargs: extra keyword args. NOT USED.

    @raises ControlFileNotFound: if a unique suite control file doesn't exist.
    @raises NoControlFileList: if we can't list the control files at all.
    @raises StageControlFileFailure: If the dev server throws 500 while
                                     staging test_suites.
    @raises ControlFileEmpty: if the control file exists on the server, but
                              can't be read.

    @return: the job ID of the suite; -1 on error.
    """
    # Validate `num` early so a bad value fails fast with a clear message.
    if type(num) is not int and num is not None:
        raise error.SuiteArgumentException('Ill specified num argument %r. '
                                           'Must be an integer or None.' % num)
    if num == 0:
        logging.warning("Can't run on 0 hosts; using default.")
        num = None

    if builds is None:
        builds = {}

    # Default test source build to CrOS build if it's not specified and
    # run_prod_code is set to False.
    if not run_prod_code:
        test_source_build = Suite.get_test_source_build(
                builds, test_source_build=test_source_build)

    sample_dut = rpc_utils.get_sample_dut(board, pool)

    suite_name = canonicalize_suite_name(name)
    if run_prod_code:
        ds = dev_server.resolve(test_source_build, hostname=sample_dut)
        keyvals = {}
    else:
        (ds, keyvals) = _stage_build_artifacts(
                test_source_build, hostname=sample_dut)
    keyvals[constants.SUITE_MIN_DUTS_KEY] = suite_min_duts

    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    if run_prod_code:
        # If run_prod_code is True, test_source_build is not set, use the
        # first build in the builds list for the suite job name.
        name = '%s-%s' % (builds.values()[0], suite_name)
    else:
        name = '%s-%s' % (test_source_build, suite_name)

    # Fall back to the hour-granularity `timeout` when the minute-granularity
    # values are not supplied.
    timeout_mins = timeout_mins or timeout * 60
    max_runtime_mins = max_runtime_mins or timeout * 60

    if not board:
        # Derive the board from the CrOS version build name.
        board = utils.ParseBuildName(builds[provision.CROS_VERSION_PREFIX])[0]

    if run_prod_code:
        control_file = _get_control_file_by_suite(suite_name)

    if not control_file:
        # No control file was supplied so look it up from the build artifacts.
        control_file = _get_control_file_by_build(
                test_source_build, ds, suite_name)

    # Prepend builds and board to the control file.
    if is_cloning:
        # Strip any variables injected by a previous run before re-injecting.
        control_file = tools.remove_injection(control_file)

    inject_dict = {
        'board': board,
        # `build` is needed for suites like AU to stage image inside suite
        # control file.
        'build': test_source_build,
        'builds': builds,
        'check_hosts': check_hosts,
        'pool': pool,
        'num': num,
        'file_bugs': file_bugs,
        'timeout': timeout,
        'timeout_mins': timeout_mins,
        'devserver_url': ds.url(),
        'priority': priority,
        'suite_args' : suite_args,
        'wait_for_results': wait_for_results,
        'job_retry': job_retry,
        'max_retries': max_retries,
        'max_runtime_mins': max_runtime_mins,
        'offload_failures_only': offload_failures_only,
        'test_source_build': test_source_build,
        'run_prod_code': run_prod_code,
        'delay_minutes': delay_minutes,
    }
    control_file = tools.inject_vars(inject_dict, control_file)

    # The suite itself runs as a hostless server job.
    return rpc_utils.create_job_common(name,
                                       priority=priority,
                                       timeout_mins=timeout_mins,
                                       max_runtime_mins=max_runtime_mins,
                                       control_type='Server',
                                       control_file=control_file,
                                       hostless=True,
                                       keyvals=keyvals)
| 2066 | |
| 2067 | |
def get_job_history(**filter_data):
    """Get history of the job, including the special tasks executed for the job

    @param filter_data: filter for the call, should at least include
                        {'job_id': [job id]}
    @returns: JSON string of the job's history, including the information such
              as the hosts run the job and the special tasks executed before
              and after the job.
    """
    info = job_history.get_job_info(filter_data['job_id'])
    return rpc_utils.prepare_for_serialization(info.get_history())
| 2080 | |
| 2081 | |
def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Get history of a list of host.

    The return is a JSON string of host history for each host, for example,
    {'172.22.33.51': [{'status': 'Resetting'
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                       {'status': 'Running'
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                     ]
    }
    @param start_time: start time to search for history, can be string value or
                       epoch time.
    @param end_time: end time to search for history, can be string value or
                     epoch time.
    @param hosts: A list of hosts to search for history. Default is None.
    @param board: board type of hosts. Default is None.
    @param pool: pool type of hosts. Default is None.
    @returns: JSON string of the host history.
    """
    details = host_history.get_history_details(
            start_time=start_time, end_time=end_time,
            hosts=hosts, board=board, pool=pool,
            process_pool_size=4)
    return rpc_utils.prepare_for_serialization(details)
| 2112 | |
| 2113 | |
def shard_heartbeat(shard_hostname, jobs=(), hqes=(), known_job_ids=(),
                    known_host_ids=(), known_host_statuses=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated with
                 newer status from a shard. Note that for every hostqueueentry
                 the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.
    @param known_host_statuses: List of statuses of hosts the shard already
                                has, parallel to known_host_ids.

    @returns: Serialized representations of hosts, jobs, suite job keyvals
              and their dependencies to be inserted into a shard's database.
    """
    # The following alternatives to sending host and job ids in every heartbeat
    # have been considered:
    # 1. Sending the highest known job and host ids. This would work for jobs:
    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
    #    particular shard during a heartbeat, it never will be assigned to this
    #    shard later.
    #    This is not true for hosts though: A host that is leased won't be sent
    #    to the shard now, but might be sent in a future heartbeat. This means
    #    sometimes hosts should be transfered that have a lower id than the
    #    maximum host id the shard knows.
    # 2. Send the number of jobs/hosts the shard knows to the master in each
    #    heartbeat. Compare these to the number of records that already have
    #    the shard_id set to this shard. In the normal case, they should match.
    #    In case they don't, resend all entities of that type.
    #    This would work well for hosts, because there aren't that many.
    #    Resending all jobs is quite a big overhead though.
    #    Also, this approach might run into edge cases when entities are
    #    ever deleted.
    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
    #    Using two different approaches isn't consistent and might cause
    #    confusion. Also the issues with the case of deletions might still
    #    occur.
    #
    # The overhead of sending all job and host ids in every heartbeat is low:
    # At peaks one board has about 1200 created but unfinished jobs.
    # See the numbers here: http://goo.gl/gQCGWH
    # Assuming that job id's have 6 digits and that json serialization takes a
    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
    # A NOT IN query with 5000 ids took about 30ms in tests made.
    # These numbers seem low enough to outweigh the disadvantages of the
    # solutions described above.
    timer = autotest_stats.Timer('shard_heartbeat')
    with timer:
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        # The two sequences are parallel; a length mismatch indicates a
        # malformed heartbeat payload.
        assert len(known_host_ids) == len(known_host_statuses)
        # Propagate host status changes reported by the shard into the
        # master's database, touching only hosts whose status differs.
        for host_id, host_status in zip(known_host_ids, known_host_statuses):
            host_model = models.Host.objects.get(pk=host_id)
            if host_model.status != host_status:
                host_model.status = host_status
                host_model.save()

        hosts, jobs, suite_keyvals = rpc_utils.find_records_for_shard(
                shard_obj, known_job_ids=known_job_ids,
                known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
            'suite_keyvals': [kv.serialize() for kv in suite_keyvals],
        }
| 2182 | |
| 2183 | |
def get_shards(**filter_data):
    """Return all shards matching the given filters.

    @returns A sequence of nested dictionaries of shard information,
             each augmented with the shard's label names.
    """
    shard_objs = models.Shard.query_objects(filter_data)
    shard_dicts = rpc_utils.prepare_rows_as_nested_dicts(shard_objs, ())
    for shard_dict, shard_obj in zip(shard_dicts, shard_objs):
        shard_dict['labels'] = [l.name for l in shard_obj.labels.all()]
    return shard_dicts
| 2195 | |
| 2196 | |
def _assign_board_to_shard_precheck(labels):
    """Verify whether board labels are valid to be added to a given shard.

    For each comma-separated label, three checks are made: the label must be
    in `board:*` format, the label must exist, and the board must not already
    be assigned to a shard.

    @param labels: Board labels separated by comma.

    @raises error.RPCException: If a label doesn't start with `board:` or its
            board is already assigned to a shard.
    @raises models.Label.DoesNotExist: If a specified label doesn't exist.

    @returns: A list of label models ready to be added to a shard.
    """
    label_models = []
    for label_name in labels.split(','):
        # Reject anything that is not a board label.
        if not label_name.startswith('board:'):
            raise error.RPCException('Sharding only supports `board:.*` label.')
        # smart_get raises models.Label.DoesNotExist for unknown labels.
        label = models.Label.smart_get(label_name)
        try:
            shard = models.Shard.objects.get(labels=label)
        except models.Shard.DoesNotExist:
            # Board is not on any shard, so it's valid.
            label_models.append(label)
        else:
            raise error.RPCException(
                    '%s is already on shard %s' % (label, shard.hostname))
    return label_models
| 2230 | |
| 2231 | |
def add_shard(hostname, labels):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param labels: Board labels separated by comma. Jobs of one of the labels
                   will be assigned to the shard.

    @raises error.RPCException: If a label doesn't start with `board:` or its
            board is already assigned to a shard.
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exists.
    @raises models.Label.DoesNotExist: If a specified label doesn't exist.

    @returns: The id of the added shard.
    """
    # Validate before creating the shard so a bad label leaves no new row.
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.add_object(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
| 2252 | |
| 2253 | |
def add_board_to_shard(hostname, labels):
    """Add boards to an existing shard.

    @param hostname: The hostname of the shard to be changed.
    @param labels: Board labels separated by comma.

    @raises error.RPCException: If a label doesn't start with `board:` or its
            board is already assigned to a shard.
    @raises models.Label.DoesNotExist: If a specified label doesn't exist.

    @returns: The id of the changed shard.
    """
    # Validate before looking up the shard so a bad label changes nothing.
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.objects.get(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
| 2271 | |
| 2272 | |
def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Reboot task with highest priority is scheduled for them.
    This reboots the DUTs and then all left tasks continue to run in drone of
    the master.

    The procedure for deleting a shard:
        * Lock all unlocked hosts on that shard.
        * Remove shard information.
        * Assign a reboot task with highest priority to these hosts.
        * Unlock these hosts, then, the reboot tasks run in front of all other
          tasks.

    The status of jobs that haven't been reported to be finished yet, will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.

    @raises SmartGetError (from rpc_utils.retrieve_shard): If no shard with
            the given hostname exists.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)
    # Snapshot the unlocked hostnames now; the same list is used for locking,
    # reboot-job creation, and the final unlock below. Already-locked hosts
    # are deliberately left alone.
    hostnames_to_lock = [h.hostname for h in
                         models.Host.objects.filter(shard=shard, locked=False)]

    # TODO(beeps): Power off shard
    # For ChromeOS hosts, a reboot test with the highest priority is added to
    # the DUT. After a reboot it should be guaranteed that no processes from
    # prior tests that were run by a shard are still running on.

    # Lock all unlocked hosts so nothing else schedules on them while the
    # shard association is being torn down.
    dicts = {'locked': True, 'lock_time': datetime.datetime.now()}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)

    # Remove shard information: detach hosts and jobs, then drop the shard
    # row itself (labels must be cleared first for the m2m relation).
    models.Host.objects.filter(shard=shard).update(shard=None)
    models.Job.objects.filter(shard=shard).update(shard=None)
    shard.labels.clear()
    shard.delete()

    # Assign a reboot task with highest priority: Super.
    t = models.Test.objects.get(name='platform_BootPerfServer:shard')
    c = utils.read_file(os.path.join(common.autotest_dir, t.path))
    if hostnames_to_lock:
        rpc_utils.create_job_common(
                'reboot_dut_for_shard_deletion',
                priority=priorities.Priority.SUPER,
                control_type='Server',
                control_file=c, hosts=hostnames_to_lock)

    # Unlock these shard-related hosts; the SUPER-priority reboot job queued
    # above will run before any other pending work on them.
    dicts = {'locked': False, 'lock_time': None}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)
| 2325 | |
| 2326 | |
def get_servers(hostname=None, role=None, status=None):
    """Get a list of servers with matching role and status.

    @param hostname: FQDN of the server.
    @param role: Name of the server role, e.g., drone, scheduler. Default to
                 None to match any role.
    @param status: Status of the server, e.g., primary, backup, repair_required.
                   Default to None to match any server status.

    @raises error.RPCException: If server database is not used.
    @return: A list of detail dictionaries for servers with matching role
             and status.
    """
    if not server_manager_utils.use_server_db():
        raise error.RPCException('Server database is not enabled. Please try '
                                 'retrieve servers from global config.')
    matched_servers = server_manager_utils.get_servers(
            hostname=hostname, role=role, status=status)
    return [server.get_details() for server in matched_servers]
| 2345 | |
| 2346 | |
@rpc_utils.route_rpc_to_master
def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
    """Get stable version for the given board.

    @param board: Name of the board.
    @param android: If True, the given board is an Android-based device. If
                    False, assume it's a Chrome OS-based device.

    @return: Stable version of the given board. Falls back to the global
             config value of CROS.stable_cros_version when the
             stable_versions table has no entry for the board or DEFAULT.
    """
    return stable_version_utils.get(android=android, board=board)
| 2360 | |
| 2361 | |
@rpc_utils.route_rpc_to_master
def get_all_stable_versions():
    """Get stable versions for all boards.

    @return: A dictionary mapping board name to stable version.
    """
    return stable_version_utils.get_all()
| 2369 | |
| 2370 | |
@rpc_utils.route_rpc_to_master
def set_stable_version(version, board=stable_version_utils.DEFAULT):
    """Set the stable version for the given board.

    @param version: The new value of stable version for given board.
    @param board: Name of the board, default to value `DEFAULT`.
    """
    stable_version_utils.set(board=board, version=version)
| 2379 | |
| 2380 | |
@rpc_utils.route_rpc_to_master
def delete_stable_version(board):
    """Delete the stable version entry for the given board.

    Removes the board's row from the afe_stable_versions table so the
    default stable version will be used for it instead.

    @param board: Name of the board.
    """
    stable_version_utils.delete(board=board)
| 2391 | |
| 2392 | |
def get_tests_by_build(build, ignore_invalid_tests=True):
    """Get the tests that are available for the specified build.

    @param build: unique name by which to refer to the image.
    @param ignore_invalid_tests: flag on if unparsable tests are ignored.

    @raises: Whatever control_data.parse_control_string raises, when
             ignore_invalid_tests is False and a control file fails to parse.

    @return: A sorted list of all tests that are in the build specified.
    """
    # Collect the control files specified in this build
    cfile_getter = control_file_lib._initialize_control_file_getter(build)
    if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
        control_file_info_list = cfile_getter.get_suite_info()
        control_file_list = control_file_info_list.keys()
    else:
        control_file_list = cfile_getter.get_control_file_list()

    test_objects = []
    _id = 0
    for control_file_path in control_file_list:
        # Read and parse the control file
        if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
            control_file = control_file_info_list[control_file_path]
        else:
            control_file = cfile_getter.get_control_file_contents(
                    control_file_path)
        try:
            control_obj = control_data.parse_control_string(control_file)
        except Exception:
            logging.info('Failed to parse control file: %s', control_file_path)
            if not ignore_invalid_tests:
                raise
            # Bug fix: without this `continue`, control flow fell through and
            # reused control_obj from the previous iteration (or raised
            # NameError on the first file), producing a bogus duplicate test
            # entry. Skip the unparsable file instead.
            continue

        # Extract the values needed for the AFE from the control_obj.
        # The keys list represents attributes in the control_obj that
        # are required by the AFE
        keys = ['author', 'doc', 'name', 'time', 'test_type', 'experimental',
                'test_category', 'test_class', 'dependencies', 'run_verify',
                'sync_count', 'job_retries', 'retries', 'path']

        test_object = {}
        for key in keys:
            test_object[key] = getattr(control_obj, key) if hasattr(
                    control_obj, key) else ''

        # Unfortunately, the AFE expects different key-names for certain
        # values, these must be corrected to avoid the risk of tests
        # being omitted by the AFE.
        # The 'id' is an additional value used in the AFE.
        # The control_data parsing does not reference 'run_reset', but it
        # is also used in the AFE and defaults to True.
        test_object['id'] = _id
        test_object['run_reset'] = True
        test_object['description'] = test_object.get('doc', '')
        test_object['test_time'] = test_object.get('time', 0)
        test_object['test_retry'] = test_object.get('retries', 0)

        # Fix the test name to be consistent with the current presentation
        # of test names in the AFE.
        testpath, subname = os.path.split(control_file_path)
        testname = os.path.basename(testpath)
        subname = subname.split('.')[1:]
        if subname:
            testname = '%s:%s' % (testname, ':'.join(subname))

        test_object['name'] = testname

        # Correct the test path as parse_control_string sets an empty string.
        test_object['path'] = control_file_path

        _id += 1
        test_objects.append(test_object)

    test_objects = sorted(test_objects, key=lambda x: x.get('name'))
    return rpc_utils.prepare_for_serialization(test_objects)