blob: 8f5ae7bc5004d6fb629ad68ae14d4af6dd17cf22 [file] [log] [blame]
Richard Barnette6c2b70a2017-01-26 13:40:51 -08001# pylint: disable=missing-docstring
Don Garretta06ea082017-01-13 00:04:26 +00002
mblighe8819cd2008-02-15 16:48:40 +00003"""\
4Functions to expose over the RPC interface.
5
6For all modify* and delete* functions that ask for an 'id' parameter to
7identify the object to operate on, the id may be either
8 * the database row ID
9 * the name of the object (label name, hostname, user login, etc.)
10 * a dictionary containing uniquely identifying field (this option should seldom
11 be used)
12
13When specifying foreign key fields (i.e. adding hosts to a label, or adding
14users to an ACL group), the given value may be either the database row ID or the
15name of the object.
16
17All get* functions return lists of dictionaries. Each dictionary represents one
18object and maps field names to values.
19
20Some examples:
21modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
22modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
23modify_test('sleeptest', test_type='Client', params=', seconds=60')
24delete_acl_group(1) # delete by ID
25delete_acl_group('Everyone') # delete by name
26acl_group_add_users('Everyone', ['mbligh', 'showard'])
27get_jobs(owner='showard', status='Queued')
28
mbligh93c80e62009-02-03 17:48:30 +000029See doctests/001_rpc_test.txt for (lots) more examples.
mblighe8819cd2008-02-15 16:48:40 +000030"""
31
32__author__ = 'showard@google.com (Steve Howard)'
33
Michael Tang6dc174e2016-05-31 23:13:42 -070034import ast
showard29f7cd22009-04-29 21:16:24 +000035import datetime
Shuqian Zhao4c0d2902016-01-12 17:03:15 -080036import logging
Allen Licdd00f22017-02-01 18:01:52 -080037import os
Dan Shi4a3deb82016-10-27 21:32:30 -070038import sys
MK Ryu9c5fbbe2015-02-11 15:46:22 -080039
Moises Osorio2dc7a102014-12-02 18:24:02 -080040from django.db.models import Count
Allen Licdd00f22017-02-01 18:01:52 -080041
showardcafd16e2009-05-29 18:37:49 +000042import common
Aviv Keshet14cac442016-11-20 21:44:11 -080043# TODO(akeshet): Replace with monarch stats once we know how to instrument rpc
44# server with ts_mon.
Gabe Black1e1c41b2015-02-04 23:55:15 -080045from autotest_lib.client.common_lib.cros.graphite import autotest_stats
Allen Licdd00f22017-02-01 18:01:52 -080046from autotest_lib.client.common_lib import control_data
47from autotest_lib.client.common_lib import error
48from autotest_lib.client.common_lib import global_config
49from autotest_lib.client.common_lib import priorities
50from autotest_lib.client.common_lib import time_utils
51from autotest_lib.client.common_lib.cros import dev_server
Allen Lia59b1262016-12-14 12:53:51 -080052from autotest_lib.frontend.afe import control_file as control_file_lib
Allen Licdd00f22017-02-01 18:01:52 -080053from autotest_lib.frontend.afe import model_attributes
54from autotest_lib.frontend.afe import model_logic
55from autotest_lib.frontend.afe import models
Allen Lia59b1262016-12-14 12:53:51 -080056from autotest_lib.frontend.afe import rpc_utils
Moises Osorio2dc7a102014-12-02 18:24:02 -080057from autotest_lib.frontend.tko import models as tko_models
Jiaxi Luoaac54572014-06-04 13:57:02 -070058from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
J. Richard Barnetteb5164d62015-04-13 12:59:31 -070059from autotest_lib.server import frontend
Simran Basi71206ef2014-08-13 13:51:18 -070060from autotest_lib.server import utils
Dan Shid215dbe2015-06-18 16:14:59 -070061from autotest_lib.server.cros import provision
Allen Licdd00f22017-02-01 18:01:52 -080062from autotest_lib.server.cros.dynamic_suite import constants
63from autotest_lib.server.cros.dynamic_suite import control_file_getter
64from autotest_lib.server.cros.dynamic_suite import suite as SuiteBase
Jiaxi Luo90190c92014-06-18 12:35:57 -070065from autotest_lib.server.cros.dynamic_suite import tools
Allen Licdd00f22017-02-01 18:01:52 -080066from autotest_lib.server.cros.dynamic_suite.suite import Suite
Aviv Keshet7ee95862016-08-30 15:18:27 -070067from autotest_lib.server.lib import status_history
Allen Licdd00f22017-02-01 18:01:52 -080068from autotest_lib.site_utils import host_history
69from autotest_lib.site_utils import job_history
70from autotest_lib.site_utils import server_manager_utils
71from autotest_lib.site_utils import stable_version_utils
mblighe8819cd2008-02-15 16:48:40 +000072
Moises Osorio2dc7a102014-12-02 18:24:02 -080073
Gabe Black1e1c41b2015-02-04 23:55:15 -080074_timer = autotest_stats.Timer('rpc_interface')
Moises Osorio2dc7a102014-12-02 18:24:02 -080075
Allen Licdd00f22017-02-01 18:01:52 -080076_CONFIG = global_config.global_config
77
78# Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.
79
def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job.

    @param job: Job model object that has an associated parameterized job.

    @returns The value stored in the job's 'image' test parameter.
    """
    autoupdate_test = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_param = autoupdate_test.testparameter_set.get(test=autoupdate_test,
                                                       name='image')
    parameters = job.parameterized_job.parameterizedjobparameter_set
    return parameters.get(test_parameter=image_param).parameter_value
88
89
mblighe8819cd2008-02-15 16:48:40 +000090# labels
91
def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.
    """
    label_obj = models.Label.smart_get(id)
    label_obj.update_object(data)

    # Only the master fans the change out to the shards that serve
    # hosts carrying this label; a shard applies it locally only.
    if utils.is_shard():
        return
    rpc_utils.fanout_rpc(label_obj.host_set.all(), 'modify_label', False,
                         id=id, **data)
105
mblighe8819cd2008-02-15 16:48:40 +0000106
def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.
    """
    label_model = models.Label.smart_get(id)
    # Snapshot the hosts carrying the label before deleting it: the
    # host set is unreachable once the label row is gone, and the list
    # is needed below to route the RPC to the shards serving them.
    hosts = [models.Host.smart_get(h.id) for h in label_model.host_set.all()]
    label_model.delete()

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'delete_label', False, id=id)
mblighe8819cd2008-02-15 16:48:40 +0000123
Prashanth Balasubramanian744898f2015-01-13 05:04:16 -0800124
MK Ryu9c5fbbe2015-02-11 15:46:22 -0800125def add_label(name, ignore_exception_if_exists=False, **kwargs):
MK Ryucf027c62015-03-04 12:00:50 -0800126 """Adds a new label of a given name.
MK Ryu9c5fbbe2015-02-11 15:46:22 -0800127
128 @param name: label name.
129 @param ignore_exception_if_exists: If True and the exception was
130 thrown due to the duplicated label name when adding a label,
131 then suppress the exception. Default is False.
132 @param kwargs: keyword args that store more info about a label
133 other than the name.
134 @return: int/long id of a new label.
135 """
136 # models.Label.add_object() throws model_logic.ValidationError
137 # when it is given a label name that already exists.
138 # However, ValidationError can be thrown with different errors,
139 # and those errors should be thrown up to the call chain.
140 try:
141 label = models.Label.add_object(name=name, **kwargs)
142 except:
143 exc_info = sys.exc_info()
144 if ignore_exception_if_exists:
145 label = rpc_utils.get_label(name)
146 # If the exception is raised not because of duplicated
147 # "name", then raise the original exception.
148 if label is None:
149 raise exc_info[0], exc_info[1], exc_info[2]
150 else:
151 raise exc_info[0], exc_info[1], exc_info[2]
152 return label.id
153
154
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label_obj = models.Label.smart_get(id)
    host_objs = models.Host.smart_get_bulk(hosts)
    # Validate before attaching: at most one platform label per host,
    # and only mutually compatible board labels.
    if label_obj.platform:
        models.Host.check_no_platform(host_objs)
    if label_obj.name.startswith('board:'):
        models.Host.check_board_labels_allowed(host_objs, [label_obj.name])
    label_obj.host_set.add(*host_objs)
171
172
def _create_label_everywhere(id, hosts):
    """Ensure a label exists on the master and the shards serving `hosts`.

    ALERT! This method should be run only on master not shards!
    DO NOT RUN THIS ON A SHARD!!! Deputies will hate you if you do!!!

    Shared helper behind label_add_hosts() and host_add_labels(): it
    makes sure the label exists locally (creating it when the given id
    is a label name), then tells every shard servicing one of `hosts`
    to create the same label under the master's database id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # Mirrors the type checks inside smart_get: a missing label can
        # only be created when we were handed its name, not a numeric id.
        if not isinstance(id, basestring):
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)
        label = models.Label.smart_get(add_label(id))

    # Replicate the label to the shards, preserving the master's id.
    # A shard may already have the label (e.g. it was attached earlier
    # to a different host on that shard), because a new label is only
    # pushed to the shards of the hosts it is being attached to; such
    # duplicates are deliberately ignored on the shard side.
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
218
219
@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on master not shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
        while the label does not exist.
    """
    # Make sure the label exists on the master and the relevant shards.
    _create_label_everywhere(id, hosts)

    # Attach it locally on the master ...
    add_label_to_hosts(id, hosts)

    # ... and then on every shard servicing one of the hosts.
    rpc_utils.fanout_rpc(models.Host.smart_get_bulk(hosts),
                         'add_label_to_hosts', id=id)
showardbbabf502008-06-06 00:02:02 +0000243
244
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts that need to remove the label from.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    label_obj = models.Label.smart_get(id)
    label_obj.host_set.remove(*host_objs)
showardbbabf502008-06-06 00:02:02 +0000253
254
@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on master not shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    # Resolve the host objects first; they are needed for the shard
    # fan-out that follows the local removal.
    host_objs = models.Host.smart_get_bulk(hosts)
    remove_label_from_hosts(id, hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)
268
MK Ryucf027c62015-03-04 12:00:50 -0800269
def get_labels(exclude_filters=(), **filter_data):
    """Return matching labels as nested dictionaries.

    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    query = models.Label.query_objects(filter_data)
    for excluded in exclude_filters:
        query = query.exclude(**excluded)
    return rpc_utils.prepare_rows_as_nested_dicts(query, ('atomic_group',))
showardc92da832009-04-07 18:14:34 +0000280
281
282# atomic groups
283
def add_atomic_group(name, max_number_of_machines=None, description=None):
    """Create an atomic group and return its database id."""
    group = models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description)
    return group.id
288
289
def modify_atomic_group(id, **data):
    """Update fields of the atomic group identified by `id`."""
    group = models.AtomicGroup.smart_get(id)
    group.update_object(data)
292
293
def delete_atomic_group(id):
    """Delete the atomic group identified by `id`."""
    group = models.AtomicGroup.smart_get(id)
    group.delete()
296
297
def atomic_group_add_labels(id, labels):
    """Attach the given labels (ids or names) to an atomic group."""
    label_objs = models.Label.smart_get_bulk(labels)
    group = models.AtomicGroup.smart_get(id)
    group.label_set.add(*label_objs)
301
302
def atomic_group_remove_labels(id, labels):
    """Detach the given labels (ids or names) from an atomic group."""
    label_objs = models.Label.smart_get_bulk(labels)
    group = models.AtomicGroup.smart_get(id)
    group.label_set.remove(*label_objs)
306
307
def get_atomic_groups(**filter_data):
    """List atomic groups matching `filter_data`, ready for serialization."""
    groups = models.AtomicGroup.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(groups)
mblighe8819cd2008-02-15 16:48:40 +0000311
312
313# hosts
314
def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    """Create a host row and return its database id.

    A host created in the locked state must carry a lock reason.
    """
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    host = models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection)
    return host.id
mblighe8819cd2008-02-15 16:48:40 +0000323
324
@rpc_utils.route_rpc_to_master
def modify_host(id, **kwargs):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard, this
    will call `modify_host_local` RPC to the responsible shard. This means if
    a host is being locked using this function, this change will also propagate
    to shards.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    @param id: id of the host to modify.
    @param kwargs: key=value pairs of values to set on the host.

    @raises model_logic.ValidationError: If the locking change is invalid,
        unless 'force_modify_locking' is set truthy in kwargs, in which
        case the error is logged and the change is enforced anyway.
    """
    rpc_utils.check_modify_host(kwargs)
    host = models.Host.smart_get(id)
    try:
        rpc_utils.check_modify_host_locking(host, kwargs)
    except model_logic.ValidationError as e:
        # 'force_modify_locking' lets callers override lock-change
        # validation; the failure is still logged for the record.
        if not kwargs.get('force_modify_locking', False):
            raise
        logging.exception('The following exception will be ignored and lock '
                          'modification will be enforced. %s', e)

    # This is required to make `lock_time` for a host be exactly same
    # between the master and a shard.
    if kwargs.get('locked', None) and 'lock_time' not in kwargs:
        kwargs['lock_time'] = datetime.datetime.now()
    host.update_object(kwargs)

    # force_modifying_locking is not an internal field in database, remove
    # it before forwarding so the shard update does not choke on it.
    kwargs.pop('force_modify_locking', None)
    rpc_utils.fanout_rpc([host], 'modify_host_local',
                         include_hostnames=False, id=id, **kwargs)
mblighe8819cd2008-02-15 16:48:40 +0000359
360
def modify_host_local(id, **kwargs):
    """Modify host attributes in local DB.

    @param id: Host id.
    @param kwargs: key=value pairs of values to set on the host.
    """
    host = models.Host.smart_get(id)
    host.update_object(kwargs)
368
369
@rpc_utils.route_rpc_to_master
def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts in that match the
    filters is assigned to a shard, this will call `modify_hosts_local` RPC
    to the responsible shard.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    The filters are always applied on the master, not on the shards. This means
    if the states of a host differ on the master and a shard, the state on the
    master will be used. I.e. this means:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
    update the host (both on the shard and on the master), because the state
    of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair failed'
    will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    # Copy so the 'lock_time' / 'force_modify_locking' mutations below do
    # not leak into the caller's dictionary.
    update_data = update_data.copy()
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        try:
            rpc_utils.check_modify_host_locking(host, update_data)
        except model_logic.ValidationError as e:
            # 'force_modify_locking' overrides lock-change validation;
            # the failure is still logged for the record.
            if not update_data.get('force_modify_locking', False):
                raise
            logging.exception('The following exception will be ignored and '
                              'lock modification will be enforced. %s', e)

        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    # This is required to make `lock_time` for a host be exactly same
    # between the master and a shard.
    if update_data.get('locked', None) and 'lock_time' not in update_data:
        update_data['lock_time'] = datetime.datetime.now()
    for host in hosts:
        host.update_object(update_data)

    # force_modify_locking is not a database field; strip it before the
    # shard-side update.
    update_data.pop('force_modify_locking', None)
    # Caution: Changing the filter from the original here. See docstring.
    rpc_utils.run_rpc_on_multiple_hostnames(
            'modify_hosts_local', affected_shard_hostnames,
            host_filter_data={'id__in': affected_host_ids},
            update_data=update_data)
428
showard276f9442009-05-20 00:33:16 +0000429
def modify_hosts_local(host_filter_data, update_data):
    """Modify attributes of hosts in local DB.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    matched_hosts = models.Host.query_objects(host_filter_data)
    for host in matched_hosts:
        host.update_object(update_data)
438
439
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.add(*label_objs)
448
449
@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform label, or
        a set of board labels that are not allowed together.
    """
    # Create the labels on the master/shards.
    for label in labels:
        _create_label_everywhere(label, [id])

    label_objs = models.Label.smart_get_bulk(labels)
    # Partition the requested labels for validation: at most one
    # platform label, and only a compatible set of 'board:' labels.
    platforms = [label.name for label in label_objs if label.platform]
    boards = [label.name for label in label_objs
              if label.name.startswith('board:')]
    if len(platforms) > 1 or not utils.board_labels_allowed(boards):
        raise model_logic.ValidationError(
            {'labels': ('Adding more than one platform label, or a list of '
                        'non-compatible board labels.: %s %s' %
                        (', '.join(platforms), ', '.join(boards)))})

    host_obj = models.Host.smart_get(id)
    # Also validate against labels the host already carries.
    if platforms:
        models.Host.check_no_platform([host_obj])
    if boards:
        models.Host.check_board_labels_allowed([host_obj], labels)
    add_labels_to_host(id, labels)

    # Propagate the attachment to the shard servicing this host.
    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
mblighe8819cd2008-02-15 16:48:40 +0000482
483
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.remove(*label_objs)
492
493
@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    # Detach locally (on the master) first, then on the host's shard.
    remove_labels_from_host(id, labels)
    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
mblighe8819cd2008-02-15 16:48:40 +0000506
507
def get_host_attribute(attribute, **host_filter_data):
    """Collect the named attribute from every matching host.

    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
        act upon
    """
    hosts = rpc_utils.get_host_query((), False, False, True, host_filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    # Keep only the attribute rows whose name matches the request.
    host_attr_dicts = [attr_obj.get_object_dict()
                       for host_obj in hosts
                       for attr_obj in host_obj.attribute_list
                       if attr_obj.attribute == attribute]
    return rpc_utils.prepare_for_serialization(host_attr_dicts)
524
525
def set_host_attribute(attribute, value, **host_filter_data):
    """Set (or delete) an attribute on all matching hosts.

    @param attribute: string name of attribute
    @param value: string, or None to delete an attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
        act upon
    """
    # NOTE(review): `assert` is stripped under `python -O`, which would
    # silently remove this guard — consider raising explicitly instead.
    assert host_filter_data # disallow accidental actions on all hosts
    hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    for host in hosts:
        host.set_or_delete_attribute(attribute, value)

    # Master forwards this RPC to shards.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'set_host_attribute', False,
                attribute=attribute, value=value, **host_filter_data)
543
showard0957a842009-05-11 19:25:08 +0000544
@rpc_utils.forward_single_host_rpc_to_shard
def delete_host(id):
    """Delete the host identified by `id` (id or hostname)."""
    host = models.Host.smart_get(id)
    host.delete()
mblighe8819cd2008-02-15 16:48:40 +0000548
549
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True,
              include_current_job=False, **filter_data):
    """Get a list of dictionaries which contains the information of hosts.

    @param multiple_labels: match hosts in all of the labels given. Should
        be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
        "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
        atomic group labels associated with them.
    @param valid_only: If True, restrict the query to valid hosts only.
    @param include_current_job: Set to True to include ids of currently running
        job and special task.
    @param filter_data: Extra filters applied to the host query.

    @returns A serializable list of per-host dictionaries, each carrying
        the host's fields plus 'labels', 'platform', 'atomic_group',
        'acls' and 'attributes' (and, when requested, 'current_job' /
        'current_special_task').
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    # Prefetch related rows in bulk to avoid per-host queries below.
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        if include_current_job:
            host_dict['current_job'] = None
            host_dict['current_special_task'] = None
            entries = models.HostQueueEntry.objects.filter(
                    host_id=host_dict['id'], active=True, complete=False)
            if entries:
                host_dict['current_job'] = (
                        entries[0].get_object_dict()['job'])
            tasks = models.SpecialTask.objects.filter(
                    host_id=host_dict['id'], is_active=True, is_complete=False)
            if tasks:
                # Encoded as '<task id>-<lowercased task name>'.
                host_dict['current_special_task'] = (
                        '%d-%s' % (tasks[0].get_object_dict()['id'],
                                   tasks[0].get_object_dict()['task'].lower()))
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000600
601
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """Count the hosts matching the query.

    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    query = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    return query.count()
showard1385b162008-03-13 15:59:40 +0000615
mblighe8819cd2008-02-15 16:48:40 +0000616
617# tests
618
def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    """Create a test row and return its database id."""
    new_test = models.Test.add_object(
            name=name, test_type=test_type, path=path, author=author,
            dependencies=dependencies, experimental=experimental,
            run_verify=run_verify, test_time=test_time,
            test_category=test_category, sync_count=sync_count,
            test_class=test_class, description=description)
    return new_test.id
mblighe8819cd2008-02-15 16:48:40 +0000631
632
def modify_test(id, **data):
    """Update fields of an existing test identified by id or name."""
    test = models.Test.smart_get(id)
    test.update_object(data)
mblighe8819cd2008-02-15 16:48:40 +0000635
636
def delete_test(id):
    """Delete the test identified by id or name."""
    test = models.Test.smart_get(id)
    test.delete()
mblighe8819cd2008-02-15 16:48:40 +0000639
640
def get_tests(**filter_data):
    """Return a serializable list of dicts for the matching tests."""
    tests = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(tests)
mblighe8819cd2008-02-15 16:48:40 +0000644
645
@_timer.decorate
def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
    """Gets the counts of all passed and failed tests from the matching jobs.

    @param job_name_prefix: Name prefix of the jobs to get the summary
           from, e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'.
    @param label_name: Label that must be set in the jobs, e.g.,
           'cros-version:butterfly-release/R40-6457.21.0'.

    @returns A summary of the counts of all the passed and failed tests.
    """
    summary = {'passed': 0, 'failed': 0}
    matching_jobs = models.Job.objects.filter(
            name__startswith=job_name_prefix,
            dependency_labels__name=label_name)
    job_ids = list(matching_jobs.values_list('pk', flat=True))
    if not job_ids:
        return summary

    # Per-status counts over all matched jobs' test views, excluding the
    # bookkeeping SERVER_JOB / CLIENT_JOB entries.
    test_views = tko_models.TestView.objects.filter(afe_job_id__in=job_ids)
    test_views = test_views.exclude(test_name='SERVER_JOB')
    test_views = test_views.exclude(test_name__startswith='CLIENT_JOB')
    status_counts = test_views.values('status').annotate(
            count=Count('status'))
    for entry in status_counts:
        # Only 'GOOD' counts as a pass; every other status is a failure.
        bucket = 'passed' if entry['status'] == 'GOOD' else 'failed'
        summary[bucket] += entry['count']
    return summary
677
678
showard2b9a88b2008-06-13 20:55:03 +0000679# profilers
680
def add_profiler(name, description=None):
    """Create a new profiler record and return its database ID."""
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
683
684
def modify_profiler(id, **data):
    """Update fields of an existing profiler identified by id or name."""
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
687
688
def delete_profiler(id):
    """Delete the profiler identified by id or name."""
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
691
692
def get_profilers(**filter_data):
    """Return a serializable list of dicts for the matching profilers."""
    profilers = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profilers)
696
697
mblighe8819cd2008-02-15 16:48:40 +0000698# users
699
def add_user(login, access_level=None):
    """Create a new user record and return its database ID."""
    user = models.User.add_object(login=login, access_level=access_level)
    return user.id
mblighe8819cd2008-02-15 16:48:40 +0000702
703
def modify_user(id, **data):
    """Update fields of an existing user identified by id or login."""
    user = models.User.smart_get(id)
    user.update_object(data)
mblighe8819cd2008-02-15 16:48:40 +0000706
707
def delete_user(id):
    """Delete the user identified by id or login."""
    user = models.User.smart_get(id)
    user.delete()
mblighe8819cd2008-02-15 16:48:40 +0000710
711
def get_users(**filter_data):
    """Return a serializable list of dicts for the matching users."""
    users = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(users)
mblighe8819cd2008-02-15 16:48:40 +0000715
716
717# acl groups
718
def add_acl_group(name, description=None):
    """Create an ACL group whose initial member is the calling user.

    @returns The database ID of the new ACL group.
    """
    group = models.AclGroup.add_object(name=name, description=description)
    current_user = models.User.current_user()
    group.users.add(current_user)
    return group.id
mblighe8819cd2008-02-15 16:48:40 +0000723
724
def modify_acl_group(id, **data):
    """Update an ACL group after verifying the caller may modify it."""
    acl_group = models.AclGroup.smart_get(id)
    # Permission check must precede the update.
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    # Never leave a group without members.
    acl_group.add_current_user_if_empty()
mblighe8819cd2008-02-15 16:48:40 +0000730
731
def acl_group_add_users(id, users):
    """Add the given users (ids or logins) to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objects)
mblighe8819cd2008-02-15 16:48:40 +0000737
738
def acl_group_remove_users(id, users):
    """Remove the given users (ids or logins) from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objects)
    # Never leave a group without members.
    acl_group.add_current_user_if_empty()
mblighe8819cd2008-02-15 16:48:40 +0000745
746
def acl_group_add_hosts(id, hosts):
    """Add the given hosts (ids or hostnames) to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objects)
    # Keep derived host/ACL state consistent with the new membership.
    acl_group.on_host_membership_change()
mblighe8819cd2008-02-15 16:48:40 +0000753
754
def acl_group_remove_hosts(id, hosts):
    """Remove the given hosts (ids or hostnames) from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objects)
    # Keep derived host/ACL state consistent with the new membership.
    acl_group.on_host_membership_change()
mblighe8819cd2008-02-15 16:48:40 +0000761
762
def delete_acl_group(id):
    """Delete the ACL group identified by id or name."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
mblighe8819cd2008-02-15 16:48:40 +0000765
766
def get_acl_groups(**filter_data):
    """Return a list of dicts for the matching ACL groups.

    Each dict additionally carries the member user logins under 'users'
    and the member hostnames under 'hosts'.
    """
    group_dicts = models.AclGroup.list_objects(filter_data)
    for group_dict in group_dicts:
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [user.login for user in group.users.all()]
        group_dict['hosts'] = [host.hostname for host in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(group_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000776
777
778# jobs
779
def generate_control_file(tests=(), profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, db_tests=True,
                          test_source_build=None):
    """
    Generates a client-side control file to run tests.

    @param tests List of tests to run. See db_tests for more information.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explcitly set the default mode at all.
    @param db_tests: if True, the test object can be found in the database
                     backing the test model. In this case, tests is a tuple
                     of test IDs which are used to retrieve the test objects
                     from the database. If False, tests is a tuple of test
                     dictionaries stored client-side in the AFE.
    @param test_source_build: Build to be used to retrieve test code. Default
                              to None.

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
                     synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    if not tests and not client_control_file:
        # Nothing to generate; return an empty asynchronous client job.
        return {'control_file': '', 'is_server': False, 'synch_count': 1,
                'dependencies': []}

    cf_info, test_objs, profiler_objs = (
            rpc_utils.prepare_generate_control_file(tests, profilers,
                                                    db_tests))
    cf_info['control_file'] = control_file_lib.generate_control(
            tests=test_objs, profilers=profiler_objs,
            is_server=cf_info['is_server'],
            client_control_file=client_control_file,
            profile_only=profile_only,
            test_source_build=test_source_build)
    return cf_info
mblighe8819cd2008-02-15 16:48:40 +0000829
830
def create_parameterized_job(
        name,
        priority,
        test,
        parameters,
        kernel=None,
        label=None,
        profilers=(),
        profiler_parameters=None,
        use_container=False,
        profile_only=None,
        upload_kernel_config=False,
        hosts=(),
        meta_hosts=(),
        one_time_hosts=(),
        atomic_group_name=None,
        synch_count=None,
        is_template=False,
        timeout=None,
        timeout_mins=None,
        max_runtime_mins=None,
        run_verify=False,
        email_list='',
        dependencies=(),
        reboot_before=None,
        reboot_after=None,
        parse_failed_repair=None,
        hostless=False,
        keyvals=None,
        drone_set=None,
        run_reset=True,
        require_ssp=None):
    """
    Creates and enqueues a parameterized job.

    Most parameters a combination of the parameters for generate_control_file()
    and create_job(), with the exception of:

    @param test name or ID of the test to run
    @param parameters a map of parameter name ->
                          tuple of (param value, param type)
    @param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                (param value,
                                                                 param type)

    @returns The created Job id number.
    """
    # Set up the parameterized job configs
    test_obj = models.Test.smart_get(test)
    control_type = test_obj.test_type

    try:
        label = models.Label.smart_get(label)
    except models.Label.DoesNotExist:
        label = None

    kernel_objs = models.Kernel.create_kernels(kernel)
    profiler_objs = [models.Profiler.smart_get(profiler)
                     for profiler in profilers]

    # Fix: the default for profiler_parameters is None, which previously
    # raised AttributeError on .get() whenever profilers were requested
    # without any parameters.  Treat None as an empty mapping.
    if profiler_parameters is None:
        profiler_parameters = {}

    parameterized_job = models.ParameterizedJob.objects.create(
            test=test_obj, label=label, use_container=use_container,
            profile_only=profile_only,
            upload_kernel_config=upload_kernel_config)
    parameterized_job.kernels.add(*kernel_objs)

    for profiler in profiler_objs:
        parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
                parameterized_job=parameterized_job,
                profiler=profiler)
        profiler_params = profiler_parameters.get(profiler.name, {})
        # Fix: the loop variable was previously called 'name', which
        # clobbered the job-name parameter passed to create_job_common below.
        for param_name, (value, param_type) in profiler_params.iteritems():
            models.ParameterizedJobProfilerParameter.objects.create(
                    parameterized_job_profiler=parameterized_profiler,
                    parameter_name=param_name,
                    parameter_value=value,
                    parameter_type=param_type)

    try:
        # Record a value (and type) for every declared test parameter; any
        # leftover entries in 'parameters' are unknown to the test and are
        # treated as an error.
        for parameter in test_obj.testparameter_set.all():
            if parameter.name in parameters:
                param_value, param_type = parameters.pop(parameter.name)
                parameterized_job.parameterizedjobparameter_set.create(
                    test_parameter=parameter, parameter_value=param_value,
                    parameter_type=param_type)

        if parameters:
            raise Exception('Extra parameters remain: %r' % parameters)

        return rpc_utils.create_job_common(
                name=name,
                priority=priority,
                control_type=control_type,
                hosts=hosts,
                meta_hosts=meta_hosts,
                one_time_hosts=one_time_hosts,
                atomic_group_name=atomic_group_name,
                synch_count=synch_count,
                is_template=is_template,
                timeout=timeout,
                timeout_mins=timeout_mins,
                max_runtime_mins=max_runtime_mins,
                run_verify=run_verify,
                email_list=email_list,
                dependencies=dependencies,
                reboot_before=reboot_before,
                reboot_after=reboot_after,
                parse_failed_repair=parse_failed_repair,
                hostless=hostless,
                keyvals=keyvals,
                drone_set=drone_set,
                parameterized_job=parameterized_job.id,
                run_reset=run_reset,
                require_ssp=require_ssp)
    except:
        # Best-effort cleanup of the partially-created job, then re-raise
        # the original exception unchanged.
        parameterized_job.delete()
        raise
948
949
def create_job_page_handler(name, priority, control_file, control_type,
                            image=None, hostless=False, firmware_rw_build=None,
                            firmware_ro_build=None, test_source_build=None,
                            is_cloning=False, **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param image: ChromeOS build to be installed in the dut. Default to None.
    @param firmware_rw_build: Firmware build to update RW firmware. Default to
                              None, i.e., RW firmware will not be updated.
    @param firmware_ro_build: Firmware build to update RO firmware. Default to
                              None, i.e., RO firmware will not be updated.
    @param test_source_build: Build to be used to retrieve test code. Default
                              to None.
    @param is_cloning: True if creating a cloning job.
    @param kwargs extra args that will be required by create_suite_job or
                  create_job.

    @returns The created Job id number.
    """
    if is_cloning:
        logging.info('Start to clone a new job')
        # A cloned job must not carry both hosts and meta_hosts: that would
        # let host-scheduler schedule two hqe jobs onto one host at the same
        # time and crash itself, so drop the meta_hosts in that case.
        if kwargs.get('hosts') and kwargs.get('meta_hosts'):
            kwargs['meta_hosts'] = []
    else:
        logging.info('Start to create a new job')

    control_file = rpc_utils.encode_ascii(control_file)
    if not control_file:
        raise model_logic.ValidationError({
                'control_file' : "Control file cannot be empty"})

    if image and hostless:
        # A hostless job with an image is a suite; collect all requested
        # builds for provisioning.
        builds = {provision.CROS_VERSION_PREFIX: image}
        if firmware_rw_build:
            builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build
        if firmware_ro_build:
            builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build
        return create_suite_job(
                name=name, control_file=control_file, priority=priority,
                builds=builds, test_source_build=test_source_build,
                is_cloning=is_cloning, **kwargs)

    return create_job(name, priority, control_file, control_type, image=image,
                      hostless=hostless, **kwargs)
Simran Basib6ec8ae2014-04-23 12:05:08 -07001001
1002
@rpc_utils.route_rpc_to_master
def create_job(
        name,
        priority,
        control_file,
        control_type,
        hosts=(),
        meta_hosts=(),
        one_time_hosts=(),
        atomic_group_name=None,
        synch_count=None,
        is_template=False,
        timeout=None,
        timeout_mins=None,
        max_runtime_mins=None,
        run_verify=False,
        email_list='',
        dependencies=(),
        reboot_before=None,
        reboot_after=None,
        parse_failed_repair=None,
        hostless=False,
        keyvals=None,
        drone_set=None,
        image=None,
        parent_job_id=None,
        test_retry=0,
        run_reset=True,
        require_ssp=None,
        args=(),
        **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
        synch_count == 1 means the job is asynchronous.  If an atomic group is
        given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param timeout_mins Minutes after this call returns until the job times
        out.
    @param max_runtime_mins Minutes from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
        this job will be parsed as part of the job.
    @param hostless if true, create a hostless job
    @param keyvals dict of keyvals to associate with the job
    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
        one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.
    @param drone_set The name of the drone set to run this test on.
    @param image OS image to install before running job.
    @param parent_job_id id of a job considered to be parent of created job.
    @param test_retry Number of times to retry test if the test did not
        complete successfully. (optional, default: 0)
    @param run_reset Should the host be reset before running the test?
    @param require_ssp Set to True to require server-side packaging to run the
                       test. If it's set to None, drone will still try to run
                       the server side with server-side packaging. If the
                       autotest-server package doesn't exist for the build or
                       image is not set, drone will run the test without server-
                       side packaging. Default is None.
    @param args A list of args to be injected into control file.
    @param kwargs extra keyword args. NOT USED.

    @returns The created Job id number.
    """
    if args:
        control_file = tools.inject_vars({'args': args}, control_file)
    if image:
        # Fix: build a new tuple rather than using '+=', which mutated a
        # caller-supplied dependencies list in place.
        dependencies = tuple(dependencies) + (
                provision.image_version_to_label(image),)
    return rpc_utils.create_job_common(
            name=name,
            priority=priority,
            control_type=control_type,
            control_file=control_file,
            hosts=hosts,
            meta_hosts=meta_hosts,
            one_time_hosts=one_time_hosts,
            atomic_group_name=atomic_group_name,
            synch_count=synch_count,
            is_template=is_template,
            timeout=timeout,
            timeout_mins=timeout_mins,
            max_runtime_mins=max_runtime_mins,
            run_verify=run_verify,
            email_list=email_list,
            dependencies=dependencies,
            reboot_before=reboot_before,
            reboot_after=reboot_after,
            parse_failed_repair=parse_failed_repair,
            hostless=hostless,
            keyvals=keyvals,
            drone_set=drone_set,
            parent_job_id=parent_job_id,
            test_retry=test_retry,
            run_reset=run_reset,
            require_ssp=require_ssp)
mblighe8819cd2008-02-15 16:48:40 +00001111
1112
showard9dbdcda2008-10-14 17:34:36 +00001113def abort_host_queue_entries(**filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001114 """\
showard9dbdcda2008-10-14 17:34:36 +00001115 Abort a set of host queue entries.
Fang Deng63b0e452014-12-19 14:38:15 -08001116
1117 @return: A list of dictionaries, each contains information
1118 about an aborted HQE.
jadmanski0afbb632008-06-06 21:10:57 +00001119 """
showard9dbdcda2008-10-14 17:34:36 +00001120 query = models.HostQueueEntry.query_objects(filter_data)
beepsfaecbce2013-10-29 11:35:10 -07001121
1122 # Dont allow aborts on:
1123 # 1. Jobs that have already completed (whether or not they were aborted)
1124 # 2. Jobs that we have already been aborted (but may not have completed)
1125 query = query.filter(complete=False).filter(aborted=False)
showarddc817512008-11-12 18:16:41 +00001126 models.AclGroup.check_abort_permissions(query)
showard9dbdcda2008-10-14 17:34:36 +00001127 host_queue_entries = list(query.select_related())
showard2bab8f42008-11-12 18:15:22 +00001128 rpc_utils.check_abort_synchronous_jobs(host_queue_entries)
mblighe8819cd2008-02-15 16:48:40 +00001129
Simran Basic1b26762013-06-26 14:23:21 -07001130 models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
Fang Deng63b0e452014-12-19 14:38:15 -08001131 hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id,
1132 'Job name': hqe.job.name} for hqe in host_queue_entries]
1133 return hqe_info
showard9d821ab2008-07-11 16:54:29 +00001134
1135
beeps8bb1f7d2013-08-05 01:30:09 -07001136def abort_special_tasks(**filter_data):
1137 """\
1138 Abort the special task, or tasks, specified in the filter.
1139 """
1140 query = models.SpecialTask.query_objects(filter_data)
1141 special_tasks = query.filter(is_active=True)
1142 for task in special_tasks:
1143 task.abort()
1144
1145
def _call_special_tasks_on_hosts(task, hosts):
    """\
    Schedules a set of hosts for a special task.

    @returns A list of hostnames that a special task was created for.
    """
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    # Refuse to schedule locally for hosts that live on a shard (unless we
    # are that shard).
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
    if shard_host_map and not utils.is_shard():
        raise ValueError('The following hosts are on shards, please '
                         'follow the link to the shards and create jobs '
                         'there instead. %s.' % shard_host_map)
    for host in hosts:
        models.SpecialTask.schedule_special_task(host, task)
    return sorted(host.hostname for host in hosts)
1161
1162
def _forward_special_tasks_on_hosts(task, rpc, **filter_data):
    """Forward special tasks to corresponding shards.

    For master, when special tasks are fired on hosts that are sharded,
    forward the RPC to corresponding shards.

    For shard, create special task records in local DB.

    @param task: Enum value of frontend.afe.models.SpecialTask.Task
    @param rpc: RPC name to forward.
    @param filter_data: Filter keywords to be used for DB query.

    @return: A list of hostnames that a special task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    # Map of shard hostname -> list of hostnames of hosts living on it.
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True)

    # Filter out hosts on a shard from those on the master, forward
    # rpcs to the shard with an additional hostname__in filter, and
    # create a local SpecialTask for each remaining host.
    if shard_host_map and not utils.is_shard():
        # Only hosts without a shard get a local task (see below).
        hosts = [h for h in hosts if h.shard is None]
        for shard, hostnames in shard_host_map.iteritems():

            # The main client of this module is the frontend website, and
            # it invokes it with an 'id' or an 'id__in' filter. Regardless,
            # the 'hostname' filter should narrow down the list of hosts on
            # each shard even though we supply all the ids in filter_data.
            # This method uses hostname instead of id because it fits better
            # with the overall architecture of redirection functions in
            # rpc_utils.
            shard_filter = filter_data.copy()
            shard_filter['hostname__in'] = hostnames
            rpc_utils.run_rpc_on_multiple_hostnames(
                    rpc, [shard], **shard_filter)

    # There is a race condition here if someone assigns a shard to one of these
    # hosts before we create the task. The host will stay on the master if:
    # 1. The host is not Ready
    # 2. The host is Ready but has a task
    # But if the host is Ready and doesn't have a task yet, it will get sent
    # to the shard as we're creating a task here.

    # Given that we only rarely verify Ready hosts it isn't worth putting this
    # entire method in a transaction. The worst case scenario is that we have
    # a verify running on a Ready host while the shard is using it, if the
    # verify fails no subsequent tasks will be created against the host on the
    # master, and verifies are safe enough that this is OK.
    return _call_special_tasks_on_hosts(task, hosts)
1212
1213
def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    task = models.SpecialTask.Task.VERIFY
    return _forward_special_tasks_on_hosts(task, 'reverify_hosts',
                                           **filter_data)
Simran Basi73dae552013-02-25 14:57:46 -08001222
1223
def repair_hosts(**filter_data):
    """\
    Schedules a set of hosts for repair.

    @returns A list of hostnames that a repair task was created for.
    """
    task = models.SpecialTask.Task.REPAIR
    return _forward_special_tasks_on_hosts(task, 'repair_hosts',
                                           **filter_data)
showard1ff7b2e2009-05-15 23:17:18 +00001232
1233
def get_jobs(not_yet_run=False, running=False, finished=False,
             suite=False, sub=False, standalone=False, **filter_data):
    """\
    Extra status filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have start running but for which not
    all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
    aborted).

    Extra type filter args for get_jobs:
    -suite: Include only jobs with child jobs.
    -sub: Include only jobs with a parent job.
    -standalone: Inlcude only jobs with no child or parent jobs.
    At most one of these three fields should be specified.
    """
    status_args = rpc_utils.extra_job_status_filters(not_yet_run, running,
                                                     finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(
            status_args, suite, sub, standalone)
    jobs = list(models.Job.query_objects(filter_data))
    # Bulk-load relationships to avoid one query per job.
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    models.Job.objects.populate_relationships(jobs, models.JobKeyval,
                                              'keyvals')
    job_dicts = []
    for job in jobs:
        job_dict = job.get_object_dict()
        job_dict['dependencies'] = ','.join(label.name
                                            for label in job.dependencies)
        job_dict['keyvals'] = dict((keyval.key, keyval.value)
                                   for keyval in job.keyvals)
        if job.parameterized_job:
            job_dict['image'] = get_parameterized_autoupdate_image_url(job)
        job_dicts.append(job_dict)
    return rpc_utils.prepare_for_serialization(job_dicts)
mblighe8819cd2008-02-15 16:48:40 +00001272
1273
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 suite=False, sub=False, standalone=False,
                 **filter_data):
    """\
    Count the jobs matching the given filters.

    See get_jobs() for documentation of extra filter parameters.
    """
    status_args = rpc_utils.extra_job_status_filters(not_yet_run, running,
                                                     finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(
            status_args, suite, sub, standalone)
    return models.Job.query_count(filter_data)
mblighe8819cd2008-02-15 16:48:40 +00001288
1289
mblighe8819cd2008-02-15 16:48:40 +00001290def get_jobs_summary(**filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001291 """\
Jiaxi Luoaac54572014-06-04 13:57:02 -07001292 Like get_jobs(), but adds 'status_counts' and 'result_counts' field.
1293
1294 'status_counts' filed is a dictionary mapping status strings to the number
1295 of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}.
1296
1297 'result_counts' field is piped to tko's rpc_interface and has the return
1298 format specified under get_group_counts.
jadmanski0afbb632008-06-06 21:10:57 +00001299 """
1300 jobs = get_jobs(**filter_data)
1301 ids = [job['id'] for job in jobs]
1302 all_status_counts = models.Job.objects.get_status_counts(ids)
1303 for job in jobs:
1304 job['status_counts'] = all_status_counts[job['id']]
Jiaxi Luoaac54572014-06-04 13:57:02 -07001305 job['result_counts'] = tko_rpc_interface.get_status_counts(
1306 ['afe_job_id', 'afe_job_id'],
1307 header_groups=[['afe_job_id'], ['afe_job_id']],
1308 **{'afe_job_id': job['id']})
jadmanski0afbb632008-06-06 21:10:57 +00001309 return rpc_utils.prepare_for_serialization(jobs)
mblighe8819cd2008-02-15 16:48:40 +00001310
1311
showarda965cef2009-05-15 23:17:41 +00001312def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
showarda8709c52008-07-03 19:44:54 +00001313 """\
1314 Retrieves all the information needed to clone a job.
1315 """
showarda8709c52008-07-03 19:44:54 +00001316 job = models.Job.objects.get(id=id)
showard29f7cd22009-04-29 21:16:24 +00001317 job_info = rpc_utils.get_job_info(job,
showarda965cef2009-05-15 23:17:41 +00001318 preserve_metahosts,
1319 queue_entry_filter_data)
showard945072f2008-09-03 20:34:59 +00001320
showardd9992fe2008-07-31 02:15:03 +00001321 host_dicts = []
showard29f7cd22009-04-29 21:16:24 +00001322 for host in job_info['hosts']:
1323 host_dict = get_hosts(id=host.id)[0]
1324 other_labels = host_dict['labels']
1325 if host_dict['platform']:
1326 other_labels.remove(host_dict['platform'])
1327 host_dict['other_labels'] = ', '.join(other_labels)
showardd9992fe2008-07-31 02:15:03 +00001328 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001329
showard29f7cd22009-04-29 21:16:24 +00001330 for host in job_info['one_time_hosts']:
1331 host_dict = dict(hostname=host.hostname,
1332 id=host.id,
1333 platform='(one-time host)',
1334 locked_text='')
1335 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001336
showard4d077562009-05-08 18:24:36 +00001337 # convert keys from Label objects to strings (names of labels)
showard29f7cd22009-04-29 21:16:24 +00001338 meta_host_counts = dict((meta_host.name, count) for meta_host, count
showard4d077562009-05-08 18:24:36 +00001339 in job_info['meta_host_counts'].iteritems())
showard29f7cd22009-04-29 21:16:24 +00001340
1341 info = dict(job=job.get_object_dict(),
1342 meta_host_counts=meta_host_counts,
1343 hosts=host_dicts)
1344 info['job']['dependencies'] = job_info['dependencies']
1345 if job_info['atomic_group']:
1346 info['atomic_group_name'] = (job_info['atomic_group']).name
1347 else:
1348 info['atomic_group_name'] = None
jamesren2275ef12010-04-12 18:25:06 +00001349 info['hostless'] = job_info['hostless']
jamesren76fcf192010-04-21 20:39:50 +00001350 info['drone_set'] = job.drone_set and job.drone_set.name
showarda8709c52008-07-03 19:44:54 +00001351
Michael Tang6dc174e2016-05-31 23:13:42 -07001352 image = _get_image_for_job(job, job_info['hostless'])
1353 if image:
1354 info['job']['image'] = image
Eric Lid23bc192011-02-09 14:38:57 -08001355
showarda8709c52008-07-03 19:44:54 +00001356 return rpc_utils.prepare_for_serialization(info)
1357
1358
Michael Tang6dc174e2016-05-31 23:13:42 -07001359def _get_image_for_job(job, hostless):
1360 """ Gets the image used for a job.
1361
1362 Gets the image used for an AFE job. If the job is a parameterized job, get
1363 the image from the job parameter; otherwise, tries to get the image from
1364 the job's keyvals 'build' or 'builds'. As a last resort, if the job is a
1365 hostless job, tries to get the image from its control file attributes
1366 'build' or 'builds'.
1367
1368 TODO(ntang): Needs to handle FAFT with two builds for ro/rw.
1369
1370 @param job An AFE job object.
1371 @param hostless Boolean on of the job is hostless.
1372
1373 @returns The image build used for the job.
1374 """
1375 image = None
1376 if job.parameterized_job:
1377 image = get_parameterized_autoupdate_image_url(job)
1378 else:
1379 keyvals = job.keyval_dict()
Michael Tang84a2ecf2016-06-07 15:10:53 -07001380 image = keyvals.get('build')
Michael Tang6dc174e2016-05-31 23:13:42 -07001381 if not image:
1382 value = keyvals.get('builds')
1383 builds = None
1384 if isinstance(value, dict):
1385 builds = value
1386 elif isinstance(value, basestring):
1387 builds = ast.literal_eval(value)
1388 if builds:
1389 image = builds.get('cros-version')
1390 if not image and hostless and job.control_file:
1391 try:
1392 control_obj = control_data.parse_control_string(
1393 job.control_file)
1394 if hasattr(control_obj, 'build'):
1395 image = getattr(control_obj, 'build')
1396 if not image and hasattr(control_obj, 'builds'):
1397 builds = getattr(control_obj, 'builds')
1398 image = builds.get('cros-version')
1399 except:
1400 logging.warning('Failed to parse control file for job: %s',
1401 job.name)
1402 return image
1403
showard34dc5fa2008-04-24 20:58:40 +00001404
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001405def get_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001406 """\
showardc92da832009-04-07 18:14:34 +00001407 @returns A sequence of nested dictionaries of host and job information.
jadmanski0afbb632008-06-06 21:10:57 +00001408 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001409 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1410 'started_on__lte',
1411 start_time,
1412 end_time,
1413 **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001414 return rpc_utils.prepare_rows_as_nested_dicts(
1415 models.HostQueueEntry.query_objects(filter_data),
1416 ('host', 'atomic_group', 'job'))
showard34dc5fa2008-04-24 20:58:40 +00001417
1418
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001419def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001420 """\
1421 Get the number of host queue entries associated with this job.
1422 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001423 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1424 'started_on__lte',
1425 start_time,
1426 end_time,
1427 **filter_data)
jadmanski0afbb632008-06-06 21:10:57 +00001428 return models.HostQueueEntry.query_count(filter_data)
showard34dc5fa2008-04-24 20:58:40 +00001429
1430
showard1e935f12008-07-11 00:11:36 +00001431def get_hqe_percentage_complete(**filter_data):
1432 """
showardc92da832009-04-07 18:14:34 +00001433 Computes the fraction of host queue entries matching the given filter data
showard1e935f12008-07-11 00:11:36 +00001434 that are complete.
1435 """
1436 query = models.HostQueueEntry.query_objects(filter_data)
1437 complete_count = query.filter(complete=True).count()
1438 total_count = query.count()
1439 if total_count == 0:
1440 return 1
1441 return float(complete_count) / total_count
1442
1443
showard1a5a4082009-07-28 20:01:37 +00001444# special tasks
1445
def get_special_tasks(**filter_data):
    """Get special task entries from the local database.

    Query the special tasks table for tasks matching the given
    `filter_data`, and return a list of the results. No attempt is
    made to forward the call to shards; the buck will stop here.
    The caller is expected to know the target shard for such reasons
    as:
      * The caller is a service (such as gs_offloader) configured
        to operate on behalf of one specific shard, and no other.
      * The caller has a host as a parameter, and knows that this is
        the shard assigned to that host.

    @param filter_data Filter keywords to pass to the underlying
                       database query.

    """
    tasks = models.SpecialTask.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
            tasks, ('host', 'queue_entry'))
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001466
1467
def get_host_special_tasks(host_id, **filter_data):
    """Get special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by `host_id` and matching the given `filter_data`.
    Return a list of the results.  If the host is assigned to a
    shard, forward this call to that shard.

    @param host_id Id in the database of the target host.
    @param filter_data Filter keywords to pass to the underlying
                       database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host = models.Host.smart_get(host_id, False)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
        return shard_afe.run('get_special_tasks',
                             host_id=host_id, **filter_data)
    return get_special_tasks(host_id=host_id, **filter_data)
showard1a5a4082009-07-28 20:01:37 +00001493
1494
MK Ryu0c1a37d2015-04-30 12:00:55 -07001495def get_num_special_tasks(**kwargs):
1496 """Get the number of special task entries from the local database.
1497
1498 Query the special tasks table for tasks matching the given 'kwargs',
1499 and return the number of the results. No attempt is made to forward
1500 the call to shards; the buck will stop here.
1501
1502 @param kwargs Filter keywords to pass to the underlying database query.
1503
1504 """
1505 return models.SpecialTask.query_count(kwargs)
1506
1507
def get_host_num_special_tasks(host, **kwargs):
    """Count special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by 'host' and matching the given 'kwargs', and return the
    number of matches.  If the host is assigned to a shard, forward
    this call to that shard.

    @param host id or name of a host. More often a hostname.
    @param kwargs Filter keywords to pass to the underlying database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host_model = models.Host.smart_get(host, False)
    if host_model.shard:
        shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname())
        return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
    return get_num_special_tasks(host=host, **kwargs)
1527
1528
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001529def get_status_task(host_id, end_time):
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001530 """Get the "status task" for a host from the local shard.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001531
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001532 Returns a single special task representing the given host's
1533 "status task". The status task is a completed special task that
1534 identifies whether the corresponding host was working or broken
1535 when it completed. A successful task indicates a working host;
1536 a failed task indicates broken.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001537
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001538 This call will not be forward to a shard; the receiving server
1539 must be the shard that owns the host.
1540
1541 @param host_id Id in the database of the target host.
1542 @param end_time Time reference for the host's status.
1543
1544 @return A single task; its status (successful or not)
1545 corresponds to the status of the host (working or
1546 broken) at the given time. If no task is found, return
1547 `None`.
1548
1549 """
1550 tasklist = rpc_utils.prepare_rows_as_nested_dicts(
1551 status_history.get_status_task(host_id, end_time),
1552 ('host', 'queue_entry'))
1553 return tasklist[0] if tasklist else None
1554
1555
def get_host_status_task(host_id, end_time):
    """Get the "status task" for a host from its owning shard.

    Finds the given host's owning shard, and forwards to it a call
    to `get_status_task()` (see above).

    @param host_id Id in the database of the target host.
    @param end_time Time reference for the host's status.

    @return A single task; its status (successful or not)
            corresponds to the status of the host (working or
            broken) at the given time.  If no task is found, return
            `None`.

    """
    host = models.Host.smart_get(host_id)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
        return shard_afe.run('get_status_task',
                             host_id=host_id, end_time=end_time)
    return get_status_task(host_id, end_time)
1582
1583
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001584def get_host_diagnosis_interval(host_id, end_time, success):
1585 """Find a "diagnosis interval" for a given host.
1586
1587 A "diagnosis interval" identifies a start and end time where
1588 the host went from "working" to "broken", or vice versa. The
1589 interval's starting time is the starting time of the last status
1590 task with the old status; the end time is the finish time of the
1591 first status task with the new status.
1592
1593 This routine finds the most recent diagnosis interval for the
1594 given host prior to `end_time`, with a starting status matching
1595 `success`. If `success` is true, the interval will start with a
1596 successful status task; if false the interval will start with a
1597 failed status task.
1598
1599 @param host_id Id in the database of the target host.
1600 @param end_time Time reference for the diagnosis interval.
1601 @param success Whether the diagnosis interval should start
1602 with a successful or failed status task.
1603
1604 @return A list of two strings. The first is the timestamp for
1605 the beginning of the interval; the second is the
1606 timestamp for the end. If the host has never changed
1607 state, the list is empty.
1608
1609 """
1610 host = models.Host.smart_get(host_id)
J. Richard Barnette78f281a2015-06-29 13:24:51 -07001611 if not host.shard or utils.is_shard():
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001612 return status_history.get_diagnosis_interval(
1613 host_id, end_time, success)
1614 else:
1615 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1616 return shard_afe.get_host_diagnosis_interval(
1617 host_id, end_time, success)
1618
1619
showardc0ac3a72009-07-08 21:14:45 +00001620# support for host detail view
1621
MK Ryu0c1a37d2015-04-30 12:00:55 -07001622def get_host_queue_entries_and_special_tasks(host, query_start=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001623 query_limit=None, start_time=None,
1624 end_time=None):
showardc0ac3a72009-07-08 21:14:45 +00001625 """
1626 @returns an interleaved list of HostQueueEntries and SpecialTasks,
1627 in approximate run order. each dict contains keys for type, host,
1628 job, status, started_on, execution_path, and ID.
1629 """
1630 total_limit = None
1631 if query_limit is not None:
1632 total_limit = query_start + query_limit
MK Ryu0c1a37d2015-04-30 12:00:55 -07001633 filter_data_common = {'host': host,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001634 'query_limit': total_limit,
1635 'sort_by': ['-id']}
showardc0ac3a72009-07-08 21:14:45 +00001636
MK Ryu0c1a37d2015-04-30 12:00:55 -07001637 filter_data_special_tasks = rpc_utils.inject_times_to_filter(
1638 'time_started__gte', 'time_started__lte', start_time, end_time,
1639 **filter_data_common)
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001640
MK Ryu0c1a37d2015-04-30 12:00:55 -07001641 queue_entries = get_host_queue_entries(
1642 start_time, end_time, **filter_data_common)
1643 special_tasks = get_host_special_tasks(host, **filter_data_special_tasks)
showardc0ac3a72009-07-08 21:14:45 +00001644
1645 interleaved_entries = rpc_utils.interleave_entries(queue_entries,
1646 special_tasks)
1647 if query_start is not None:
1648 interleaved_entries = interleaved_entries[query_start:]
1649 if query_limit is not None:
1650 interleaved_entries = interleaved_entries[:query_limit]
MK Ryu0c1a37d2015-04-30 12:00:55 -07001651 return rpc_utils.prepare_host_queue_entries_and_special_tasks(
1652 interleaved_entries, queue_entries)
showardc0ac3a72009-07-08 21:14:45 +00001653
1654
MK Ryu0c1a37d2015-04-30 12:00:55 -07001655def get_num_host_queue_entries_and_special_tasks(host, start_time=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001656 end_time=None):
MK Ryu0c1a37d2015-04-30 12:00:55 -07001657 filter_data_common = {'host': host}
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001658
1659 filter_data_queue_entries, filter_data_special_tasks = (
1660 rpc_utils.inject_times_to_hqe_special_tasks_filters(
1661 filter_data_common, start_time, end_time))
1662
1663 return (models.HostQueueEntry.query_count(filter_data_queue_entries)
MK Ryu0c1a37d2015-04-30 12:00:55 -07001664 + get_host_num_special_tasks(**filter_data_special_tasks))
showardc0ac3a72009-07-08 21:14:45 +00001665
1666
mblighe8819cd2008-02-15 16:48:40 +00001667# other
1668
showarde0b63622008-08-04 20:58:47 +00001669def echo(data=""):
1670 """\
1671 Returns a passed in string. For doing a basic test to see if RPC calls
1672 can successfully be made.
1673 """
1674 return data
1675
1676
showardb7a52fd2009-04-27 20:10:56 +00001677def get_motd():
1678 """\
1679 Returns the message of the day as a string.
1680 """
1681 return rpc_utils.get_motd()
1682
1683
mblighe8819cd2008-02-15 16:48:40 +00001684def get_static_data():
jadmanski0afbb632008-06-06 21:10:57 +00001685 """\
1686 Returns a dictionary containing a bunch of data that shouldn't change
1687 often and is otherwise inaccessible. This includes:
showardc92da832009-04-07 18:14:34 +00001688
1689 priorities: List of job priority choices.
1690 default_priority: Default priority value for new jobs.
1691 users: Sorted list of all users.
Jiaxi Luo31874592014-06-11 10:36:35 -07001692 labels: Sorted list of labels not start with 'cros-version' and
1693 'fw-version'.
showardc92da832009-04-07 18:14:34 +00001694 atomic_groups: Sorted list of all atomic groups.
1695 tests: Sorted list of all tests.
1696 profilers: Sorted list of all profilers.
1697 current_user: Logged-in username.
1698 host_statuses: Sorted list of possible Host statuses.
1699 job_statuses: Sorted list of possible HostQueueEntry statuses.
Simran Basi7e605742013-11-12 13:43:36 -08001700 job_timeout_default: The default job timeout length in minutes.
showarda1e74b32009-05-12 17:32:04 +00001701 parse_failed_repair_default: Default value for the parse_failed_repair job
Jiaxi Luo31874592014-06-11 10:36:35 -07001702 option.
showardc92da832009-04-07 18:14:34 +00001703 reboot_before_options: A list of valid RebootBefore string enums.
1704 reboot_after_options: A list of valid RebootAfter string enums.
1705 motd: Server's message of the day.
1706 status_dictionary: A mapping from one word job status names to a more
1707 informative description.
jadmanski0afbb632008-06-06 21:10:57 +00001708 """
showard21baa452008-10-21 00:08:39 +00001709
jamesren76fcf192010-04-21 20:39:50 +00001710 default_drone_set_name = models.DroneSet.default_drone_set_name()
1711 drone_sets = ([default_drone_set_name] +
1712 sorted(drone_set.name for drone_set in
1713 models.DroneSet.objects.exclude(
1714 name=default_drone_set_name)))
showard21baa452008-10-21 00:08:39 +00001715
jadmanski0afbb632008-06-06 21:10:57 +00001716 result = {}
Alex Miller7d658cf2013-09-04 16:00:35 -07001717 result['priorities'] = priorities.Priority.choices()
Alex Miller7d658cf2013-09-04 16:00:35 -07001718 result['default_priority'] = 'Default'
1719 result['max_schedulable_priority'] = priorities.Priority.DEFAULT
jadmanski0afbb632008-06-06 21:10:57 +00001720 result['users'] = get_users(sort_by=['login'])
Jiaxi Luo31874592014-06-11 10:36:35 -07001721
1722 label_exclude_filters = [{'name__startswith': 'cros-version'},
Dan Shi65351d62015-08-03 12:03:23 -07001723 {'name__startswith': 'fw-version'},
1724 {'name__startswith': 'fwrw-version'},
Dan Shi27516972016-03-16 14:03:41 -07001725 {'name__startswith': 'fwro-version'},
1726 {'name__startswith': 'ab-version'},
1727 {'name__startswith': 'testbed-version'}]
Jiaxi Luo31874592014-06-11 10:36:35 -07001728 result['labels'] = get_labels(
1729 label_exclude_filters,
1730 sort_by=['-platform', 'name'])
1731
showardc92da832009-04-07 18:14:34 +00001732 result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
jadmanski0afbb632008-06-06 21:10:57 +00001733 result['tests'] = get_tests(sort_by=['name'])
showard2b9a88b2008-06-13 20:55:03 +00001734 result['profilers'] = get_profilers(sort_by=['name'])
showard0fc38302008-10-23 00:44:07 +00001735 result['current_user'] = rpc_utils.prepare_for_serialization(
showard64a95952010-01-13 21:27:16 +00001736 models.User.current_user().get_object_dict())
showard2b9a88b2008-06-13 20:55:03 +00001737 result['host_statuses'] = sorted(models.Host.Status.names)
mbligh5a198b92008-12-11 19:33:29 +00001738 result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
Simran Basi7e605742013-11-12 13:43:36 -08001739 result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
Simran Basi34217022012-11-06 13:43:15 -08001740 result['job_max_runtime_mins_default'] = (
1741 models.Job.DEFAULT_MAX_RUNTIME_MINS)
showarda1e74b32009-05-12 17:32:04 +00001742 result['parse_failed_repair_default'] = bool(
1743 models.Job.DEFAULT_PARSE_FAILED_REPAIR)
jamesrendd855242010-03-02 22:23:44 +00001744 result['reboot_before_options'] = model_attributes.RebootBefore.names
1745 result['reboot_after_options'] = model_attributes.RebootAfter.names
showard8fbae652009-01-20 23:23:10 +00001746 result['motd'] = rpc_utils.get_motd()
jamesren76fcf192010-04-21 20:39:50 +00001747 result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
1748 result['drone_sets'] = drone_sets
jamesren4a41e012010-07-16 22:33:48 +00001749 result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
showard8ac29b42008-07-17 17:01:55 +00001750
showardd3dc1992009-04-22 21:01:40 +00001751 result['status_dictionary'] = {"Aborted": "Aborted",
showard8ac29b42008-07-17 17:01:55 +00001752 "Verifying": "Verifying Host",
Alex Millerdfff2fd2013-05-28 13:05:06 -07001753 "Provisioning": "Provisioning Host",
showard8ac29b42008-07-17 17:01:55 +00001754 "Pending": "Waiting on other hosts",
1755 "Running": "Running autoserv",
1756 "Completed": "Autoserv completed",
1757 "Failed": "Failed to complete",
showardd823b362008-07-24 16:35:46 +00001758 "Queued": "Queued",
showard5deb6772008-11-04 21:54:33 +00001759 "Starting": "Next in host's queue",
1760 "Stopped": "Other host(s) failed verify",
showardd3dc1992009-04-22 21:01:40 +00001761 "Parsing": "Awaiting parse of final results",
showard29f7cd22009-04-29 21:16:24 +00001762 "Gathering": "Gathering log files",
mbligh4608b002010-01-05 18:22:35 +00001763 "Waiting": "Waiting for scheduler action",
Dan Shi07e09af2013-04-12 09:31:29 -07001764 "Archiving": "Archiving results",
1765 "Resetting": "Resetting hosts"}
Jiaxi Luo421608e2014-07-07 14:38:00 -07001766
1767 result['wmatrix_url'] = rpc_utils.get_wmatrix_url()
Simran Basi71206ef2014-08-13 13:51:18 -07001768 result['is_moblab'] = bool(utils.is_moblab())
Jiaxi Luo421608e2014-07-07 14:38:00 -07001769
jadmanski0afbb632008-06-06 21:10:57 +00001770 return result
showard29f7cd22009-04-29 21:16:24 +00001771
1772
def get_server_time():
    """Return the server's current time, formatted as 'YYYY-MM-DD HH:MM'."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
Kevin Cheng19521982016-09-22 12:27:23 -07001775
1776
def get_hosts_by_attribute(attribute, value):
    """
    Get the list of valid hosts that share the same host attribute value.

    @param attribute: String of the host attribute to check.
    @param value: String of the value that is shared between hosts.

    @returns List of hostnames that all have the same host attribute and
             value.
    """
    attribute_rows = models.HostAttribute.query_objects(
            {'attribute': attribute, 'value': value})
    hostnames = []
    for row in attribute_rows:
        # Skip hosts marked invalid in the database.
        if row.host.invalid == 0:
            hostnames.append(row.host.hostname)
    return hostnames
Allen Licdd00f22017-02-01 18:01:52 -08001790
1791
def canonicalize_suite_name(suite_name):
    """Canonicalize the suite's name.

    @param suite_name: the name of the suite.
    """
    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    return 'test_suites/control.{}'.format(suite_name)
1800
1801
def formatted_now():
    """Format the current datetime using the project's standard time format."""
    now = datetime.datetime.now()
    return now.strftime(time_utils.TIME_FMT)
1805
1806
def _get_control_file_by_build(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    devserver_name = ds.hostname
    # Time the fetch per devserver and per suite; dots are replaced with
    # underscores because they delimit components of the stat name.
    timer = autotest_stats.Timer('control_files.parse.%s.%s' %
                                 (devserver_name.replace('.', '_'),
                                  suite_name.rsplit('.')[-1]))
    # Get the control file for the suite.
    try:
        with timer:
            control_file_in = getter.get_control_file_contents_by_name(
                    suite_name)
    except error.CrosDynamicSuiteException as e:
        # Re-raise the same exception type with build/devserver context
        # added to the message, so callers' except clauses still match.
        raise type(e)('Failed to get control file for %s '
                      '(devserver: %s) (error: %s)' %
                      (build, devserver_name, e))
    if not control_file_in:
        raise error.ControlFileEmpty(
            "Fetching %s returned no data. (devserver: %s)" %
            (suite_name, devserver_name))
    # Force control files to only contain ascii characters.
    # NOTE(review): on Python 2, encoding a byte string containing
    # non-ascii data performs an implicit decode first, which is why
    # UnicodeDecodeError (not UnicodeEncodeError) is caught here.
    try:
        control_file_in.encode('ascii')
    except UnicodeDecodeError as e:
        raise error.ControlFileMalformed(str(e))

    return control_file_in
1848
1849
def _get_control_file_by_suite(suite_name):
    """Get control file contents by suite name.

    @param suite_name: Suite name as string.
    @returns: Control file contents as string.
    """
    drone_dir = _CONFIG.get_config_value('SCHEDULER',
                                         'drone_installation_directory')
    getter = control_file_getter.FileSystemGetter([drone_dir])
    return getter.get_control_file_contents_by_name(suite_name)
1860
1861
def _stage_build_artifacts(build, hostname=None):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build image we want to stage.
    @param hostname hostname of a dut that may run the test. This is to help
                    locate a devserver closer to duts if needed. Default is
                    None.

    @raises StageControlFileFailure: if the dev server throws 500 while staging
                                     suite control files.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    # Ensure components of |build| necessary for installing images are staged
    # on the dev server.  However set synchronous to False to allow other
    # components to be downloaded in the background.
    ds = dev_server.resolve(build, hostname=hostname)
    ds_name = ds.hostname
    timings = {constants.DOWNLOAD_STARTED_TIME: formatted_now()}
    timer = autotest_stats.Timer('control_files.stage.%s' % (
            ds_name.replace('.', '_')))
    try:
        with timer:
            ds.stage_artifacts(image=build, artifacts=['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
                "Failed to stage %s on %s: %s" % (build, ds_name, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (ds, timings)
1893
1894
1895@rpc_utils.route_rpc_to_master
1896def create_suite_job(
1897 name='',
1898 board='',
1899 pool='',
1900 control_file='',
1901 check_hosts=True,
1902 num=None,
1903 file_bugs=False,
1904 timeout=24,
1905 timeout_mins=None,
1906 priority=priorities.Priority.DEFAULT,
1907 suite_args=None,
1908 wait_for_results=True,
1909 job_retry=False,
1910 max_retries=None,
1911 max_runtime_mins=None,
1912 suite_min_duts=0,
1913 offload_failures_only=False,
1914 builds=None,
1915 test_source_build=None,
1916 run_prod_code=False,
1917 delay_minutes=0,
1918 is_cloning=False,
1919 **kwargs
1920):
1921 """
1922 Create a job to run a test suite on the given device with the given image.
1923
1924 When the timeout specified in the control file is reached, the
1925 job is guaranteed to have completed and results will be available.
1926
1927 @param name: The test name if control_file is supplied, otherwise the name
1928 of the test suite to run, e.g. 'bvt'.
1929 @param board: the kind of device to run the tests on.
1930 @param builds: the builds to install e.g.
1931 {'cros-version:': 'x86-alex-release/R18-1655.0.0',
1932 'fwrw-version:': 'x86-alex-firmware/R36-5771.50.0',
1933 'fwro-version:': 'x86-alex-firmware/R36-5771.49.0'}
1934 If builds is given a value, it overrides argument build.
1935 @param test_source_build: Build that contains the server-side test code.
1936 @param pool: Specify the pool of machines to use for scheduling
1937 purposes.
1938 @param control_file: the control file of the job.
1939 @param check_hosts: require appropriate live hosts to exist in the lab.
1940 @param num: Specify the number of machines to schedule across (integer).
1941 Leave unspecified or use None to use default sharding factor.
1942 @param file_bugs: File a bug on each test failure in this suite.
1943 @param timeout: The max lifetime of this suite, in hours.
1944 @param timeout_mins: The max lifetime of this suite, in minutes. Takes
1945 priority over timeout.
1946 @param priority: Integer denoting priority. Higher is more important.
1947 @param suite_args: Optional arguments which will be parsed by the suite
1948 control file. Used by control.test_that_wrapper to
1949 determine which tests to run.
1950 @param wait_for_results: Set to False to run the suite job without waiting
1951 for test jobs to finish. Default is True.
1952 @param job_retry: Set to True to enable job-level retry. Default is False.
1953 @param max_retries: Integer, maximum job retries allowed at suite level.
1954 None for no max.
1955 @param max_runtime_mins: Maximum amount of time a job can be running in
1956 minutes.
1957 @param suite_min_duts: Integer. Scheduler will prioritize getting the
1958 minimum number of machines for the suite when it is
1959 competing with another suite that has a higher
1960 priority but already got minimum machines it needs.
1961 @param offload_failures_only: Only enable gs_offloading for failed jobs.
1962 @param run_prod_code: If True, the suite will run the test code that
1963 lives in prod aka the test code currently on the
1964 lab servers. If False, the control files and test
1965 code for this suite run will be retrieved from the
1966 build artifacts.
1967 @param delay_minutes: Delay the creation of test jobs for a given number of
1968 minutes.
1969 @param is_cloning: True if creating a cloning job.
1970 @param kwargs: extra keyword args. NOT USED.
1971
1972 @raises ControlFileNotFound: if a unique suite control file doesn't exist.
1973 @raises NoControlFileList: if we can't list the control files at all.
1974 @raises StageControlFileFailure: If the dev server throws 500 while
1975 staging test_suites.
1976 @raises ControlFileEmpty: if the control file exists on the server, but
1977 can't be read.
1978
1979 @return: the job ID of the suite; -1 on error.
1980 """
1981 if type(num) is not int and num is not None:
1982 raise error.SuiteArgumentException('Ill specified num argument %r. '
1983 'Must be an integer or None.' % num)
1984 if num == 0:
1985 logging.warning("Can't run on 0 hosts; using default.")
1986 num = None
1987
1988 if builds is None:
1989 builds = {}
1990
1991 # Default test source build to CrOS build if it's not specified and
1992 # run_prod_code is set to False.
1993 if not run_prod_code:
1994 test_source_build = Suite.get_test_source_build(
1995 builds, test_source_build=test_source_build)
1996
1997 sample_dut = rpc_utils.get_sample_dut(board, pool)
1998
1999 suite_name = canonicalize_suite_name(name)
2000 if run_prod_code:
2001 ds = dev_server.resolve(test_source_build, hostname=sample_dut)
2002 keyvals = {}
2003 else:
2004 (ds, keyvals) = _stage_build_artifacts(
2005 test_source_build, hostname=sample_dut)
2006 keyvals[constants.SUITE_MIN_DUTS_KEY] = suite_min_duts
2007
2008 # Do not change this naming convention without updating
2009 # site_utils.parse_job_name.
2010 if run_prod_code:
2011 # If run_prod_code is True, test_source_build is not set, use the
2012 # first build in the builds list for the sutie job name.
2013 name = '%s-%s' % (builds.values()[0], suite_name)
2014 else:
2015 name = '%s-%s' % (test_source_build, suite_name)
2016
2017 timeout_mins = timeout_mins or timeout * 60
2018 max_runtime_mins = max_runtime_mins or timeout * 60
2019
2020 if not board:
2021 board = utils.ParseBuildName(builds[provision.CROS_VERSION_PREFIX])[0]
2022
2023 if run_prod_code:
2024 control_file = _get_control_file_by_suite(suite_name)
2025
2026 if not control_file:
2027 # No control file was supplied so look it up from the build artifacts.
2028 control_file = _get_control_file_by_build(
2029 test_source_build, ds, suite_name)
2030
2031 # Prepend builds and board to the control file.
2032 if is_cloning:
2033 control_file = tools.remove_injection(control_file)
2034
2035 inject_dict = {
2036 'board': board,
2037 # `build` is needed for suites like AU to stage image inside suite
2038 # control file.
2039 'build': test_source_build,
2040 'builds': builds,
2041 'check_hosts': check_hosts,
2042 'pool': pool,
2043 'num': num,
2044 'file_bugs': file_bugs,
2045 'timeout': timeout,
2046 'timeout_mins': timeout_mins,
2047 'devserver_url': ds.url(),
2048 'priority': priority,
2049 'suite_args' : suite_args,
2050 'wait_for_results': wait_for_results,
2051 'job_retry': job_retry,
2052 'max_retries': max_retries,
2053 'max_runtime_mins': max_runtime_mins,
2054 'offload_failures_only': offload_failures_only,
2055 'test_source_build': test_source_build,
2056 'run_prod_code': run_prod_code,
2057 'delay_minutes': delay_minutes,
2058 }
2059 control_file = tools.inject_vars(inject_dict, control_file)
2060
2061 return rpc_utils.create_job_common(name,
2062 priority=priority,
2063 timeout_mins=timeout_mins,
2064 max_runtime_mins=max_runtime_mins,
2065 control_type='Server',
2066 control_file=control_file,
2067 hostless=True,
2068 keyvals=keyvals)
2069
2070
def get_job_history(**filter_data):
    """Retrieve the full history of a job, special tasks included.

    @param filter_data: Filter arguments for the lookup; must contain at
                        least {'job_id': [job id]}.
    @returns: JSON-serializable history of the job, covering the hosts the
              job ran on and the special tasks executed before and after
              the job.
    """
    job_info = job_history.get_job_info(filter_data['job_id'])
    history = job_info.get_history()
    return rpc_utils.prepare_for_serialization(history)
2083
2084
def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Collect history records for a set of hosts.

    The result maps each host to a list of history entries, for example,
    {'172.22.33.51': [{'status': 'Resetting'
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                       {'status': 'Running'
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                     ]
    }
    @param start_time: Start of the search window; string value or epoch
                       time.
    @param end_time: End of the search window; string value or epoch time.
    @param hosts: Optional list of hosts to search history for.
    @param board: Optional board type to filter hosts by.
    @param pool: Optional pool type to filter hosts by.
    @returns: JSON-serializable host history, keyed by host.
    """
    details = host_history.get_history_details(
            start_time=start_time, end_time=end_time,
            hosts=hosts, board=board, pool=pool,
            process_pool_size=4)
    return rpc_utils.prepare_for_serialization(details)
2115
2116
def shard_heartbeat(shard_hostname, jobs=(), hqes=(), known_job_ids=(),
                    known_host_ids=(), known_host_statuses=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated with
                 newer status from a shard. Note that for every hostqueueentry
                 the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.
    @param known_host_statuses: List of statuses of hosts the shard already
                                has; parallel to known_host_ids.

    @returns: Serialized representations of hosts, jobs, suite job keyvals
              and their dependencies to be inserted into a shard's database.
    """
    # The following alternatives to sending host and job ids in every heartbeat
    # have been considered:
    # 1. Sending the highest known job and host ids. This would work for jobs:
    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
    #    particular shard during a heartbeat, it never will be assigned to this
    #    shard later.
    #    This is not true for hosts though: A host that is leased won't be sent
    #    to the shard now, but might be sent in a future heartbeat. This means
    #    sometimes hosts should be transfered that have a lower id than the
    #    maximum host id the shard knows.
    # 2. Send the number of jobs/hosts the shard knows to the master in each
    #    heartbeat. Compare these to the number of records that already have
    #    the shard_id set to this shard. In the normal case, they should match.
    #    In case they don't, resend all entities of that type.
    #    This would work well for hosts, because there aren't that many.
    #    Resending all jobs is quite a big overhead though.
    #    Also, this approach might run into edge cases when entities are
    #    ever deleted.
    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
    #    Using two different approaches isn't consistent and might cause
    #    confusion. Also the issues with the case of deletions might still
    #    occur.
    #
    # The overhead of sending all job and host ids in every heartbeat is low:
    # At peaks one board has about 1200 created but unfinished jobs.
    # See the numbers here: http://goo.gl/gQCGWH
    # Assuming that job id's have 6 digits and that json serialization takes a
    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
    # A NOT IN query with 5000 ids took about 30ms in tests made.
    # These numbers seem low enough to outweigh the disadvantages of the
    # solutions described above.
    timer = autotest_stats.Timer('shard_heartbeat')
    with timer:
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        assert len(known_host_ids) == len(known_host_statuses)
        # Walk the parallel id/status sequences together; only write back
        # hosts whose status actually changed to avoid needless DB updates.
        for host_id, host_status in zip(known_host_ids, known_host_statuses):
            host_model = models.Host.objects.get(pk=host_id)
            if host_model.status != host_status:
                host_model.status = host_status
                host_model.save()

        hosts, jobs, suite_keyvals = rpc_utils.find_records_for_shard(
                shard_obj, known_job_ids=known_job_ids,
                known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
            'suite_keyvals': [kv.serialize() for kv in suite_keyvals],
        }
2185
2186
def get_shards(**filter_data):
    """Return a list of all shards.

    @returns A sequence of nested dictionaries of shard information.
    """
    shard_objs = models.Shard.query_objects(filter_data)
    serialized_shards = rpc_utils.prepare_rows_as_nested_dicts(shard_objs, ())
    # Attach the board label names each shard is responsible for.
    for shard_dict, shard_obj in zip(serialized_shards, shard_objs):
        shard_dict['labels'] = [l.name for l in shard_obj.labels.all()]
    return serialized_shards
2198
2199
def _assign_board_to_shard_precheck(labels):
    """Validate board labels before they are attached to a shard.

    Each label must (1) be formatted as `board:<name>`, (2) exist in the
    database, and (3) not be assigned to any shard yet.

    @param labels: Board labels separated by comma.

    @raises error.RPCException: If a label provided doesn't start with
                                `board:` or the board is already assigned
                                to a shard.
    @raises models.Label.DoesNotExist: If a label specified doesn't exist.

    @returns: A list of label models ready to be added to a shard.
    """
    label_models = []
    for label_name in labels.split(','):
        # Reject anything that is not a board label.
        if not label_name.startswith('board:'):
            raise error.RPCException('Sharding only supports `board:.*` label.')
        # smart_get raises models.Label.DoesNotExist for unknown labels.
        label = models.Label.smart_get(label_name)
        try:
            owning_shard = models.Shard.objects.get(labels=label)
        except models.Shard.DoesNotExist:
            # Board is not on any shard yet, so it's valid.
            label_models.append(label)
        else:
            raise error.RPCException(
                    '%s is already on shard %s' % (label, owning_shard.hostname))
    return label_models
2233
2234
def add_shard(hostname, labels):
    """Create a shard and start running jobs on it.

    @param hostname: Unique hostname of the shard to be added.
    @param labels: Board labels separated by comma. Jobs for any of these
                   boards will be assigned to the shard.

    @raises error.RPCException: If a label provided doesn't start with
                                `board:` or the board is already assigned
                                to a shard.
    @raises model_logic.ValidationError: If a shard with the given hostname
                                         already exists.
    @raises models.Label.DoesNotExist: If a label specified doesn't exist.

    @returns: The id of the added shard.
    """
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.add_object(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
2255
2256
def add_board_to_shard(hostname, labels):
    """Attach additional boards to an existing shard.

    @param hostname: The hostname of the shard to be changed.
    @param labels: Board labels separated by comma.

    @raises error.RPCException: If a label provided doesn't start with
                                `board:` or the board is already assigned
                                to a shard.
    @raises models.Label.DoesNotExist: If a label specified doesn't exist.

    @returns: The id of the changed shard.
    """
    board_labels = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.objects.get(hostname=hostname)
    for board_label in board_labels:
        shard.labels.add(board_label)
    return shard.id
2274
2275
def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Reboot task with highest priority is scheduled for them.
    This reboots the DUTs and then all remaining tasks continue to run in the
    drone of the master.

    The procedure for deleting a shard:
        * Lock all unlocked hosts on that shard.
        * Remove shard information.
        * Assign a reboot task with highest priority to these hosts.
        * Unlock these hosts; the reboot tasks then run in front of all other
          tasks.

    The status of jobs that haven't been reported to be finished yet will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)
    # Snapshot the currently-unlocked hosts before mutating anything; this
    # list drives the lock, reboot-job and unlock steps below.
    hostnames_to_lock = [h.hostname for h in
                         models.Host.objects.filter(shard=shard, locked=False)]

    # TODO(beeps): Power off shard
    # For ChromeOS hosts, a reboot test with the highest priority is added to
    # the DUT. After a reboot it should be guaranteed that no processes from
    # prior tests that were run by a shard are still running on the host.

    # Lock all unlocked hosts so nothing else is scheduled on them while the
    # shard record is being torn down.
    dicts = {'locked': True, 'lock_time': datetime.datetime.now()}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)

    # Remove shard information: detach hosts and jobs, then drop the shard
    # record itself.
    models.Host.objects.filter(shard=shard).update(shard=None)
    models.Job.objects.filter(shard=shard).update(shard=None)
    shard.labels.clear()
    shard.delete()

    # Assign a reboot task with highest priority: Super.
    t = models.Test.objects.get(name='platform_BootPerfServer:shard')
    c = utils.read_file(os.path.join(common.autotest_dir, t.path))
    if hostnames_to_lock:
        rpc_utils.create_job_common(
                'reboot_dut_for_shard_deletion',
                priority=priorities.Priority.SUPER,
                control_type='Server',
                control_file=c, hosts=hostnames_to_lock)

    # Unlock these shard-related hosts; the Super-priority reboot job queued
    # above runs before anything else that gets scheduled on them.
    dicts = {'locked': False, 'lock_time': None}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)
2328
2329
def get_servers(hostname=None, role=None, status=None):
    """Look up servers matching the given role and status.

    @param hostname: FQDN of the server.
    @param role: Name of the server role, e.g., drone, scheduler. Default to
                 None to match any role.
    @param status: Status of the server, e.g., primary, backup, repair_required.
                   Default to None to match any server status.

    @raises error.RPCException: If server database is not used.
    @return: A list of server names for servers with matching role and status.
    """
    if not server_manager_utils.use_server_db():
        raise error.RPCException('Server database is not enabled. Please try '
                                 'retrieve servers from global config.')
    matches = server_manager_utils.get_servers(
            hostname=hostname, role=role, status=status)
    return [server.get_details() for server in matches]
2348
2349
@rpc_utils.route_rpc_to_master
def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
    """Look up the stable version for the given board.

    @param board: Name of the board.
    @param android: If True, the given board is an Android-based device. If
                    False, assume it is a Chrome OS-based device.

    @return: Stable version of the given board. If the stable_versions table
             has no entry for the board, the DEFAULT entry (global config
             value CROS.stable_cros_version) is returned.
    """
    return stable_version_utils.get(board=board, android=android)
2363
2364
@rpc_utils.route_rpc_to_master
def get_all_stable_versions():
    """Fetch the stable versions of all boards.

    @return: A dictionary mapping board to version.
    """
    return stable_version_utils.get_all()
2372
2373
@rpc_utils.route_rpc_to_master
def set_stable_version(version, board=stable_version_utils.DEFAULT):
    """Update the stable version for the given board.

    @param version: The new stable version value for the board.
    @param board: Name of the board, default to value `DEFAULT`.
    """
    stable_version_utils.set(version=version, board=board)
2382
2383
@rpc_utils.route_rpc_to_master
def delete_stable_version(board):
    """Delete the stable version entry for the given board.

    Removes the board's entry from the afe_stable_versions table, so the
    default stable version will be used for that board instead.

    @param board: Name of the board.
    """
    # Fixed docstring: the old summary said "Modify stable version", a
    # copy-paste from set_stable_version; this RPC deletes the entry.
    stable_version_utils.delete(board=board)
2394
2395
def get_tests_by_build(build, ignore_invalid_tests=True):
    """Get the tests that are available for the specified build.

    @param build: unique name by which to refer to the image.
    @param ignore_invalid_tests: flag on if unparsable tests are ignored.

    @return: A sorted list of all tests that are in the build specified.
    """
    # Collect the control files specified in this build
    cfile_getter = control_file_lib._initialize_control_file_getter(build)
    if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
        control_file_info_list = cfile_getter.get_suite_info()
        control_file_list = control_file_info_list.keys()
    else:
        control_file_list = cfile_getter.get_control_file_list()

    test_objects = []
    _id = 0
    for control_file_path in control_file_list:
        # Read and parse the control file
        if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
            control_file = control_file_info_list[control_file_path]
        else:
            control_file = cfile_getter.get_control_file_contents(
                    control_file_path)
        try:
            control_obj = control_data.parse_control_string(control_file)
        except:
            logging.info('Failed to parse control file: %s', control_file_path)
            if not ignore_invalid_tests:
                raise
            # Bug fix: previously execution fell through here and reused
            # control_obj from the prior iteration (or hit a NameError on
            # the first), emitting a duplicate/bogus test entry for every
            # unparsable control file. Skip the bad file instead.
            continue

        # Extract the values needed for the AFE from the control_obj.
        # The keys list represents attributes in the control_obj that
        # are required by the AFE
        keys = ['author', 'doc', 'name', 'time', 'test_type', 'experimental',
                'test_category', 'test_class', 'dependencies', 'run_verify',
                'sync_count', 'job_retries', 'retries', 'path']

        # Missing attributes default to '' to keep the AFE schema complete.
        test_object = {}
        for key in keys:
            test_object[key] = getattr(control_obj, key, '')

        # Unfortunately, the AFE expects different key-names for certain
        # values, these must be corrected to avoid the risk of tests
        # being omitted by the AFE.
        # The 'id' is an additional value used in the AFE.
        # The control_data parsing does not reference 'run_reset', but it
        # is also used in the AFE and defaults to True.
        test_object['id'] = _id
        test_object['run_reset'] = True
        test_object['description'] = test_object.get('doc', '')
        test_object['test_time'] = test_object.get('time', 0)
        test_object['test_retry'] = test_object.get('retries', 0)

        # Fix the test name to be consistent with the current presentation
        # of test names in the AFE.
        testpath, subname = os.path.split(control_file_path)
        testname = os.path.basename(testpath)
        subname = subname.split('.')[1:]
        if subname:
            testname = '%s:%s' % (testname, ':'.join(subname))

        test_object['name'] = testname

        # Correct the test path as parse_control_string sets an empty string.
        test_object['path'] = control_file_path

        _id += 1
        test_objects.append(test_object)

    test_objects = sorted(test_objects, key=lambda x: x.get('name'))
    return rpc_utils.prepare_for_serialization(test_objects)