blob: 13443a992ae4f84e23f269f965f43cca6302f8ce [file] [log] [blame]
Richard Barnette6c2b70a2017-01-26 13:40:51 -08001# pylint: disable=missing-docstring
Don Garretta06ea082017-01-13 00:04:26 +00002
mblighe8819cd2008-02-15 16:48:40 +00003"""\
4Functions to expose over the RPC interface.
5
6For all modify* and delete* functions that ask for an 'id' parameter to
7identify the object to operate on, the id may be either
8 * the database row ID
9 * the name of the object (label name, hostname, user login, etc.)
10 * a dictionary containing uniquely identifying field (this option should seldom
11 be used)
12
13When specifying foreign key fields (i.e. adding hosts to a label, or adding
14users to an ACL group), the given value may be either the database row ID or the
15name of the object.
16
17All get* functions return lists of dictionaries. Each dictionary represents one
18object and maps field names to values.
19
20Some examples:
21modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
22modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
23modify_test('sleeptest', test_type='Client', params=', seconds=60')
24delete_acl_group(1) # delete by ID
25delete_acl_group('Everyone') # delete by name
26acl_group_add_users('Everyone', ['mbligh', 'showard'])
27get_jobs(owner='showard', status='Queued')
28
mbligh93c80e62009-02-03 17:48:30 +000029See doctests/001_rpc_test.txt for (lots) more examples.
mblighe8819cd2008-02-15 16:48:40 +000030"""
31
32__author__ = 'showard@google.com (Steve Howard)'
33
Michael Tang6dc174e2016-05-31 23:13:42 -070034import ast
showard29f7cd22009-04-29 21:16:24 +000035import datetime
Shuqian Zhao4c0d2902016-01-12 17:03:15 -080036import logging
Allen Licdd00f22017-02-01 18:01:52 -080037import os
Dan Shi4a3deb82016-10-27 21:32:30 -070038import sys
MK Ryu9c5fbbe2015-02-11 15:46:22 -080039
Moises Osorio2dc7a102014-12-02 18:24:02 -080040from django.db.models import Count
Allen Licdd00f22017-02-01 18:01:52 -080041
showardcafd16e2009-05-29 18:37:49 +000042import common
Aviv Keshet14cac442016-11-20 21:44:11 -080043# TODO(akeshet): Replace with monarch stats once we know how to instrument rpc
44# server with ts_mon.
Gabe Black1e1c41b2015-02-04 23:55:15 -080045from autotest_lib.client.common_lib.cros.graphite import autotest_stats
Allen Licdd00f22017-02-01 18:01:52 -080046from autotest_lib.client.common_lib import control_data
47from autotest_lib.client.common_lib import error
48from autotest_lib.client.common_lib import global_config
49from autotest_lib.client.common_lib import priorities
50from autotest_lib.client.common_lib import time_utils
51from autotest_lib.client.common_lib.cros import dev_server
Allen Lia59b1262016-12-14 12:53:51 -080052from autotest_lib.frontend.afe import control_file as control_file_lib
Allen Licdd00f22017-02-01 18:01:52 -080053from autotest_lib.frontend.afe import model_attributes
54from autotest_lib.frontend.afe import model_logic
55from autotest_lib.frontend.afe import models
Allen Lia59b1262016-12-14 12:53:51 -080056from autotest_lib.frontend.afe import rpc_utils
Moises Osorio2dc7a102014-12-02 18:24:02 -080057from autotest_lib.frontend.tko import models as tko_models
Jiaxi Luoaac54572014-06-04 13:57:02 -070058from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
J. Richard Barnetteb5164d62015-04-13 12:59:31 -070059from autotest_lib.server import frontend
Simran Basi71206ef2014-08-13 13:51:18 -070060from autotest_lib.server import utils
Dan Shid215dbe2015-06-18 16:14:59 -070061from autotest_lib.server.cros import provision
Allen Licdd00f22017-02-01 18:01:52 -080062from autotest_lib.server.cros.dynamic_suite import constants
63from autotest_lib.server.cros.dynamic_suite import control_file_getter
64from autotest_lib.server.cros.dynamic_suite import suite as SuiteBase
Jiaxi Luo90190c92014-06-18 12:35:57 -070065from autotest_lib.server.cros.dynamic_suite import tools
Allen Licdd00f22017-02-01 18:01:52 -080066from autotest_lib.server.cros.dynamic_suite.suite import Suite
Aviv Keshet7ee95862016-08-30 15:18:27 -070067from autotest_lib.server.lib import status_history
Allen Licdd00f22017-02-01 18:01:52 -080068from autotest_lib.site_utils import host_history
69from autotest_lib.site_utils import job_history
70from autotest_lib.site_utils import server_manager_utils
71from autotest_lib.site_utils import stable_version_utils
mblighe8819cd2008-02-15 16:48:40 +000072
Moises Osorio2dc7a102014-12-02 18:24:02 -080073
Allen Licdd00f22017-02-01 18:01:52 -080074_CONFIG = global_config.global_config
75
76# Relevant CrosDynamicSuiteExceptions are defined in client/common_lib/error.py.
77
def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job.

    @param job: A job whose parameterized_job carries an 'image' parameter
        for the autoupdate_ParameterizedJob test.
    @returns The image url string stored in the job's parameters.
    """
    autoupdate_test = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_parameter = autoupdate_test.testparameter_set.get(
            test=autoupdate_test, name='image')
    job_parameters = job.parameterized_job.parameterizedjobparameter_set
    image_value = job_parameters.get(test_parameter=image_parameter)
    return image_value.parameter_value
86
87
mblighe8819cd2008-02-15 16:48:40 +000088# labels
89
def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.
    """
    label = models.Label.smart_get(id)
    label.update_object(data)

    # Only the master pushes the change down to the shards that serve
    # hosts carrying this label; a shard applies it locally only.
    if not utils.is_shard():
        affected_hosts = label.host_set.all()
        rpc_utils.fanout_rpc(affected_hosts, 'modify_label', False,
                             id=id, **data)
103
mblighe8819cd2008-02-15 16:48:40 +0000104
def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.
    """
    label = models.Label.smart_get(id)
    # Snapshot the affected hosts before the delete wipes the relation;
    # they are needed afterwards to fan the deletion out to the shards.
    affected_hosts = [models.Host.smart_get(h.id)
                      for h in label.host_set.all()]
    label.delete()

    # Master forwards the RPC to shards; shards delete locally only.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(affected_hosts, 'delete_label', False, id=id)
mblighe8819cd2008-02-15 16:48:40 +0000121
Prashanth Balasubramanian744898f2015-01-13 05:04:16 -0800122
def add_label(name, ignore_exception_if_exists=False, **kwargs):
    """Adds a new label of a given name.

    @param name: label name.
    @param ignore_exception_if_exists: If True and the exception was
        thrown due to the duplicated label name when adding a label,
        then suppress the exception. Default is False.
    @param kwargs: keyword args that store more info about a label
        other than the name.
    @return: int/long id of a new label.
    """
    # models.Label.add_object() throws model_logic.ValidationError
    # when it is given a label name that already exists.
    # However, ValidationError can be thrown with different errors,
    # and those errors should be thrown up to the call chain.
    try:
        label = models.Label.add_object(name=name, **kwargs)
    except:
        # Capture the exception info so any re-raise below preserves the
        # original type and traceback (Python 2 three-argument raise).
        exc_info = sys.exc_info()
        if ignore_exception_if_exists:
            # A successful lookup by name means the failure was a
            # duplicate-name error, which the caller asked us to ignore.
            label = rpc_utils.get_label(name)
            # If the exception is raised not because of duplicated
            # "name", then raise the original exception.
            if label is None:
                raise exc_info[0], exc_info[1], exc_info[2]
        else:
            raise exc_info[0], exc_info[1], exc_info[2]
    return label.id
151
152
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    host_models = models.Host.smart_get_bulk(hosts)
    label = models.Label.smart_get(id)
    # A platform label may not be stacked on hosts that already have one.
    if label.platform:
        models.Host.check_no_platform(host_models)
    # Likewise, a host may carry at most one 'board:' label.
    if label.name.startswith('board:'):
        models.Host.check_board_labels_allowed(host_models, [label.name])
    label.host_set.add(*host_models)
169
170
def _create_label_everywhere(id, hosts):
    """Ensure a label exists on the master and all relevant shards.

    ALERT! This method should be run only on master not shards!
    DO NOT RUN THIS ON A SHARD!!! Deputies will hate you if you do!!!

    This helper serves label_add_hosts() and host_add_labels(): it
    creates the label (if missing) on the master and then instructs the
    shards that serve the given hosts to create the very same label.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If `id` is a numeric id and no such label exists,
        since a missing label can only be created from a name.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # smart_get distinguishes names (strings) from row ids; creating
        # a missing label is only possible when we were given a name.
        if not isinstance(id, basestring):
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)
        label = models.Label.smart_get(add_label(id))

    # Replicate the label to every shard that serves one of the hosts,
    # reusing the master's id.  A shard may already know the label (for
    # example it was attached to one of its hosts earlier), so duplicates
    # are silently tolerated via ignore_exception_if_exists.
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
216
217
@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on master not shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
        while the label does not exist.
    """
    # Make sure the label exists on the master and the relevant shards.
    _create_label_everywhere(id, hosts)

    # Attach it in the master's DB ...
    add_label_to_hosts(id, hosts)

    # ... and then on every shard serving one of the hosts.
    rpc_utils.fanout_rpc(models.Host.smart_get_bulk(hosts),
                         'add_label_to_hosts', id=id)
showardbbabf502008-06-06 00:02:02 +0000241
242
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts that need to remove the label from.
    """
    label = models.Label.smart_get(id)
    label.host_set.remove(*models.Host.smart_get_bulk(hosts))
showardbbabf502008-06-06 00:02:02 +0000251
252
@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on master not shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    # Resolve the host objects up front so the shard fanout below still
    # sees the complete host list after the local removal.
    host_objs = models.Host.smart_get_bulk(hosts)
    remove_label_from_hosts(id, hosts)

    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)
266
MK Ryucf027c62015-03-04 12:00:50 -0800267
def get_labels(exclude_filters=(), **filter_data):
    """Fetch labels matching the filters.

    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    label_query = models.Label.query_objects(filter_data)
    for exclusion in exclude_filters:
        label_query = label_query.exclude(**exclusion)
    return rpc_utils.prepare_rows_as_nested_dicts(label_query,
                                                  ('atomic_group',))
showardc92da832009-04-07 18:14:34 +0000278
279
280# atomic groups
281
def add_atomic_group(name, max_number_of_machines=None, description=None):
    """Create an atomic group and return its id."""
    group = models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description)
    return group.id
286
287
def modify_atomic_group(id, **data):
    """Update fields of the atomic group identified by id (or name)."""
    group = models.AtomicGroup.smart_get(id)
    group.update_object(data)
290
291
def delete_atomic_group(id):
    """Delete the atomic group identified by id (or name)."""
    group = models.AtomicGroup.smart_get(id)
    group.delete()
294
295
def atomic_group_add_labels(id, labels):
    """Associate the given labels with an atomic group."""
    group = models.AtomicGroup.smart_get(id)
    group.label_set.add(*models.Label.smart_get_bulk(labels))
299
300
def atomic_group_remove_labels(id, labels):
    """Detach the given labels from an atomic group."""
    group = models.AtomicGroup.smart_get(id)
    group.label_set.remove(*models.Label.smart_get_bulk(labels))
304
305
def get_atomic_groups(**filter_data):
    """Return serialized dictionaries for the matching atomic groups."""
    groups = models.AtomicGroup.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(groups)
mblighe8819cd2008-02-15 16:48:40 +0000309
310
311# hosts
312
def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    """Create a host and return its id.

    @raises model_logic.ValidationError: if locked is set without a reason.
    """
    # A host may only be created locked when a reason is supplied.
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    host = models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection)
    return host.id
mblighe8819cd2008-02-15 16:48:40 +0000321
322
@rpc_utils.route_rpc_to_master
def modify_host(id, **kwargs):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard, this
    will call `modify_host_local` RPC to the responsible shard. This means if
    a host is being locked using this function, this change will also propagate
    to shards.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    @param id: id of the host to modify.
    @param kwargs: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(kwargs)
    host = models.Host.smart_get(id)
    # Lock-state changes are validated unless the caller explicitly
    # overrides validation with force_modify_locking.
    try:
        rpc_utils.check_modify_host_locking(host, kwargs)
    except model_logic.ValidationError as e:
        if not kwargs.get('force_modify_locking', False):
            raise
        logging.exception('The following exception will be ignored and lock '
                          'modification will be enforced. %s', e)

    # This is required to make `lock_time` for a host be exactly same
    # between the master and a shard.
    if kwargs.get('locked', None) and 'lock_time' not in kwargs:
        kwargs['lock_time'] = datetime.datetime.now()
    host.update_object(kwargs)

    # force_modifying_locking is not an internal field in database, remove.
    kwargs.pop('force_modify_locking', None)
    # Mirror the same changes (including the lock_time computed above)
    # on the shard that serves this host.
    rpc_utils.fanout_rpc([host], 'modify_host_local',
                         include_hostnames=False, id=id, **kwargs)
mblighe8819cd2008-02-15 16:48:40 +0000357
358
def modify_host_local(id, **kwargs):
    """Modify host attributes in local DB.

    @param id: Host id.
    @param kwargs: key=value pairs of values to set on the host.
    """
    host = models.Host.smart_get(id)
    host.update_object(kwargs)
366
367
@rpc_utils.route_rpc_to_master
def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts in that match the
    filters is assigned to a shard, this will call `modify_hosts_local` RPC
    to the responsible shard.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    The filters are always applied on the master, not on the shards. This means
    if the states of a host differ on the master and a shard, the state on the
    master will be used. I.e. this means:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
    update the host (both on the shard and on the master), because the state
    of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair failed'
    will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    # Copy so that the additions/removals below never mutate the caller's
    # dictionary.
    update_data = update_data.copy()
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        try:
            rpc_utils.check_modify_host_locking(host, update_data)
        except model_logic.ValidationError as e:
            if not update_data.get('force_modify_locking', False):
                raise
            logging.exception('The following exception will be ignored and '
                              'lock modification will be enforced. %s', e)

        # Remember which shards (and which of their hosts) must receive
        # the same update after the master is changed.
        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    # This is required to make `lock_time` for a host be exactly same
    # between the master and a shard.
    if update_data.get('locked', None) and 'lock_time' not in update_data:
        update_data['lock_time'] = datetime.datetime.now()
    for host in hosts:
        host.update_object(update_data)

    # force_modify_locking is not a database field; strip it before the RPC.
    update_data.pop('force_modify_locking', None)
    # Caution: Changing the filter from the original here. See docstring.
    rpc_utils.run_rpc_on_multiple_hostnames(
        'modify_hosts_local', affected_shard_hostnames,
        host_filter_data={'id__in': affected_host_ids},
        update_data=update_data)
426
showard276f9442009-05-20 00:33:16 +0000427
def modify_hosts_local(host_filter_data, update_data):
    """Modify attributes of hosts in local DB.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    matching_hosts = models.Host.query_objects(host_filter_data)
    for host in matching_hosts:
        host.update_object(update_data)
436
437
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    host = models.Host.smart_get(id)
    host.labels.add(*models.Label.smart_get_bulk(labels))
446
447
@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform/board label.
    """
    # Create the labels on the master/shards.
    for label in labels:
        _create_label_everywhere(label, [id])

    label_objs = models.Label.smart_get_bulk(labels)
    # Validate the request itself before touching the host: at most one
    # platform label, and only a compatible combination of board labels.
    platforms = [label.name for label in label_objs if label.platform]
    boards = [label.name for label in label_objs
              if label.name.startswith('board:')]
    if len(platforms) > 1 or not utils.board_labels_allowed(boards):
        raise model_logic.ValidationError(
            {'labels': ('Adding more than one platform label, or a list of '
                        'non-compatible board labels.: %s %s' %
                        (', '.join(platforms), ', '.join(boards)))})

    host_obj = models.Host.smart_get(id)
    # Also validate against labels the host already carries.
    if platforms:
        models.Host.check_no_platform([host_obj])
    if boards:
        models.Host.check_board_labels_allowed([host_obj], labels)
    add_labels_to_host(id, labels)

    # Mirror the attachment on the shard serving this host.
    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
mblighe8819cd2008-02-15 16:48:40 +0000480
481
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    host = models.Host.smart_get(id)
    host.labels.remove(*models.Label.smart_get_bulk(labels))
490
491
@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    # Update the master's DB first, then mirror the removal on the shard
    # that serves this host.
    remove_labels_from_host(id, labels)

    rpc_utils.fanout_rpc([models.Host.smart_get(id)],
                         'remove_labels_from_host', False,
                         id=id, labels=labels)
mblighe8819cd2008-02-15 16:48:40 +0000504
505
def get_host_attribute(attribute, **host_filter_data):
    """Fetch one named attribute from all matching hosts.

    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
        act upon
    @returns Serialized dictionaries of the matching HostAttribute rows.
    """
    hosts = list(rpc_utils.get_host_query((), False, False, True,
                                          host_filter_data))
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    matches = [attr.get_object_dict()
               for host in hosts
               for attr in host.attribute_list
               if attr.attribute == attribute]
    return rpc_utils.prepare_for_serialization(matches)
522
523
def set_host_attribute(attribute, value, **host_filter_data):
    """Set (or delete) a named attribute on all matching hosts.

    @param attribute: string name of attribute
    @param value: string, or None to delete an attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
        act upon
    """
    assert host_filter_data # disallow accidental actions on all hosts
    host_list = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(host_list)
    for host in host_list:
        host.set_or_delete_attribute(attribute, value)

    # Only the master pushes this change down; a shard applies it locally.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(host_list, 'set_host_attribute', False,
                             attribute=attribute, value=value,
                             **host_filter_data)
541
showard0957a842009-05-11 19:25:08 +0000542
@rpc_utils.forward_single_host_rpc_to_shard
def delete_host(id):
    """Delete the host identified by id (or hostname)."""
    host = models.Host.smart_get(id)
    host.delete()
mblighe8819cd2008-02-15 16:48:40 +0000546
547
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True,
              include_current_job=False, **filter_data):
    """Get a list of dictionaries which contains the information of hosts.

    @param multiple_labels: match hosts in all of the labels given. Should
        be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
        "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
        atomic group labels associated with them.
    @param valid_only: passed through to rpc_utils.get_host_query;
        presumably restricts the query to non-deleted hosts — confirm there.
    @param include_current_job: Set to True to include ids of currently running
        job and special task.
    @param filter_data: additional Host field filters for the query.
    @returns Serialized list of host dictionaries with 'labels', 'platform',
        'atomic_group', 'acls' and 'attributes' keys added (plus
        'current_job'/'current_special_task' when include_current_job).
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    # Bulk-load the related rows once, instead of per-host queries.
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        if include_current_job:
            # Default to None so the keys are always present when requested.
            host_dict['current_job'] = None
            host_dict['current_special_task'] = None
            entries = models.HostQueueEntry.objects.filter(
                    host_id=host_dict['id'], active=True, complete=False)
            if entries:
                host_dict['current_job'] = (
                        entries[0].get_object_dict()['job'])
            tasks = models.SpecialTask.objects.filter(
                    host_id=host_dict['id'], is_active=True, is_complete=False)
            if tasks:
                # Format: '<task id>-<lowercased task name>'.
                host_dict['current_special_task'] = (
                        '%d-%s' % (tasks[0].get_object_dict()['id'],
                                   tasks[0].get_object_dict()['task'].lower()))
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000598
599
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    # Count on the query itself; no need to materialize host rows.
    host_query = rpc_utils.get_host_query(multiple_labels,
                                          exclude_only_if_needed_labels,
                                          exclude_atomic_group_hosts,
                                          valid_only, filter_data)
    return host_query.count()
showard1385b162008-03-13 15:59:40 +0000613
mblighe8819cd2008-02-15 16:48:40 +0000614
615# tests
616
def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    """Create a test and return its id."""
    new_test = models.Test.add_object(
            name=name, test_type=test_type, path=path, author=author,
            dependencies=dependencies, experimental=experimental,
            run_verify=run_verify, test_time=test_time,
            test_category=test_category, sync_count=sync_count,
            test_class=test_class, description=description)
    return new_test.id
mblighe8819cd2008-02-15 16:48:40 +0000629
630
def modify_test(id, **data):
    """Update fields on an existing test."""
    test = models.Test.smart_get(id)
    test.update_object(data)
mblighe8819cd2008-02-15 16:48:40 +0000633
634
def delete_test(id):
    """Delete a test by ID, name, or unique field dict."""
    test = models.Test.smart_get(id)
    test.delete()
mblighe8819cd2008-02-15 16:48:40 +0000637
638
def get_tests(**filter_data):
    """List tests matching the given filters, serialization-ready."""
    test_dicts = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(test_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000642
643
def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
    """Gets the counts of all passed and failed tests from the matching jobs.

    Jobs are matched by both a name prefix and a required dependency label;
    SERVER_JOB and CLIENT_JOB* bookkeeping entries are excluded from the
    counts. Any status other than GOOD is counted as failed.

    @param job_name_prefix: Name prefix of the jobs to get the summary
           from, e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'.
    @param label_name: Label that must be set in the jobs, e.g.,
           'cros-version:butterfly-release/R40-6457.21.0'.

    @returns A summary of the counts of all the passed and failed tests.
    """
    summary = {'passed': 0, 'failed': 0}
    matching_job_ids = list(models.Job.objects.filter(
            name__startswith=job_name_prefix,
            dependency_labels__name=label_name).values_list('pk', flat=True))
    if not matching_job_ids:
        return summary

    status_rows = (tko_models.TestView.objects.filter(
            afe_job_id__in=matching_job_ids).exclude(
            test_name='SERVER_JOB').exclude(
            test_name__startswith='CLIENT_JOB').values('status').annotate(
            count=Count('status')))
    for row in status_rows:
        bucket = 'passed' if row['status'] == 'GOOD' else 'failed'
        summary[bucket] += row['count']
    return summary
674
675
showard2b9a88b2008-06-13 20:55:03 +0000676# profilers
677
def add_profiler(name, description=None):
    """Create a new profiler and return its database row ID."""
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
680
681
def modify_profiler(id, **data):
    """Update fields on an existing profiler."""
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
684
685
def delete_profiler(id):
    """Delete a profiler by ID, name, or unique field dict."""
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
688
689
def get_profilers(**filter_data):
    """List profilers matching the given filters, serialization-ready."""
    profiler_dicts = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profiler_dicts)
693
694
mblighe8819cd2008-02-15 16:48:40 +0000695# users
696
def add_user(login, access_level=None):
    """Create a new user and return the database row ID."""
    user = models.User.add_object(login=login, access_level=access_level)
    return user.id
mblighe8819cd2008-02-15 16:48:40 +0000699
700
def modify_user(id, **data):
    """Update fields on an existing user."""
    user = models.User.smart_get(id)
    user.update_object(data)
mblighe8819cd2008-02-15 16:48:40 +0000703
704
def delete_user(id):
    """Delete a user by ID, login, or unique field dict."""
    user = models.User.smart_get(id)
    user.delete()
mblighe8819cd2008-02-15 16:48:40 +0000707
708
def get_users(**filter_data):
    """List users matching the given filters, serialization-ready."""
    user_dicts = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(user_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000712
713
714# acl groups
715
def add_acl_group(name, description=None):
    """Create an ACL group containing only the calling user.

    @returns The database row ID of the new group.
    """
    new_group = models.AclGroup.add_object(name=name, description=description)
    # A group must never be empty; seed it with the creator.
    new_group.users.add(models.User.current_user())
    return new_group.id
mblighe8819cd2008-02-15 16:48:40 +0000720
721
def modify_acl_group(id, **data):
    """Update an ACL group after verifying the caller may touch it."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    # Groups must not be left without members.
    acl_group.add_current_user_if_empty()
mblighe8819cd2008-02-15 16:48:40 +0000727
728
def acl_group_add_users(id, users):
    """Add the given users to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objects)
mblighe8819cd2008-02-15 16:48:40 +0000734
735
def acl_group_remove_users(id, users):
    """Remove the given users from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objects)
    # Groups must not be left without members.
    acl_group.add_current_user_if_empty()
mblighe8819cd2008-02-15 16:48:40 +0000742
743
def acl_group_add_hosts(id, hosts):
    """Add the given hosts to an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objects)
    acl_group.on_host_membership_change()
mblighe8819cd2008-02-15 16:48:40 +0000750
751
def acl_group_remove_hosts(id, hosts):
    """Remove the given hosts from an ACL group."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objects)
    acl_group.on_host_membership_change()
mblighe8819cd2008-02-15 16:48:40 +0000758
759
def delete_acl_group(id):
    """Delete an ACL group by ID, name, or unique field dict."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
mblighe8819cd2008-02-15 16:48:40 +0000762
763
def get_acl_groups(**filter_data):
    """List ACL groups, each annotated with its user logins and hostnames."""
    group_dicts = models.AclGroup.list_objects(filter_data)
    for group_dict in group_dicts:
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [user.login for user in group.users.all()]
        group_dict['hosts'] = [host.hostname for host in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(group_dicts)
mblighe8819cd2008-02-15 16:48:40 +0000773
774
775# jobs
776
def generate_control_file(tests=(), profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, db_tests=True,
                          test_source_build=None):
    """
    Generates a client-side control file to run tests.

    @param tests List of tests to run. See db_tests for more information.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explcitly set the default mode at all.
    @param db_tests: if True, the test object can be found in the database
                     backing the test model. In this case, tests is a tuple
                     of test IDs which are used to retrieve the test objects
                     from the database. If False, tests is a tuple of test
                     dictionaries stored client-side in the AFE.
    @param test_source_build: Build to be used to retrieve test code. Default
                              to None.

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
                     synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    # Nothing to run: return an empty asynchronous client-side control file.
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects = (
            rpc_utils.prepare_generate_control_file(tests, profilers,
                                                    db_tests))
    cf_info['control_file'] = control_file_lib.generate_control(
            tests=test_objects,
            profilers=profiler_objects,
            is_server=cf_info['is_server'],
            client_control_file=client_control_file,
            profile_only=profile_only,
            test_source_build=test_source_build)
    return cf_info
mblighe8819cd2008-02-15 16:48:40 +0000826
827
def create_parameterized_job(
        name,
        priority,
        test,
        parameters,
        kernel=None,
        label=None,
        profilers=(),
        profiler_parameters=None,
        use_container=False,
        profile_only=None,
        upload_kernel_config=False,
        hosts=(),
        meta_hosts=(),
        one_time_hosts=(),
        atomic_group_name=None,
        synch_count=None,
        is_template=False,
        timeout=None,
        timeout_mins=None,
        max_runtime_mins=None,
        run_verify=False,
        email_list='',
        dependencies=(),
        reboot_before=None,
        reboot_after=None,
        parse_failed_repair=None,
        hostless=False,
        keyvals=None,
        drone_set=None,
        run_reset=True,
        require_ssp=None):
    """
    Creates and enqueues a parameterized job.

    Most parameters a combination of the parameters for generate_control_file()
    and create_job(), with the exception of:

    @param test name or ID of the test to run
    @param parameters a map of parameter name ->
                          tuple of (param value, param type)
                      NOTE: matched entries are pop()ed, so the caller's dict
                      is mutated.
    @param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                (param value,
                                                                 param type)

    @returns The created Job id number.
    """
    # Set up the parameterized job configs
    test_obj = models.Test.smart_get(test)
    control_type = test_obj.test_type

    try:
        label = models.Label.smart_get(label)
    except models.Label.DoesNotExist:
        label = None

    kernel_objs = models.Kernel.create_kernels(kernel)
    profiler_objs = [models.Profiler.smart_get(profiler)
                     for profiler in profilers]

    parameterized_job = models.ParameterizedJob.objects.create(
            test=test_obj, label=label, use_container=use_container,
            profile_only=profile_only,
            upload_kernel_config=upload_kernel_config)
    parameterized_job.kernels.add(*kernel_objs)

    # profiler_parameters defaults to None; without this guard the .get()
    # below raises AttributeError whenever profilers are supplied but no
    # profiler parameters are.
    if profiler_parameters is None:
        profiler_parameters = {}
    for profiler in profiler_objs:
        parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
                parameterized_job=parameterized_job,
                profiler=profiler)
        profiler_params = profiler_parameters.get(profiler.name, {})
        # Use param_name, not name, as the loop variable: the original code
        # shadowed (and clobbered) the job's name argument here, so the job
        # was created under the last profiler parameter's name.
        for param_name, (value, param_type) in profiler_params.iteritems():
            models.ParameterizedJobProfilerParameter.objects.create(
                    parameterized_job_profiler=parameterized_profiler,
                    parameter_name=param_name,
                    parameter_value=value,
                    parameter_type=param_type)

    try:
        for parameter in test_obj.testparameter_set.all():
            if parameter.name in parameters:
                param_value, param_type = parameters.pop(parameter.name)
                parameterized_job.parameterizedjobparameter_set.create(
                        test_parameter=parameter, parameter_value=param_value,
                        parameter_type=param_type)

        if parameters:
            raise Exception('Extra parameters remain: %r' % parameters)

        return rpc_utils.create_job_common(
                name=name,
                priority=priority,
                control_type=control_type,
                hosts=hosts,
                meta_hosts=meta_hosts,
                one_time_hosts=one_time_hosts,
                atomic_group_name=atomic_group_name,
                synch_count=synch_count,
                is_template=is_template,
                timeout=timeout,
                timeout_mins=timeout_mins,
                max_runtime_mins=max_runtime_mins,
                run_verify=run_verify,
                email_list=email_list,
                dependencies=dependencies,
                reboot_before=reboot_before,
                reboot_after=reboot_after,
                parse_failed_repair=parse_failed_repair,
                hostless=hostless,
                keyvals=keyvals,
                drone_set=drone_set,
                parameterized_job=parameterized_job.id,
                run_reset=run_reset,
                require_ssp=require_ssp)
    except:
        # Bare except intentionally: roll back the parameterized job record
        # on any failure (including BaseException), then re-raise.
        parameterized_job.delete()
        raise
945
946
def create_job_page_handler(name, priority, control_file, control_type,
                            image=None, hostless=False, firmware_rw_build=None,
                            firmware_ro_build=None, test_source_build=None,
                            is_cloning=False, **kwargs):
    """\
    Create and enqueue a job.

    Dispatches to create_suite_job() for hostless image jobs, otherwise to
    create_job().

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param image: ChromeOS build to be installed in the dut. Default to None.
    @param firmware_rw_build: Firmware build to update RW firmware. Default to
                              None, i.e., RW firmware will not be updated.
    @param firmware_ro_build: Firmware build to update RO firmware. Default to
                              None, i.e., RO firmware will not be updated.
    @param test_source_build: Build to be used to retrieve test code. Default
                              to None.
    @param is_cloning: True if creating a cloning job.
    @param kwargs extra args that will be required by create_suite_job or
                  create_job.

    @returns The created Job id number.
    """
    if not is_cloning:
        logging.info('Start to create a new job')
    else:
        logging.info('Start to clone a new job')
        # A cloned job must not carry both hosts and meta_hosts; that would
        # make host-scheduler schedule two hqe jobs onto one host at the same
        # time and crash itself.  Drop meta_hosts in that case.
        if kwargs.get('hosts') and kwargs.get('meta_hosts'):
            kwargs['meta_hosts'] = []
    control_file = rpc_utils.encode_ascii(control_file)
    if not control_file:
        raise model_logic.ValidationError({
                'control_file' : "Control file cannot be empty"})

    if image and hostless:
        builds = {provision.CROS_VERSION_PREFIX: image}
        if firmware_rw_build:
            builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build
        if firmware_ro_build:
            builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build
        return create_suite_job(
                name=name, control_file=control_file, priority=priority,
                builds=builds, test_source_build=test_source_build,
                is_cloning=is_cloning, **kwargs)
    return create_job(name, priority, control_file, control_type, image=image,
                      hostless=hostless, **kwargs)
Simran Basib6ec8ae2014-04-23 12:05:08 -0700998
999
@rpc_utils.route_rpc_to_master
def create_job(
        name,
        priority,
        control_file,
        control_type,
        hosts=(),
        meta_hosts=(),
        one_time_hosts=(),
        atomic_group_name=None,
        synch_count=None,
        is_template=False,
        timeout=None,
        timeout_mins=None,
        max_runtime_mins=None,
        run_verify=False,
        email_list='',
        dependencies=(),
        reboot_before=None,
        reboot_after=None,
        parse_failed_repair=None,
        hostless=False,
        keyvals=None,
        drone_set=None,
        image=None,
        parent_job_id=None,
        test_retry=0,
        run_reset=True,
        require_ssp=None,
        args=(),
        **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
        synch_count == 1 means the job is asynchronous.  If an atomic group is
        given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param timeout_mins Minutes after this call returns until the job times
        out.
    @param max_runtime_mins Minutes from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
        this job will be parsed as part of the job.
    @param hostless if true, create a hostless job
    @param keyvals dict of keyvals to associate with the job
    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
        one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.
    @param drone_set The name of the drone set to run this test on.
    @param image OS image to install before running job.
    @param parent_job_id id of a job considered to be parent of created job.
    @param test_retry Number of times to retry test if the test did not
        complete successfully. (optional, default: 0)
    @param run_reset Should the host be reset before running the test?
    @param require_ssp Set to True to require server-side packaging to run the
                       test. If it's set to None, drone will still try to run
                       the server side with server-side packaging. If the
                       autotest-server package doesn't exist for the build or
                       image is not set, drone will run the test without server-
                       side packaging. Default is None.
    @param args A list of args to be injected into control file.
    @param kwargs extra keyword args. NOT USED.

    @returns The created Job id number.
    """
    # Make the args visible to the control file as a variable.
    if args:
        control_file = tools.inject_vars({'args': args}, control_file)
    # Provisioning to an image implies a dependency on that image's label.
    if image:
        dependencies += (provision.image_version_to_label(image),)
    return rpc_utils.create_job_common(
            name=name,
            priority=priority,
            control_type=control_type,
            control_file=control_file,
            hosts=hosts,
            meta_hosts=meta_hosts,
            one_time_hosts=one_time_hosts,
            atomic_group_name=atomic_group_name,
            synch_count=synch_count,
            is_template=is_template,
            timeout=timeout,
            timeout_mins=timeout_mins,
            max_runtime_mins=max_runtime_mins,
            run_verify=run_verify,
            email_list=email_list,
            dependencies=dependencies,
            reboot_before=reboot_before,
            reboot_after=reboot_after,
            parse_failed_repair=parse_failed_repair,
            hostless=hostless,
            keyvals=keyvals,
            drone_set=drone_set,
            parent_job_id=parent_job_id,
            test_retry=test_retry,
            run_reset=run_reset,
            require_ssp=require_ssp)
1109
showard9dbdcda2008-10-14 17:34:36 +00001110def abort_host_queue_entries(**filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001111 """\
showard9dbdcda2008-10-14 17:34:36 +00001112 Abort a set of host queue entries.
Fang Deng63b0e452014-12-19 14:38:15 -08001113
1114 @return: A list of dictionaries, each contains information
1115 about an aborted HQE.
jadmanski0afbb632008-06-06 21:10:57 +00001116 """
showard9dbdcda2008-10-14 17:34:36 +00001117 query = models.HostQueueEntry.query_objects(filter_data)
beepsfaecbce2013-10-29 11:35:10 -07001118
1119 # Dont allow aborts on:
1120 # 1. Jobs that have already completed (whether or not they were aborted)
1121 # 2. Jobs that we have already been aborted (but may not have completed)
1122 query = query.filter(complete=False).filter(aborted=False)
showarddc817512008-11-12 18:16:41 +00001123 models.AclGroup.check_abort_permissions(query)
showard9dbdcda2008-10-14 17:34:36 +00001124 host_queue_entries = list(query.select_related())
showard2bab8f42008-11-12 18:15:22 +00001125 rpc_utils.check_abort_synchronous_jobs(host_queue_entries)
mblighe8819cd2008-02-15 16:48:40 +00001126
Simran Basic1b26762013-06-26 14:23:21 -07001127 models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
Fang Deng63b0e452014-12-19 14:38:15 -08001128 hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id,
1129 'Job name': hqe.job.name} for hqe in host_queue_entries]
1130 return hqe_info
showard9d821ab2008-07-11 16:54:29 +00001131
1132
def abort_special_tasks(**filter_data):
    """\
    Abort the special task, or tasks, specified in the filter.
    """
    active_tasks = models.SpecialTask.query_objects(filter_data).filter(
            is_active=True)
    for task in active_tasks:
        task.abort()
1141
1142
def _call_special_tasks_on_hosts(task, hosts):
    """\
    Schedules a set of hosts for a special task.

    @returns A list of hostnames that a special task was created for.
    """
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
    # On the master, sharded hosts must be handled on their shards instead.
    if shard_host_map and not utils.is_shard():
        raise ValueError('The following hosts are on shards, please '
                         'follow the link to the shards and create jobs '
                         'there instead. %s.' % shard_host_map)
    for host in hosts:
        models.SpecialTask.schedule_special_task(host, task)
    return sorted(host.hostname for host in hosts)
1158
1159
def _forward_special_tasks_on_hosts(task, rpc, **filter_data):
    """Forward special tasks to corresponding shards.

    For master, when special tasks are fired on hosts that are sharded,
    forward the RPC to corresponding shards.

    For shard, create special task records in local DB.

    @param task: Enum value of frontend.afe.models.SpecialTask.Task
    @param rpc: RPC name to forward.
    @param filter_data: Filter keywords to be used for DB query.

    @return: A list of hostnames that a special task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True)

    # On the master, split the host list: forward the RPC (narrowed by a
    # hostname__in filter) to each shard that owns hosts, and keep only the
    # shardless hosts for local SpecialTask creation.
    if shard_host_map and not utils.is_shard():
        hosts = [host for host in hosts if host.shard is None]
        for shard, shard_hostnames in shard_host_map.iteritems():
            # Callers (mainly the frontend website) filter by 'id' or
            # 'id__in'; adding a hostname filter still narrows the host set
            # on each shard even though all ids remain in filter_data.
            # Hostnames (rather than ids) fit the redirection helpers in
            # rpc_utils.
            shard_filter = filter_data.copy()
            shard_filter['hostname__in'] = shard_hostnames
            rpc_utils.run_rpc_on_multiple_hostnames(
                    rpc, [shard], **shard_filter)

    # There is a race condition here if someone assigns a shard to one of
    # these hosts before we create the task.  The host will stay on the
    # master if:
    # 1. The host is not Ready
    # 2. The host is Ready but has a task
    # But if the host is Ready and doesn't have a task yet, it will get sent
    # to the shard as we're creating a task here.
    #
    # Given that we only rarely verify Ready hosts it isn't worth putting
    # this entire method in a transaction.  The worst case scenario is that
    # we have a verify running on a Ready host while the shard is using it;
    # if the verify fails no subsequent tasks will be created against the
    # host on the master, and verifies are safe enough that this is OK.
    return _call_special_tasks_on_hosts(task, hosts)
1209
1210
def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.VERIFY, 'reverify_hosts', **filter_data)
Simran Basi73dae552013-02-25 14:57:46 -08001219
1220
def repair_hosts(**filter_data):
    """\
    Schedules a set of hosts for repair.

    @returns A list of hostnames that a repair task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.REPAIR, 'repair_hosts', **filter_data)
showard1ff7b2e2009-05-15 23:17:18 +00001229
1230
def get_jobs(not_yet_run=False, running=False, finished=False,
             suite=False, sub=False, standalone=False, **filter_data):
    """\
    Extra status filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have start running but for which not
    all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
    aborted).

    Extra type filter args for get_jobs:
    -suite: Include only jobs with child jobs.
    -sub: Include only jobs with a parent job.
    -standalone: Inlcude only jobs with no child or parent jobs.
    At most one of these three fields should be specified.
    """
    status_filters = rpc_utils.extra_job_status_filters(not_yet_run, running,
                                                        finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(
            status_filters, suite, sub, standalone)
    jobs = list(models.Job.query_objects(filter_data))
    # Batch-fetch related rows to avoid per-job queries.
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    models.Job.objects.populate_relationships(jobs, models.JobKeyval,
                                              'keyvals')
    job_dicts = []
    for job in jobs:
        job_dict = job.get_object_dict()
        job_dict['dependencies'] = ','.join(label.name
                                            for label in job.dependencies)
        job_dict['keyvals'] = dict((keyval.key, keyval.value)
                                   for keyval in job.keyvals)
        if job.parameterized_job:
            job_dict['image'] = get_parameterized_autoupdate_image_url(job)
        job_dicts.append(job_dict)
    return rpc_utils.prepare_for_serialization(job_dicts)
mblighe8819cd2008-02-15 16:48:40 +00001269
1270
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 suite=False, sub=False, standalone=False,
                 **filter_data):
    """\
    Count jobs matching the filters.

    See get_jobs() for documentation of extra filter parameters.
    """
    status_filters = rpc_utils.extra_job_status_filters(not_yet_run, running,
                                                        finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(
            status_filters, suite, sub, standalone)
    return models.Job.query_count(filter_data)
mblighe8819cd2008-02-15 16:48:40 +00001285
1286
def get_jobs_summary(**filter_data):
    """\
    Like get_jobs(), but adds 'status_counts' and 'result_counts' field.

    'status_counts' filed is a dictionary mapping status strings to the number
    of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}.

    'result_counts' field is piped to tko's rpc_interface and has the return
    format specified under get_group_counts.
    """
    jobs = get_jobs(**filter_data)
    job_ids = [job['id'] for job in jobs]
    status_counts_by_id = models.Job.objects.get_status_counts(job_ids)
    for job in jobs:
        job['status_counts'] = status_counts_by_id[job['id']]
        job['result_counts'] = tko_rpc_interface.get_status_counts(
                ['afe_job_id', 'afe_job_id'],
                header_groups=[['afe_job_id'], ['afe_job_id']],
                **{'afe_job_id': job['id']})
    return rpc_utils.prepare_for_serialization(jobs)
mblighe8819cd2008-02-15 16:48:40 +00001307
1308
showarda965cef2009-05-15 23:17:41 +00001309def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
showarda8709c52008-07-03 19:44:54 +00001310 """\
1311 Retrieves all the information needed to clone a job.
1312 """
showarda8709c52008-07-03 19:44:54 +00001313 job = models.Job.objects.get(id=id)
showard29f7cd22009-04-29 21:16:24 +00001314 job_info = rpc_utils.get_job_info(job,
showarda965cef2009-05-15 23:17:41 +00001315 preserve_metahosts,
1316 queue_entry_filter_data)
showard945072f2008-09-03 20:34:59 +00001317
showardd9992fe2008-07-31 02:15:03 +00001318 host_dicts = []
showard29f7cd22009-04-29 21:16:24 +00001319 for host in job_info['hosts']:
1320 host_dict = get_hosts(id=host.id)[0]
1321 other_labels = host_dict['labels']
1322 if host_dict['platform']:
1323 other_labels.remove(host_dict['platform'])
1324 host_dict['other_labels'] = ', '.join(other_labels)
showardd9992fe2008-07-31 02:15:03 +00001325 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001326
showard29f7cd22009-04-29 21:16:24 +00001327 for host in job_info['one_time_hosts']:
1328 host_dict = dict(hostname=host.hostname,
1329 id=host.id,
1330 platform='(one-time host)',
1331 locked_text='')
1332 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001333
showard4d077562009-05-08 18:24:36 +00001334 # convert keys from Label objects to strings (names of labels)
showard29f7cd22009-04-29 21:16:24 +00001335 meta_host_counts = dict((meta_host.name, count) for meta_host, count
showard4d077562009-05-08 18:24:36 +00001336 in job_info['meta_host_counts'].iteritems())
showard29f7cd22009-04-29 21:16:24 +00001337
1338 info = dict(job=job.get_object_dict(),
1339 meta_host_counts=meta_host_counts,
1340 hosts=host_dicts)
1341 info['job']['dependencies'] = job_info['dependencies']
1342 if job_info['atomic_group']:
1343 info['atomic_group_name'] = (job_info['atomic_group']).name
1344 else:
1345 info['atomic_group_name'] = None
jamesren2275ef12010-04-12 18:25:06 +00001346 info['hostless'] = job_info['hostless']
jamesren76fcf192010-04-21 20:39:50 +00001347 info['drone_set'] = job.drone_set and job.drone_set.name
showarda8709c52008-07-03 19:44:54 +00001348
Michael Tang6dc174e2016-05-31 23:13:42 -07001349 image = _get_image_for_job(job, job_info['hostless'])
1350 if image:
1351 info['job']['image'] = image
Eric Lid23bc192011-02-09 14:38:57 -08001352
showarda8709c52008-07-03 19:44:54 +00001353 return rpc_utils.prepare_for_serialization(info)
1354
1355
Michael Tang6dc174e2016-05-31 23:13:42 -07001356def _get_image_for_job(job, hostless):
1357 """ Gets the image used for a job.
1358
1359 Gets the image used for an AFE job. If the job is a parameterized job, get
1360 the image from the job parameter; otherwise, tries to get the image from
1361 the job's keyvals 'build' or 'builds'. As a last resort, if the job is a
1362 hostless job, tries to get the image from its control file attributes
1363 'build' or 'builds'.
1364
1365 TODO(ntang): Needs to handle FAFT with two builds for ro/rw.
1366
1367 @param job An AFE job object.
1368 @param hostless Boolean on of the job is hostless.
1369
1370 @returns The image build used for the job.
1371 """
1372 image = None
1373 if job.parameterized_job:
1374 image = get_parameterized_autoupdate_image_url(job)
1375 else:
1376 keyvals = job.keyval_dict()
Michael Tang84a2ecf2016-06-07 15:10:53 -07001377 image = keyvals.get('build')
Michael Tang6dc174e2016-05-31 23:13:42 -07001378 if not image:
1379 value = keyvals.get('builds')
1380 builds = None
1381 if isinstance(value, dict):
1382 builds = value
1383 elif isinstance(value, basestring):
1384 builds = ast.literal_eval(value)
1385 if builds:
1386 image = builds.get('cros-version')
1387 if not image and hostless and job.control_file:
1388 try:
1389 control_obj = control_data.parse_control_string(
1390 job.control_file)
1391 if hasattr(control_obj, 'build'):
1392 image = getattr(control_obj, 'build')
1393 if not image and hasattr(control_obj, 'builds'):
1394 builds = getattr(control_obj, 'builds')
1395 image = builds.get('cros-version')
1396 except:
1397 logging.warning('Failed to parse control file for job: %s',
1398 job.name)
1399 return image
1400
showard34dc5fa2008-04-24 20:58:40 +00001401
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001402def get_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001403 """\
showardc92da832009-04-07 18:14:34 +00001404 @returns A sequence of nested dictionaries of host and job information.
jadmanski0afbb632008-06-06 21:10:57 +00001405 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001406 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1407 'started_on__lte',
1408 start_time,
1409 end_time,
1410 **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001411 return rpc_utils.prepare_rows_as_nested_dicts(
1412 models.HostQueueEntry.query_objects(filter_data),
1413 ('host', 'atomic_group', 'job'))
showard34dc5fa2008-04-24 20:58:40 +00001414
1415
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001416def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001417 """\
1418 Get the number of host queue entries associated with this job.
1419 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001420 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1421 'started_on__lte',
1422 start_time,
1423 end_time,
1424 **filter_data)
jadmanski0afbb632008-06-06 21:10:57 +00001425 return models.HostQueueEntry.query_count(filter_data)
showard34dc5fa2008-04-24 20:58:40 +00001426
1427
showard1e935f12008-07-11 00:11:36 +00001428def get_hqe_percentage_complete(**filter_data):
1429 """
showardc92da832009-04-07 18:14:34 +00001430 Computes the fraction of host queue entries matching the given filter data
showard1e935f12008-07-11 00:11:36 +00001431 that are complete.
1432 """
1433 query = models.HostQueueEntry.query_objects(filter_data)
1434 complete_count = query.filter(complete=True).count()
1435 total_count = query.count()
1436 if total_count == 0:
1437 return 1
1438 return float(complete_count) / total_count
1439
1440
showard1a5a4082009-07-28 20:01:37 +00001441# special tasks
1442
def get_special_tasks(**filter_data):
    """Get special task entries from the local database.

    Query the special tasks table for tasks matching the given
    `filter_data`, and return a list of the results.  No attempt is
    made to forward the call to shards; the buck will stop here.
    The caller is expected to know the target shard, e.g. because it
    is a service (such as gs_offloader) configured to operate on
    behalf of one specific shard, or because it has a host parameter
    and knows the shard assigned to that host.

    @param filter_data Filter keywords to pass to the underlying
                       database query.

    """
    tasks = models.SpecialTask.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
            tasks, ('host', 'queue_entry'))
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001463
1464
def get_host_special_tasks(host_id, **filter_data):
    """Get special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by `host_id` and matching the given `filter_data`.
    Return a list of the results.  If the host is assigned to a
    shard, forward this call to that shard.

    @param host_id Id in the database of the target host.
    @param filter_data Filter keywords to pass to the underlying
                       database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host = models.Host.smart_get(host_id, False)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
        return shard_afe.run('get_special_tasks',
                             host_id=host_id, **filter_data)
    return get_special_tasks(host_id=host_id, **filter_data)
showard1a5a4082009-07-28 20:01:37 +00001490
1491
MK Ryu0c1a37d2015-04-30 12:00:55 -07001492def get_num_special_tasks(**kwargs):
1493 """Get the number of special task entries from the local database.
1494
1495 Query the special tasks table for tasks matching the given 'kwargs',
1496 and return the number of the results. No attempt is made to forward
1497 the call to shards; the buck will stop here.
1498
1499 @param kwargs Filter keywords to pass to the underlying database query.
1500
1501 """
1502 return models.SpecialTask.query_count(kwargs)
1503
1504
def get_host_num_special_tasks(host, **kwargs):
    """Count special task entries for a given host.

    Query the special tasks table for tasks that ran on the host
    given by 'host' and matching the given 'kwargs', and return the
    count.  If the host is assigned to a shard, forward this call to
    that shard.

    @param host id or name of a host.  More often a hostname.
    @param kwargs Filter keywords to pass to the underlying database query.

    """
    # Retrieve host data even if the host is in an invalid state.
    host_model = models.Host.smart_get(host, False)
    if host_model.shard:
        shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname())
        return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
    return get_num_special_tasks(host=host, **kwargs)
1524
1525
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001526def get_status_task(host_id, end_time):
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001527 """Get the "status task" for a host from the local shard.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001528
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001529 Returns a single special task representing the given host's
1530 "status task". The status task is a completed special task that
1531 identifies whether the corresponding host was working or broken
1532 when it completed. A successful task indicates a working host;
1533 a failed task indicates broken.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001534
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001535 This call will not be forward to a shard; the receiving server
1536 must be the shard that owns the host.
1537
1538 @param host_id Id in the database of the target host.
1539 @param end_time Time reference for the host's status.
1540
1541 @return A single task; its status (successful or not)
1542 corresponds to the status of the host (working or
1543 broken) at the given time. If no task is found, return
1544 `None`.
1545
1546 """
1547 tasklist = rpc_utils.prepare_rows_as_nested_dicts(
1548 status_history.get_status_task(host_id, end_time),
1549 ('host', 'queue_entry'))
1550 return tasklist[0] if tasklist else None
1551
1552
def get_host_status_task(host_id, end_time):
    """Get the "status task" for a host from its owning shard.

    Finds the given host's owning shard, and forwards to it a call
    to `get_status_task()` (see above).

    @param host_id Id in the database of the target host.
    @param end_time Time reference for the host's status.

    @return A single task; its status (successful or not)
            corresponds to the status of the host (working or
            broken) at the given time.  If no task is found, return
            `None`.

    """
    host = models.Host.smart_get(host_id)
    if host.shard:
        # The return values from AFE methods are post-processed
        # objects that aren't JSON-serializable.  So, we have to
        # call AFE.run() to get the raw, serializable output from
        # the shard.
        shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
        return shard_afe.run('get_status_task',
                             host_id=host_id, end_time=end_time)
    return get_status_task(host_id, end_time)
1579
1580
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001581def get_host_diagnosis_interval(host_id, end_time, success):
1582 """Find a "diagnosis interval" for a given host.
1583
1584 A "diagnosis interval" identifies a start and end time where
1585 the host went from "working" to "broken", or vice versa. The
1586 interval's starting time is the starting time of the last status
1587 task with the old status; the end time is the finish time of the
1588 first status task with the new status.
1589
1590 This routine finds the most recent diagnosis interval for the
1591 given host prior to `end_time`, with a starting status matching
1592 `success`. If `success` is true, the interval will start with a
1593 successful status task; if false the interval will start with a
1594 failed status task.
1595
1596 @param host_id Id in the database of the target host.
1597 @param end_time Time reference for the diagnosis interval.
1598 @param success Whether the diagnosis interval should start
1599 with a successful or failed status task.
1600
1601 @return A list of two strings. The first is the timestamp for
1602 the beginning of the interval; the second is the
1603 timestamp for the end. If the host has never changed
1604 state, the list is empty.
1605
1606 """
1607 host = models.Host.smart_get(host_id)
J. Richard Barnette78f281a2015-06-29 13:24:51 -07001608 if not host.shard or utils.is_shard():
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001609 return status_history.get_diagnosis_interval(
1610 host_id, end_time, success)
1611 else:
1612 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1613 return shard_afe.get_host_diagnosis_interval(
1614 host_id, end_time, success)
1615
1616
showardc0ac3a72009-07-08 21:14:45 +00001617# support for host detail view
1618
MK Ryu0c1a37d2015-04-30 12:00:55 -07001619def get_host_queue_entries_and_special_tasks(host, query_start=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001620 query_limit=None, start_time=None,
1621 end_time=None):
showardc0ac3a72009-07-08 21:14:45 +00001622 """
1623 @returns an interleaved list of HostQueueEntries and SpecialTasks,
1624 in approximate run order. each dict contains keys for type, host,
1625 job, status, started_on, execution_path, and ID.
1626 """
1627 total_limit = None
1628 if query_limit is not None:
1629 total_limit = query_start + query_limit
MK Ryu0c1a37d2015-04-30 12:00:55 -07001630 filter_data_common = {'host': host,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001631 'query_limit': total_limit,
1632 'sort_by': ['-id']}
showardc0ac3a72009-07-08 21:14:45 +00001633
MK Ryu0c1a37d2015-04-30 12:00:55 -07001634 filter_data_special_tasks = rpc_utils.inject_times_to_filter(
1635 'time_started__gte', 'time_started__lte', start_time, end_time,
1636 **filter_data_common)
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001637
MK Ryu0c1a37d2015-04-30 12:00:55 -07001638 queue_entries = get_host_queue_entries(
1639 start_time, end_time, **filter_data_common)
1640 special_tasks = get_host_special_tasks(host, **filter_data_special_tasks)
showardc0ac3a72009-07-08 21:14:45 +00001641
1642 interleaved_entries = rpc_utils.interleave_entries(queue_entries,
1643 special_tasks)
1644 if query_start is not None:
1645 interleaved_entries = interleaved_entries[query_start:]
1646 if query_limit is not None:
1647 interleaved_entries = interleaved_entries[:query_limit]
MK Ryu0c1a37d2015-04-30 12:00:55 -07001648 return rpc_utils.prepare_host_queue_entries_and_special_tasks(
1649 interleaved_entries, queue_entries)
showardc0ac3a72009-07-08 21:14:45 +00001650
1651
MK Ryu0c1a37d2015-04-30 12:00:55 -07001652def get_num_host_queue_entries_and_special_tasks(host, start_time=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001653 end_time=None):
MK Ryu0c1a37d2015-04-30 12:00:55 -07001654 filter_data_common = {'host': host}
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001655
1656 filter_data_queue_entries, filter_data_special_tasks = (
1657 rpc_utils.inject_times_to_hqe_special_tasks_filters(
1658 filter_data_common, start_time, end_time))
1659
1660 return (models.HostQueueEntry.query_count(filter_data_queue_entries)
MK Ryu0c1a37d2015-04-30 12:00:55 -07001661 + get_host_num_special_tasks(**filter_data_special_tasks))
showardc0ac3a72009-07-08 21:14:45 +00001662
1663
mblighe8819cd2008-02-15 16:48:40 +00001664# other
1665
showarde0b63622008-08-04 20:58:47 +00001666def echo(data=""):
1667 """\
1668 Returns a passed in string. For doing a basic test to see if RPC calls
1669 can successfully be made.
1670 """
1671 return data
1672
1673
showardb7a52fd2009-04-27 20:10:56 +00001674def get_motd():
1675 """\
1676 Returns the message of the day as a string.
1677 """
1678 return rpc_utils.get_motd()
1679
1680
mblighe8819cd2008-02-15 16:48:40 +00001681def get_static_data():
jadmanski0afbb632008-06-06 21:10:57 +00001682 """\
1683 Returns a dictionary containing a bunch of data that shouldn't change
1684 often and is otherwise inaccessible. This includes:
showardc92da832009-04-07 18:14:34 +00001685
1686 priorities: List of job priority choices.
1687 default_priority: Default priority value for new jobs.
1688 users: Sorted list of all users.
Jiaxi Luo31874592014-06-11 10:36:35 -07001689 labels: Sorted list of labels not start with 'cros-version' and
1690 'fw-version'.
showardc92da832009-04-07 18:14:34 +00001691 atomic_groups: Sorted list of all atomic groups.
1692 tests: Sorted list of all tests.
1693 profilers: Sorted list of all profilers.
1694 current_user: Logged-in username.
1695 host_statuses: Sorted list of possible Host statuses.
1696 job_statuses: Sorted list of possible HostQueueEntry statuses.
Simran Basi7e605742013-11-12 13:43:36 -08001697 job_timeout_default: The default job timeout length in minutes.
showarda1e74b32009-05-12 17:32:04 +00001698 parse_failed_repair_default: Default value for the parse_failed_repair job
Jiaxi Luo31874592014-06-11 10:36:35 -07001699 option.
showardc92da832009-04-07 18:14:34 +00001700 reboot_before_options: A list of valid RebootBefore string enums.
1701 reboot_after_options: A list of valid RebootAfter string enums.
1702 motd: Server's message of the day.
1703 status_dictionary: A mapping from one word job status names to a more
1704 informative description.
jadmanski0afbb632008-06-06 21:10:57 +00001705 """
showard21baa452008-10-21 00:08:39 +00001706
jamesren76fcf192010-04-21 20:39:50 +00001707 default_drone_set_name = models.DroneSet.default_drone_set_name()
1708 drone_sets = ([default_drone_set_name] +
1709 sorted(drone_set.name for drone_set in
1710 models.DroneSet.objects.exclude(
1711 name=default_drone_set_name)))
showard21baa452008-10-21 00:08:39 +00001712
jadmanski0afbb632008-06-06 21:10:57 +00001713 result = {}
Alex Miller7d658cf2013-09-04 16:00:35 -07001714 result['priorities'] = priorities.Priority.choices()
Alex Miller7d658cf2013-09-04 16:00:35 -07001715 result['default_priority'] = 'Default'
1716 result['max_schedulable_priority'] = priorities.Priority.DEFAULT
jadmanski0afbb632008-06-06 21:10:57 +00001717 result['users'] = get_users(sort_by=['login'])
Jiaxi Luo31874592014-06-11 10:36:35 -07001718
1719 label_exclude_filters = [{'name__startswith': 'cros-version'},
Dan Shi65351d62015-08-03 12:03:23 -07001720 {'name__startswith': 'fw-version'},
1721 {'name__startswith': 'fwrw-version'},
Dan Shi27516972016-03-16 14:03:41 -07001722 {'name__startswith': 'fwro-version'},
1723 {'name__startswith': 'ab-version'},
1724 {'name__startswith': 'testbed-version'}]
Jiaxi Luo31874592014-06-11 10:36:35 -07001725 result['labels'] = get_labels(
1726 label_exclude_filters,
1727 sort_by=['-platform', 'name'])
1728
showardc92da832009-04-07 18:14:34 +00001729 result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
jadmanski0afbb632008-06-06 21:10:57 +00001730 result['tests'] = get_tests(sort_by=['name'])
showard2b9a88b2008-06-13 20:55:03 +00001731 result['profilers'] = get_profilers(sort_by=['name'])
showard0fc38302008-10-23 00:44:07 +00001732 result['current_user'] = rpc_utils.prepare_for_serialization(
showard64a95952010-01-13 21:27:16 +00001733 models.User.current_user().get_object_dict())
showard2b9a88b2008-06-13 20:55:03 +00001734 result['host_statuses'] = sorted(models.Host.Status.names)
mbligh5a198b92008-12-11 19:33:29 +00001735 result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
Simran Basi7e605742013-11-12 13:43:36 -08001736 result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
Simran Basi34217022012-11-06 13:43:15 -08001737 result['job_max_runtime_mins_default'] = (
1738 models.Job.DEFAULT_MAX_RUNTIME_MINS)
showarda1e74b32009-05-12 17:32:04 +00001739 result['parse_failed_repair_default'] = bool(
1740 models.Job.DEFAULT_PARSE_FAILED_REPAIR)
jamesrendd855242010-03-02 22:23:44 +00001741 result['reboot_before_options'] = model_attributes.RebootBefore.names
1742 result['reboot_after_options'] = model_attributes.RebootAfter.names
showard8fbae652009-01-20 23:23:10 +00001743 result['motd'] = rpc_utils.get_motd()
jamesren76fcf192010-04-21 20:39:50 +00001744 result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
1745 result['drone_sets'] = drone_sets
jamesren4a41e012010-07-16 22:33:48 +00001746 result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
showard8ac29b42008-07-17 17:01:55 +00001747
showardd3dc1992009-04-22 21:01:40 +00001748 result['status_dictionary'] = {"Aborted": "Aborted",
showard8ac29b42008-07-17 17:01:55 +00001749 "Verifying": "Verifying Host",
Alex Millerdfff2fd2013-05-28 13:05:06 -07001750 "Provisioning": "Provisioning Host",
showard8ac29b42008-07-17 17:01:55 +00001751 "Pending": "Waiting on other hosts",
1752 "Running": "Running autoserv",
1753 "Completed": "Autoserv completed",
1754 "Failed": "Failed to complete",
showardd823b362008-07-24 16:35:46 +00001755 "Queued": "Queued",
showard5deb6772008-11-04 21:54:33 +00001756 "Starting": "Next in host's queue",
1757 "Stopped": "Other host(s) failed verify",
showardd3dc1992009-04-22 21:01:40 +00001758 "Parsing": "Awaiting parse of final results",
showard29f7cd22009-04-29 21:16:24 +00001759 "Gathering": "Gathering log files",
mbligh4608b002010-01-05 18:22:35 +00001760 "Waiting": "Waiting for scheduler action",
Dan Shi07e09af2013-04-12 09:31:29 -07001761 "Archiving": "Archiving results",
1762 "Resetting": "Resetting hosts"}
Jiaxi Luo421608e2014-07-07 14:38:00 -07001763
1764 result['wmatrix_url'] = rpc_utils.get_wmatrix_url()
Simran Basi71206ef2014-08-13 13:51:18 -07001765 result['is_moblab'] = bool(utils.is_moblab())
Jiaxi Luo421608e2014-07-07 14:38:00 -07001766
jadmanski0afbb632008-06-06 21:10:57 +00001767 return result
showard29f7cd22009-04-29 21:16:24 +00001768
1769
def get_server_time():
    """Return the current server time as a 'YYYY-MM-DD HH:MM' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
Kevin Cheng19521982016-09-22 12:27:23 -07001772
1773
def get_hosts_by_attribute(attribute, value):
    """
    Get the list of valid hosts that share the same host attribute value.

    @param attribute: String of the host attribute to check.
    @param value: String of the value that is shared between hosts.

    @returns List of hostnames that all have the same host attribute and
             value.
    """
    rows = models.HostAttribute.query_objects({'attribute': attribute,
                                               'value': value})
    hostnames = []
    for row in rows:
        # Skip hosts marked invalid (e.g. deleted hosts).
        if row.host.invalid == 0:
            hostnames.append(row.host.hostname)
    return hostnames
Allen Licdd00f22017-02-01 18:01:52 -08001787
1788
def canonicalize_suite_name(suite_name):
    """Canonicalize the suite's name.

    @param suite_name: the name of the suite.
    """
    # Do not change this naming convention without updating
    # site_utils.parse_job_name.
    template = 'test_suites/control.%s'
    return template % suite_name
1797
1798
def formatted_now():
    """Return the current datetime, formatted with time_utils.TIME_FMT."""
    now = datetime.datetime.now()
    return now.strftime(time_utils.TIME_FMT)
1802
1803
def _get_control_file_by_build(build, ds, suite_name):
    """Return control file contents for |suite_name|.

    Query the dev server at |ds| for the control file |suite_name|, included
    in |build| for |board|.

    @param build: unique name by which to refer to the image from now on.
    @param ds: a dev_server.DevServer instance to fetch control file with.
    @param suite_name: canonicalized suite name, e.g. test_suites/control.bvt.
    @raises ControlFileNotFound if a unique suite control file doesn't exist.
    @raises NoControlFileList if we can't list the control files at all.
    @raises ControlFileEmpty if the control file exists on the server, but
                             can't be read.

    @return the contents of the desired control file.
    """
    getter = control_file_getter.DevServerGetter.create(build, ds)
    devserver_name = ds.hostname
    stat_name = 'control_files.parse.%s.%s' % (
            devserver_name.replace('.', '_'), suite_name.rsplit('.')[-1])
    timer = autotest_stats.Timer(stat_name)
    try:
        # Get the control file for the suite.
        with timer:
            contents = getter.get_control_file_contents_by_name(suite_name)
    except error.CrosDynamicSuiteException as e:
        raise type(e)('Failed to get control file for %s '
                      '(devserver: %s) (error: %s)' %
                      (build, devserver_name, e))
    if not contents:
        raise error.ControlFileEmpty(
                "Fetching %s returned no data. (devserver: %s)" %
                (suite_name, devserver_name))
    # Force control files to only contain ascii characters.
    try:
        contents.encode('ascii')
    except UnicodeDecodeError as e:
        raise error.ControlFileMalformed(str(e))

    return contents
1845
1846
def _get_control_file_by_suite(suite_name):
    """Get control file contents by suite name.

    @param suite_name: Suite name as string.
    @returns: Control file contents as string.
    """
    drone_dir = _CONFIG.get_config_value('SCHEDULER',
                                         'drone_installation_directory')
    getter = control_file_getter.FileSystemGetter([drone_dir])
    return getter.get_control_file_contents_by_name(suite_name)
1857
1858
def _stage_build_artifacts(build, hostname=None):
    """
    Ensure components of |build| necessary for installing images are staged.

    @param build image we want to stage.
    @param hostname hostname of a dut may run test on. This is to help to
                    locate a devserver closer to duts if needed. Default is
                    None.

    @raises StageControlFileFailure: if the dev server throws 500 while staging
                                     suite control files.

    @return: dev_server.ImageServer instance to use with this build.
    @return: timings dictionary containing staging start/end times.
    """
    # Only 'test_suites' is staged synchronously here; the dev server
    # downloads the remaining build components in the background.
    ds = dev_server.resolve(build, hostname=hostname)
    ds_name = ds.hostname
    timings = {constants.DOWNLOAD_STARTED_TIME: formatted_now()}
    timer = autotest_stats.Timer('control_files.stage.%s' % (
            ds_name.replace('.', '_')))
    try:
        with timer:
            ds.stage_artifacts(image=build, artifacts=['test_suites'])
    except dev_server.DevServerException as e:
        raise error.StageControlFileFailure(
                "Failed to stage %s on %s: %s" % (build, ds_name, e))
    timings[constants.PAYLOAD_FINISHED_TIME] = formatted_now()
    return (ds, timings)
1890
1891
1892@rpc_utils.route_rpc_to_master
1893def create_suite_job(
1894 name='',
1895 board='',
1896 pool='',
1897 control_file='',
1898 check_hosts=True,
1899 num=None,
1900 file_bugs=False,
1901 timeout=24,
1902 timeout_mins=None,
1903 priority=priorities.Priority.DEFAULT,
1904 suite_args=None,
1905 wait_for_results=True,
1906 job_retry=False,
1907 max_retries=None,
1908 max_runtime_mins=None,
1909 suite_min_duts=0,
1910 offload_failures_only=False,
1911 builds=None,
1912 test_source_build=None,
1913 run_prod_code=False,
1914 delay_minutes=0,
1915 is_cloning=False,
1916 **kwargs
1917):
1918 """
1919 Create a job to run a test suite on the given device with the given image.
1920
1921 When the timeout specified in the control file is reached, the
1922 job is guaranteed to have completed and results will be available.
1923
1924 @param name: The test name if control_file is supplied, otherwise the name
1925 of the test suite to run, e.g. 'bvt'.
1926 @param board: the kind of device to run the tests on.
1927 @param builds: the builds to install e.g.
1928 {'cros-version:': 'x86-alex-release/R18-1655.0.0',
1929 'fwrw-version:': 'x86-alex-firmware/R36-5771.50.0',
1930 'fwro-version:': 'x86-alex-firmware/R36-5771.49.0'}
1931 If builds is given a value, it overrides argument build.
1932 @param test_source_build: Build that contains the server-side test code.
1933 @param pool: Specify the pool of machines to use for scheduling
1934 purposes.
1935 @param control_file: the control file of the job.
1936 @param check_hosts: require appropriate live hosts to exist in the lab.
1937 @param num: Specify the number of machines to schedule across (integer).
1938 Leave unspecified or use None to use default sharding factor.
1939 @param file_bugs: File a bug on each test failure in this suite.
1940 @param timeout: The max lifetime of this suite, in hours.
1941 @param timeout_mins: The max lifetime of this suite, in minutes. Takes
1942 priority over timeout.
1943 @param priority: Integer denoting priority. Higher is more important.
1944 @param suite_args: Optional arguments which will be parsed by the suite
1945 control file. Used by control.test_that_wrapper to
1946 determine which tests to run.
1947 @param wait_for_results: Set to False to run the suite job without waiting
1948 for test jobs to finish. Default is True.
1949 @param job_retry: Set to True to enable job-level retry. Default is False.
1950 @param max_retries: Integer, maximum job retries allowed at suite level.
1951 None for no max.
1952 @param max_runtime_mins: Maximum amount of time a job can be running in
1953 minutes.
1954 @param suite_min_duts: Integer. Scheduler will prioritize getting the
1955 minimum number of machines for the suite when it is
1956 competing with another suite that has a higher
1957 priority but already got minimum machines it needs.
1958 @param offload_failures_only: Only enable gs_offloading for failed jobs.
1959 @param run_prod_code: If True, the suite will run the test code that
1960 lives in prod aka the test code currently on the
1961 lab servers. If False, the control files and test
1962 code for this suite run will be retrieved from the
1963 build artifacts.
1964 @param delay_minutes: Delay the creation of test jobs for a given number of
1965 minutes.
1966 @param is_cloning: True if creating a cloning job.
1967 @param kwargs: extra keyword args. NOT USED.
1968
1969 @raises ControlFileNotFound: if a unique suite control file doesn't exist.
1970 @raises NoControlFileList: if we can't list the control files at all.
1971 @raises StageControlFileFailure: If the dev server throws 500 while
1972 staging test_suites.
1973 @raises ControlFileEmpty: if the control file exists on the server, but
1974 can't be read.
1975
1976 @return: the job ID of the suite; -1 on error.
1977 """
1978 if type(num) is not int and num is not None:
1979 raise error.SuiteArgumentException('Ill specified num argument %r. '
1980 'Must be an integer or None.' % num)
1981 if num == 0:
1982 logging.warning("Can't run on 0 hosts; using default.")
1983 num = None
1984
1985 if builds is None:
1986 builds = {}
1987
1988 # Default test source build to CrOS build if it's not specified and
1989 # run_prod_code is set to False.
1990 if not run_prod_code:
1991 test_source_build = Suite.get_test_source_build(
1992 builds, test_source_build=test_source_build)
1993
1994 sample_dut = rpc_utils.get_sample_dut(board, pool)
1995
1996 suite_name = canonicalize_suite_name(name)
1997 if run_prod_code:
1998 ds = dev_server.resolve(test_source_build, hostname=sample_dut)
1999 keyvals = {}
2000 else:
2001 (ds, keyvals) = _stage_build_artifacts(
2002 test_source_build, hostname=sample_dut)
2003 keyvals[constants.SUITE_MIN_DUTS_KEY] = suite_min_duts
2004
2005 # Do not change this naming convention without updating
2006 # site_utils.parse_job_name.
2007 if run_prod_code:
2008 # If run_prod_code is True, test_source_build is not set, use the
2009 # first build in the builds list for the sutie job name.
2010 name = '%s-%s' % (builds.values()[0], suite_name)
2011 else:
2012 name = '%s-%s' % (test_source_build, suite_name)
2013
2014 timeout_mins = timeout_mins or timeout * 60
2015 max_runtime_mins = max_runtime_mins or timeout * 60
2016
2017 if not board:
2018 board = utils.ParseBuildName(builds[provision.CROS_VERSION_PREFIX])[0]
2019
2020 if run_prod_code:
2021 control_file = _get_control_file_by_suite(suite_name)
2022
2023 if not control_file:
2024 # No control file was supplied so look it up from the build artifacts.
2025 control_file = _get_control_file_by_build(
2026 test_source_build, ds, suite_name)
2027
2028 # Prepend builds and board to the control file.
2029 if is_cloning:
2030 control_file = tools.remove_injection(control_file)
2031
2032 inject_dict = {
2033 'board': board,
2034 # `build` is needed for suites like AU to stage image inside suite
2035 # control file.
2036 'build': test_source_build,
2037 'builds': builds,
2038 'check_hosts': check_hosts,
2039 'pool': pool,
2040 'num': num,
2041 'file_bugs': file_bugs,
2042 'timeout': timeout,
2043 'timeout_mins': timeout_mins,
2044 'devserver_url': ds.url(),
2045 'priority': priority,
2046 'suite_args' : suite_args,
2047 'wait_for_results': wait_for_results,
2048 'job_retry': job_retry,
2049 'max_retries': max_retries,
2050 'max_runtime_mins': max_runtime_mins,
2051 'offload_failures_only': offload_failures_only,
2052 'test_source_build': test_source_build,
2053 'run_prod_code': run_prod_code,
2054 'delay_minutes': delay_minutes,
2055 }
2056 control_file = tools.inject_vars(inject_dict, control_file)
2057
2058 return rpc_utils.create_job_common(name,
2059 priority=priority,
2060 timeout_mins=timeout_mins,
2061 max_runtime_mins=max_runtime_mins,
2062 control_type='Server',
2063 control_file=control_file,
2064 hostless=True,
2065 keyvals=keyvals)
2066
2067
def get_job_history(**filter_data):
    """Get history of the job, including the special tasks executed for the job

    @param filter_data: filter for the call, should at least include
                        {'job_id': [job id]}
    @returns: JSON string of the job's history, including the information such
              as the hosts run the job and the special tasks executed before
              and after the job.
    """
    job_info = job_history.get_job_info(filter_data['job_id'])
    history = job_info.get_history()
    return rpc_utils.prepare_for_serialization(history)
2080
2081
def get_host_history(start_time, end_time, hosts=None, board=None, pool=None):
    """Get history of a list of host.

    The return is a JSON string of host history for each host, for example,
    {'172.22.33.51': [{'status': 'Resetting'
                       'start_time': '2014-08-07 10:02:16',
                       'end_time': '2014-08-07 10:03:16',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'Task: Special Task 19441991 (host ...)'},
                       {'status': 'Running'
                       'start_time': '2014-08-07 10:03:18',
                       'end_time': '2014-08-07 10:13:00',
                       'log_url': 'http://autotest/reset-546546/debug',
                       'dbg_str': 'HQE: 15305005, for job: 14995562'}
                     ]
    }
    @param start_time: start time to search for history, can be string value or
                       epoch time.
    @param end_time: end time to search for history, can be string value or
                     epoch time.
    @param hosts: A list of hosts to search for history. Default is None.
    @param board: board type of hosts. Default is None.
    @param pool: pool type of hosts. Default is None.
    @returns: JSON string of the host history.
    """
    # Delegate the heavy lifting to host_history; 4 worker processes is the
    # established parallelism for gathering per-host details.
    details = host_history.get_history_details(
            start_time=start_time,
            end_time=end_time,
            hosts=hosts,
            board=board,
            pool=pool,
            process_pool_size=4)
    return rpc_utils.prepare_for_serialization(details)
2112
2113
def shard_heartbeat(shard_hostname, jobs=(), hqes=(), known_job_ids=(),
                    known_host_ids=(), known_host_statuses=()):
    """Receive updates for job statuses from shards and assign hosts and jobs.

    @param shard_hostname: Hostname of the calling shard
    @param jobs: Jobs in serialized form that should be updated with newer
                 status from a shard.
    @param hqes: Hostqueueentries in serialized form that should be updated with
                 newer status from a shard. Note that for every hostqueueentry
                 the corresponding job must be in jobs.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.
    @param known_host_statuses: List of statuses of hosts the shard already
                                has; parallel to known_host_ids (same length,
                                same order).

    @returns: Serialized representations of hosts, jobs, suite job keyvals
              and their dependencies to be inserted into a shard's database.
    """
    # The following alternatives to sending host and job ids in every heartbeat
    # have been considered:
    # 1. Sending the highest known job and host ids. This would work for jobs:
    #    Newer jobs always have larger ids. Also, if a job is not assigned to a
    #    particular shard during a heartbeat, it never will be assigned to this
    #    shard later.
    #    This is not true for hosts though: A host that is leased won't be sent
    #    to the shard now, but might be sent in a future heartbeat. This means
    #    sometimes hosts should be transfered that have a lower id than the
    #    maximum host id the shard knows.
    # 2. Send the number of jobs/hosts the shard knows to the master in each
    #    heartbeat. Compare these to the number of records that already have
    #    the shard_id set to this shard. In the normal case, they should match.
    #    In case they don't, resend all entities of that type.
    #    This would work well for hosts, because there aren't that many.
    #    Resending all jobs is quite a big overhead though.
    #    Also, this approach might run into edge cases when entities are
    #    ever deleted.
    # 3. Mixtures of the above: Use 1 for jobs and 2 for hosts.
    #    Using two different approaches isn't consistent and might cause
    #    confusion. Also the issues with the case of deletions might still
    #    occur.
    #
    # The overhead of sending all job and host ids in every heartbeat is low:
    # At peaks one board has about 1200 created but unfinished jobs.
    # See the numbers here: http://goo.gl/gQCGWH
    # Assuming that job id's have 6 digits and that json serialization takes a
    # comma and a space as overhead, the traffic per id sent is about 8 bytes.
    # If 5000 ids need to be sent, this means 40 kilobytes of traffic.
    # A NOT IN query with 5000 ids took about 30ms in tests made.
    # These numbers seem low enough to outweigh the disadvantages of the
    # solutions described above.
    timer = autotest_stats.Timer('shard_heartbeat')
    with timer:
        shard_obj = rpc_utils.retrieve_shard(shard_hostname=shard_hostname)
        rpc_utils.persist_records_sent_from_shard(shard_obj, jobs, hqes)
        assert len(known_host_ids) == len(known_host_statuses)
        # Sync host statuses reported by the shard into the master DB.
        # Iterate the parallel id/status lists pairwise instead of indexing
        # both lists by position; only write back hosts whose status changed.
        for host_id, host_status in zip(known_host_ids, known_host_statuses):
            host_model = models.Host.objects.get(pk=host_id)
            if host_model.status != host_status:
                host_model.status = host_status
                host_model.save()

        hosts, jobs, suite_keyvals = rpc_utils.find_records_for_shard(
                shard_obj, known_job_ids=known_job_ids,
                known_host_ids=known_host_ids)
        return {
            'hosts': [host.serialize() for host in hosts],
            'jobs': [job.serialize() for job in jobs],
            'suite_keyvals': [kv.serialize() for kv in suite_keyvals],
        }
2182
2183
def get_shards(**filter_data):
    """Return a list of all shards.

    @returns A sequence of nested dictionaries of shard information.
    """
    shard_objects = models.Shard.query_objects(filter_data)
    shard_dicts = rpc_utils.prepare_rows_as_nested_dicts(shard_objects, ())
    # Attach the board labels assigned to each shard to its serialized form.
    for shard_dict, shard_obj in zip(shard_dicts, shard_objects):
        shard_dict['labels'] = [l.name for l in shard_obj.labels.all()]
    return shard_dicts
2195
2196
def _assign_board_to_shard_precheck(labels):
    """Verify whether board labels are valid to be added to a given shard.

    First check whether board label is in correct format. Second, check whether
    the board label exist. Third, check whether the board has already been
    assigned to shard.

    @param labels: Board labels separated by comma.

    @raises error.RPCException: If label provided doesn't start with `board:`
            or board has been added to shard already.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: A list of label models that ready to be added to shard.
    """
    label_models = []
    for label_name in labels.split(','):
        # Reject anything that isn't a board label.
        if not label_name.startswith('board:'):
            raise error.RPCException('Sharding only supports `board:.*` label.')
        # smart_get raises models.Label.DoesNotExist for unknown labels.
        label = models.Label.smart_get(label_name)
        # A board may belong to at most one shard.
        try:
            shard = models.Shard.objects.get(labels=label)
        except models.Shard.DoesNotExist:
            # Board is not on any shard yet, so it's valid.
            label_models.append(label)
        else:
            raise error.RPCException(
                '%s is already on shard %s' % (label, shard.hostname))
    return label_models
2230
2231
def add_shard(hostname, labels):
    """Add a shard and start running jobs on it.

    @param hostname: The hostname of the shard to be added; needs to be unique.
    @param labels: Board labels separated by comma. Jobs of one of the labels
                   will be assigned to the shard.

    @raises error.RPCException: If label provided doesn't start with `board:` or
            board has been added to shard already.
    @raises model_logic.ValidationError: If a shard with the given hostname
            already exist.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: The id of the added shard.
    """
    # Validate all labels before creating the shard record.
    label_models = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.add_object(hostname=hostname)
    for label_model in label_models:
        shard.labels.add(label_model)
    return shard.id
2252
2253
def add_board_to_shard(hostname, labels):
    """Add boards to a given shard

    @param hostname: The hostname of the shard to be changed.
    @param labels: Board labels separated by comma.

    @raises error.RPCException: If label provided doesn't start with `board:` or
            board has been added to shard already.
    @raises models.Label.DoesNotExist: If the label specified doesn't exist.

    @returns: The id of the changed shard.
    """
    # Validate the labels first so an invalid label fails before the
    # shard lookup, matching the original error ordering.
    label_models = _assign_board_to_shard_precheck(labels)
    shard = models.Shard.objects.get(hostname=hostname)
    for label_model in label_models:
        shard.labels.add(label_model)
    return shard.id
2271
2272
def delete_shard(hostname):
    """Delete a shard and reclaim all resources from it.

    This claims back all assigned hosts from the shard. To ensure all DUTs are
    in a sane state, a Reboot task with highest priority is scheduled for them.
    This reboots the DUTs and then all left tasks continue to run in drone of
    the master.

    The procedure for deleting a shard:
        * Lock all unlocked hosts on that shard.
        * Remove shard information .
        * Assign a reboot task with highest priority to these hosts.
        * Unlock these hosts, then, the reboot tasks run in front of all other
          tasks.

    The status of jobs that haven't been reported to be finished yet, will be
    lost. The master scheduler will pick up the jobs and execute them.

    @param hostname: Hostname of the shard to delete.
    """
    shard = rpc_utils.retrieve_shard(shard_hostname=hostname)
    # Only hosts that are currently unlocked get locked (and later unlocked)
    # by this procedure; hosts locked by someone else are left alone.
    hostnames_to_lock = [h.hostname for h in
                         models.Host.objects.filter(shard=shard, locked=False)]

    # TODO(beeps): Power off shard
    # For ChromeOS hosts, a reboot test with the highest priority is added to
    # the DUT. After a reboot it should be guaranteed that no processes from
    # prior tests that were run by a shard are still running.

    # Lock all unlocked hosts so no other job can be scheduled on them while
    # shard references are being removed.
    dicts = {'locked': True, 'lock_time': datetime.datetime.now()}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)

    # Remove shard information: detach hosts and jobs, then drop the shard
    # record and its label assignments.
    models.Host.objects.filter(shard=shard).update(shard=None)
    models.Job.objects.filter(shard=shard).update(shard=None)
    shard.labels.clear()
    shard.delete()

    # Assign a reboot task with highest priority: Super.
    t = models.Test.objects.get(name='platform_BootPerfServer:shard')
    c = utils.read_file(os.path.join(common.autotest_dir, t.path))
    if hostnames_to_lock:
        rpc_utils.create_job_common(
                'reboot_dut_for_shard_deletion',
                priority=priorities.Priority.SUPER,
                control_type='Server',
                control_file=c, hosts=hostnames_to_lock)

    # Unlock these shard-related hosts; the SUPER-priority reboot job queued
    # above will run before any other pending work on them.
    dicts = {'locked': False, 'lock_time': None}
    models.Host.objects.filter(hostname__in=hostnames_to_lock).update(**dicts)
2325
2326
def get_servers(hostname=None, role=None, status=None):
    """Get a list of servers with matching role and status.

    @param hostname: FQDN of the server.
    @param role: Name of the server role, e.g., drone, scheduler. Default to
                 None to match any role.
    @param status: Status of the server, e.g., primary, backup, repair_required.
                   Default to None to match any server status.

    @raises error.RPCException: If server database is not used.
    @return: A list of server names for servers with matching role and status.
    """
    # This RPC is only meaningful when the server database is enabled.
    if not server_manager_utils.use_server_db():
        raise error.RPCException('Server database is not enabled. Please try '
                                 'retrieve servers from global config.')
    matching_servers = server_manager_utils.get_servers(
            hostname=hostname, role=role, status=status)
    return [server.get_details() for server in matching_servers]
2345
2346
@rpc_utils.route_rpc_to_master
def get_stable_version(board=stable_version_utils.DEFAULT, android=False):
    """Get stable version for the given board.

    @param board: Name of the board.
    @param android: If True, the given board is an Android-based device. If
                    False, assume it's a Chrome OS-based device.

    @return: Stable version of the given board. Returns the global config
             value of CROS.stable_cros_version if the stable_versions table
             does not have an entry for board DEFAULT.
    """
    return stable_version_utils.get(board=board, android=android)
2360
2361
@rpc_utils.route_rpc_to_master
def get_all_stable_versions():
    """Get stable versions for all boards.

    @return: A dictionary of board:version.
    """
    return stable_version_utils.get_all()
2369
2370
@rpc_utils.route_rpc_to_master
def set_stable_version(version, board=stable_version_utils.DEFAULT):
    """Modify stable version for the given board.

    @param version: The new value of stable version for given board.
    @param board: Name of the board, default to value `DEFAULT`.
    """
    stable_version_utils.set(version=version, board=board)
2379
2380
@rpc_utils.route_rpc_to_master
def delete_stable_version(board):
    """Modify stable version for the given board.

    Delete a stable version entry in afe_stable_versions table for a given
    board, so default stable version will be used.

    @param board: Name of the board.
    """
    stable_version_utils.delete(board=board)
2391
2392
def get_tests_by_build(build, ignore_invalid_tests=True):
    """Get the tests that are available for the specified build.

    @param build: unique name by which to refer to the image.
    @param ignore_invalid_tests: flag on if unparsable tests are ignored.

    @return: A sorted list of all tests that are in the build specified.
    """
    # Collect the control files specified in this build
    cfile_getter = control_file_lib._initialize_control_file_getter(build)
    if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
        control_file_info_list = cfile_getter.get_suite_info()
        control_file_list = control_file_info_list.keys()
    else:
        control_file_list = cfile_getter.get_control_file_list()

    test_objects = []
    _id = 0
    for control_file_path in control_file_list:
        # Read and parse the control file
        if SuiteBase.ENABLE_CONTROLS_IN_BATCH:
            control_file = control_file_info_list[control_file_path]
        else:
            control_file = cfile_getter.get_control_file_contents(
                    control_file_path)
        try:
            control_obj = control_data.parse_control_string(control_file)
        except Exception:
            logging.info('Failed to parse control file: %s', control_file_path)
            if not ignore_invalid_tests:
                raise
            # Bug fix: skip this control file instead of falling through.
            # Previously execution continued and reused control_obj from the
            # previous iteration (or raised NameError if the first file
            # failed to parse), producing duplicate/garbage test entries.
            continue

        # Extract the values needed for the AFE from the control_obj.
        # The keys list represents attributes in the control_obj that
        # are required by the AFE
        keys = ['author', 'doc', 'name', 'time', 'test_type', 'experimental',
                'test_category', 'test_class', 'dependencies', 'run_verify',
                'sync_count', 'job_retries', 'retries', 'path']

        test_object = {}
        for key in keys:
            test_object[key] = getattr(control_obj, key) if hasattr(
                    control_obj, key) else ''

        # Unfortunately, the AFE expects different key-names for certain
        # values, these must be corrected to avoid the risk of tests
        # being omitted by the AFE.
        # The 'id' is an additional value used in the AFE.
        # The control_data parsing does not reference 'run_reset', but it
        # is also used in the AFE and defaults to True.
        test_object['id'] = _id
        test_object['run_reset'] = True
        test_object['description'] = test_object.get('doc', '')
        test_object['test_time'] = test_object.get('time', 0)
        test_object['test_retry'] = test_object.get('retries', 0)

        # Fix the test name to be consistent with the current presentation
        # of test names in the AFE.
        testpath, subname = os.path.split(control_file_path)
        testname = os.path.basename(testpath)
        subname = subname.split('.')[1:]
        if subname:
            testname = '%s:%s' % (testname, ':'.join(subname))

        test_object['name'] = testname

        # Correct the test path as parse_control_string sets an empty string.
        test_object['path'] = control_file_path

        _id += 1
        test_objects.append(test_object)

    test_objects = sorted(test_objects, key=lambda x: x.get('name'))
    return rpc_utils.prepare_for_serialization(test_objects)