# pylint: disable-msg=C0111

"""\
Functions to expose over the RPC interface.

For all modify* and delete* functions that ask for an 'id' parameter to
identify the object to operate on, the id may be either
 * the database row ID
 * the name of the object (label name, hostname, user login, etc.)
 * a dictionary containing uniquely identifying field (this option should seldom
   be used)

When specifying foreign key fields (i.e. adding hosts to a label, or adding
users to an ACL group), the given value may be either the database row ID or the
name of the object.

All get* functions return lists of dictionaries. Each dictionary represents one
object and maps field names to values.

Some examples:
modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
modify_test('sleeptest', test_type='Client', params=', seconds=60')
delete_acl_group(1) # delete by ID
delete_acl_group('Everyone') # delete by name
acl_group_add_users('Everyone', ['mbligh', 'showard'])
get_jobs(owner='showard', status='Queued')

See doctests/001_rpc_test.txt for (lots) more examples.
"""

__author__ = 'showard@google.com (Steve Howard)'

import sys
import datetime

from django.db.models import Count
import common
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib.cros.graphite import autotest_stats
from autotest_lib.frontend.afe import control_file, rpc_utils
from autotest_lib.frontend.afe import models, model_logic, model_attributes
from autotest_lib.frontend.afe import site_rpc_interface
from autotest_lib.frontend.tko import models as tko_models
from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
from autotest_lib.server import frontend
from autotest_lib.server import utils
from autotest_lib.server.cros import provision
from autotest_lib.server.cros.dynamic_suite import tools
from autotest_lib.site_utils import status_history


_timer = autotest_stats.Timer('rpc_interface')

def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job."""
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    para_set = job.parameterized_job.parameterizedjobparameter_set
    job_test_para = para_set.get(test_parameter=image_parameter)
    return job_test_para.parameter_value

# labels

def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.
    """
    label_model = models.Label.smart_get(id)

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'modify_label', False,
                             id=id, **data)

    label_model.update_object(data)


def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.
    """
    label_model = models.Label.smart_get(id)

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'delete_label', False,
                             id=id)

    label_model.delete()

def add_label(name, ignore_exception_if_exists=False, **kwargs):
    """Adds a new label of a given name.

    @param name: label name.
    @param ignore_exception_if_exists: If True and the exception was
        thrown due to the duplicated label name when adding a label,
        then suppress the exception. Default is False.
    @param kwargs: keyword args that store more info about a label
        other than the name.
    @return: int/long id of a new label.
    """
    # models.Label.add_object() throws model_logic.ValidationError
    # when it is given a label name that already exists.
    # However, ValidationError can be thrown with different errors,
    # and those errors should be thrown up to the call chain.
    try:
        label = models.Label.add_object(name=name, **kwargs)
    except:
        exc_info = sys.exc_info()
        if ignore_exception_if_exists:
            label = rpc_utils.get_label(name)
            # If the exception is raised not because of duplicated
            # "name", then raise the original exception.
            if label is None:
                raise exc_info[0], exc_info[1], exc_info[2]
        else:
            raise exc_info[0], exc_info[1], exc_info[2]
    return label.id

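# Usage sketch for add_label (illustrative only; the label name 'bluetooth'
# is hypothetical):
#
#     label_id = add_label('bluetooth')
#     # Adding an existing name raises ValidationError unless the duplicate
#     # is explicitly tolerated:
#     same_id = add_label('bluetooth', ignore_exception_if_exists=True)
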
def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label = models.Label.smart_get(id)
    host_objs = models.Host.smart_get_bulk(hosts)
    if label.platform:
        models.Host.check_no_platform(host_objs)
    label.host_set.add(*host_objs)

@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on the master, not on shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name, not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
                        while the label does not exist.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # This matches the type checks in smart_get, which is a hack
        # in and of itself. The aim here is to create any non-existent
        # label, which we cannot do if the 'id' specified isn't a label name.
        if isinstance(id, basestring):
            label = models.Label.smart_get(add_label(id))
        else:
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)

    host_objs = models.Host.smart_get_bulk(hosts)
    # Make sure the label exists on the shard with the same id
    # as it is on the master.
    # It is possible that the label already exists on a shard, because
    # we add a new label only to the shards of the hosts to which the label
    # is going to be attached.
    # For example, we add a label L1 to a host in shard S1.
    # The master and S1 will have L1, but other shards won't.
    # Later, when we add the same label L1 to hosts in shards S1 and S2,
    # S1 already has the label but S2 doesn't.
    # S2 should get the new label without any problem.
    # We ignore the exception in such a case.
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
    rpc_utils.fanout_rpc(host_objs, 'add_label_to_hosts', id=id)

    add_label_to_hosts(id, hosts)

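# Usage sketch for label_add_hosts (illustrative only; the label name and
# hostnames are hypothetical):
#
#     # Attaches 'pool:bvt' to both hosts; the label is created on the master
#     # and on any shard owning one of the hosts if it does not exist yet.
#     label_add_hosts(id='pool:bvt',
#                     hosts=['chromeos1-row1-rack1-host1',
#                            'chromeos1-row1-rack1-host2'])
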
def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts from which to remove the label.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    models.Label.smart_get(id).host_set.remove(*host_objs)


@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on the master, not on shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)

    remove_label_from_hosts(id, hosts)

def get_labels(exclude_filters=(), **filter_data):
    """\
    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    labels = models.Label.query_objects(filter_data)
    for exclude_filter in exclude_filters:
        labels = labels.exclude(**exclude_filter)
    return rpc_utils.prepare_rows_as_nested_dicts(labels, ('atomic_group',))


# atomic groups

def add_atomic_group(name, max_number_of_machines=None, description=None):
    return models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description).id


def modify_atomic_group(id, **data):
    models.AtomicGroup.smart_get(id).update_object(data)


def delete_atomic_group(id):
    models.AtomicGroup.smart_get(id).delete()


def atomic_group_add_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.add(*label_objs)


def atomic_group_remove_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.remove(*label_objs)


def get_atomic_groups(**filter_data):
    return rpc_utils.prepare_for_serialization(
            models.AtomicGroup.list_objects(filter_data))


# hosts

def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    return models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection).id

@rpc_utils.forward_single_host_rpc_to_shard
def modify_host(id, **data):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard, this
    will also forward the call to the responsible shard. For example, if a
    host is locked using this function, the change will also propagate to the
    shard.

    @param id: id of the host to modify.
    @param **data: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(data)
    host = models.Host.smart_get(id)

    rpc_utils.check_modify_host_locking(host, data)
    host.update_object(data)

def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts that match the
    filters is assigned to a shard, this will also forward the call to the
    responsible shard.

    The filters are always applied on the master, not on the shards. This means
    if the states of a host differ on the master and a shard, the state on the
    master will be used. For example:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
      update the host (both on the shard and on the master), because the state
      of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair Failed'}
      will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        rpc_utils.check_modify_host_locking(host, update_data)
        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    if not utils.is_shard():
        # Caution: Changing the filter from the original here. See docstring.
        rpc_utils.run_rpc_on_multiple_hostnames(
                'modify_hosts', affected_shard_hostnames,
                host_filter_data={'id__in': affected_host_ids},
                update_data=update_data)

    for host in hosts:
        host.update_object(update_data)

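# Usage sketch for modify_hosts (illustrative only; the filter and update
# values, including the 'labels__name' lookup, are hypothetical):
#
#     # Lock every Ready host bearing the 'pool:suites' label, recording why.
#     modify_hosts(host_filter_data={'status': 'Ready',
#                                    'labels__name': 'pool:suites'},
#                  update_data={'locked': True,
#                               'lock_reason': 'scheduled maintenance'})
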
def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.add(*label_objs)


@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform label.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    platforms = [label.name for label in label_objs if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
                       ', '.join(platforms)})

    host_obj = models.Host.smart_get(id)
    if len(platforms) == 1:
        models.Host.check_no_platform([host_obj])

    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
    add_labels_to_host(id, labels)

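# Usage sketch for host_add_labels (illustrative only; the hostname and label
# names are hypothetical):
#
#     # Adds one regular label and one platform label; passing two platform
#     # labels in the same call would raise ValidationError.
#     host_add_labels('chromeos2-row3-rack4-host5', ['bluetooth', 'link'])
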
def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.remove(*label_objs)


@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
    remove_labels_from_host(id, labels)

def get_host_attribute(attribute, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    hosts = rpc_utils.get_host_query((), False, False, True, host_filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_attr_dicts = []
    for host_obj in hosts:
        for attr_obj in host_obj.attribute_list:
            if attr_obj.attribute == attribute:
                host_attr_dicts.append(attr_obj.get_object_dict())
    return rpc_utils.prepare_for_serialization(host_attr_dicts)

def set_host_attribute(attribute, value, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param value: string, or None to delete an attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    assert host_filter_data # disallow accidental actions on all hosts
    hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)

    # Master forwards this RPC to shards.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'set_host_attribute', False,
                attribute=attribute, value=value, **host_filter_data)

    for host in hosts:
        host.set_or_delete_attribute(attribute, value)

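# Usage sketch for set_host_attribute (illustrative only; the attribute name
# and hostname are hypothetical):
#
#     # Tag a single host; passing value=None would delete the attribute.
#     set_host_attribute('serviced_by', 'technician1',
#                        hostname='chromeos4-row2-rack7-host3')
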
@rpc_utils.forward_single_host_rpc_to_shard
def delete_host(id):
    models.Host.smart_get(id).delete()

def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
    """
    @param multiple_labels: match hosts in all of the labels given. Should
            be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
            "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)

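# Usage sketch for get_hosts (illustrative only; the label and status values
# are hypothetical):
#
#     # Each returned dict carries 'labels', 'platform', 'acls' and
#     # 'attributes' keys on top of the Host model fields.
#     ready_duts = get_hosts(multiple_labels=['board:link', 'pool:bvt'],
#                            status='Ready')
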
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    return hosts.count()


# tests

def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    return models.Test.add_object(name=name, test_type=test_type, path=path,
                                  author=author, dependencies=dependencies,
                                  experimental=experimental,
                                  run_verify=run_verify, test_time=test_time,
                                  test_category=test_category,
                                  sync_count=sync_count,
                                  test_class=test_class,
                                  description=description).id


def modify_test(id, **data):
    models.Test.smart_get(id).update_object(data)


def delete_test(id):
    models.Test.smart_get(id).delete()


def get_tests(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Test.list_objects(filter_data))

@_timer.decorate
def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
    """Gets the counts of all passed and failed tests from the matching jobs.

    @param job_name_prefix: Name prefix of the jobs to get the summary from,
            e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'.
    @param label_name: Label that must be set in the jobs, e.g.,
            'cros-version:butterfly-release/R40-6457.21.0'.

    @returns A summary of the counts of all the passed and failed tests.
    """
    job_ids = list(models.Job.objects.filter(
            name__startswith=job_name_prefix,
            dependency_labels__name=label_name).values_list(
            'pk', flat=True))
    summary = {'passed': 0, 'failed': 0}
    if not job_ids:
        return summary

    counts = (tko_models.TestView.objects.filter(
            afe_job_id__in=job_ids).exclude(
            test_name='SERVER_JOB').exclude(
            test_name__startswith='CLIENT_JOB').values(
            'status').annotate(
            count=Count('status')))
    for status in counts:
        if status['status'] == 'GOOD':
            summary['passed'] += status['count']
        else:
            summary['failed'] += status['count']
    return summary

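# Usage sketch for get_tests_status_counts_by_job_name_label (using the build
# and label named in the docstring above):
#
#     counts = get_tests_status_counts_by_job_name_label(
#             'butterfly-release/R40-6457.21.0/bvt-cq/',
#             'cros-version:butterfly-release/R40-6457.21.0')
#     # counts is of the form {'passed': <int>, 'failed': <int>}.
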
# profilers

def add_profiler(name, description=None):
    return models.Profiler.add_object(name=name, description=description).id


def modify_profiler(id, **data):
    models.Profiler.smart_get(id).update_object(data)


def delete_profiler(id):
    models.Profiler.smart_get(id).delete()


def get_profilers(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Profiler.list_objects(filter_data))

# users

def add_user(login, access_level=None):
    return models.User.add_object(login=login, access_level=access_level).id


def modify_user(id, **data):
    models.User.smart_get(id).update_object(data)


def delete_user(id):
    models.User.smart_get(id).delete()


def get_users(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.User.list_objects(filter_data))

# acl groups

def add_acl_group(name, description=None):
    group = models.AclGroup.add_object(name=name, description=description)
    group.users.add(models.User.current_user())
    return group.id


def modify_acl_group(id, **data):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    group.update_object(data)
    group.add_current_user_if_empty()


def acl_group_add_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.add(*users)


def acl_group_remove_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.remove(*users)
    group.add_current_user_if_empty()


def acl_group_add_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.add(*hosts)
    group.on_host_membership_change()


def acl_group_remove_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.remove(*hosts)
    group.on_host_membership_change()


def delete_acl_group(id):
    models.AclGroup.smart_get(id).delete()


def get_acl_groups(**filter_data):
    acl_groups = models.AclGroup.list_objects(filter_data)
    for acl_group in acl_groups:
        acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
        acl_group['users'] = [user.login
                              for user in acl_group_obj.users.all()]
        acl_group['hosts'] = [host.hostname
                              for host in acl_group_obj.hosts.all()]
    return rpc_utils.prepare_for_serialization(acl_groups)


# jobs

def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, upload_kernel_config=False,
                          db_tests=True):
    """
    Generates a client-side control file to load a kernel and run tests.

    @param tests List of tests to run. See db_tests for more information.
    @param kernel A list of kernel info dictionaries configuring which kernels
        to boot for this job and other options for them
    @param label Name of label to grab kernel config from.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests. If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel. That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today. TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explicitly set the default mode at all.
    @param upload_kernel_config: if enabled it will generate server control
            file code that uploads the kernel config file to the client and
            tells the client of the new (local) path when compiling the kernel;
            the tests must be server side tests
    @param db_tests: if True, the test object can be found in the database
                     backing the test model. In this case, tests is a tuple
                     of test IDs which are used to retrieve the test objects
                     from the database. If False, tests is a tuple of test
                     dictionaries stored client-side in the AFE.

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers, db_tests))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernels=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file, profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info

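# Usage sketch for generate_control_file (illustrative only; assumes a
# client-side test named 'sleeptest' exists in the AFE database):
#
#     cf_info = generate_control_file(tests=['sleeptest'])
#     # cf_info['control_file'] can be handed to create_job();
#     # cf_info['is_server'] says whether to use a Server or Client
#     # control type.
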
def create_parameterized_job(name, priority, test, parameters, kernel=None,
                             label=None, profilers=(), profiler_parameters=None,
                             use_container=False, profile_only=None,
                             upload_kernel_config=False, hosts=(),
                             meta_hosts=(), one_time_hosts=(),
                             atomic_group_name=None, synch_count=None,
                             is_template=False, timeout=None,
                             timeout_mins=None, max_runtime_mins=None,
                             run_verify=False, email_list='', dependencies=(),
                             reboot_before=None, reboot_after=None,
                             parse_failed_repair=None, hostless=False,
                             keyvals=None, drone_set=None, run_reset=True,
                             require_ssp=None):
    """
    Creates and enqueues a parameterized job.

    Most parameters are a combination of the parameters for
    generate_control_file() and create_job(), with the exception of:

    @param test name or ID of the test to run
    @param parameters a map of parameter name ->
                          tuple of (param value, param type)
    @param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                (param value,
                                                                 param type)
    """
    # Save the values of the passed arguments here. What we're going to do with
    # them is pass them all to rpc_utils.get_create_job_common_args(), which
    # will extract the subset of these arguments that apply for
    # rpc_utils.create_job_common(), which we then pass in to that function.
    args = locals()

    # Set up the parameterized job configs
    test_obj = models.Test.smart_get(test)
    control_type = test_obj.test_type

    try:
        label = models.Label.smart_get(label)
    except models.Label.DoesNotExist:
        label = None

    kernel_objs = models.Kernel.create_kernels(kernel)
    profiler_objs = [models.Profiler.smart_get(profiler)
                     for profiler in profilers]

    parameterized_job = models.ParameterizedJob.objects.create(
            test=test_obj, label=label, use_container=use_container,
            profile_only=profile_only,
            upload_kernel_config=upload_kernel_config)
    parameterized_job.kernels.add(*kernel_objs)

    for profiler in profiler_objs:
        parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
                parameterized_job=parameterized_job,
                profiler=profiler)
        profiler_params = profiler_parameters.get(profiler.name, {})
        for name, (value, param_type) in profiler_params.iteritems():
            models.ParameterizedJobProfilerParameter.objects.create(
                    parameterized_job_profiler=parameterized_profiler,
                    parameter_name=name,
                    parameter_value=value,
                    parameter_type=param_type)

    try:
        for parameter in test_obj.testparameter_set.all():
            if parameter.name in parameters:
                param_value, param_type = parameters.pop(parameter.name)
                parameterized_job.parameterizedjobparameter_set.create(
                        test_parameter=parameter, parameter_value=param_value,
                        parameter_type=param_type)

        if parameters:
            raise Exception('Extra parameters remain: %r' % parameters)

        return rpc_utils.create_job_common(
                parameterized_job=parameterized_job.id,
                control_type=control_type,
                **rpc_utils.get_create_job_common_args(args))
    except:
        parameterized_job.delete()
        raise

def create_job_page_handler(name, priority, control_file, control_type,
                            image=None, hostless=False, firmware_rw_build=None,
                            firmware_ro_build=None, test_source_build=None,
                            **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param image: ChromeOS build to be installed on the DUT. Default to None.
    @param firmware_rw_build: Firmware build to update RW firmware. Default to
                              None, i.e., RW firmware will not be updated.
    @param firmware_ro_build: Firmware build to update RO firmware. Default to
                              None, i.e., RO firmware will not be updated.
    @param test_source_build: Build to be used to retrieve test code. Default
                              to None.
    @param kwargs extra args that will be required by create_suite_job or
                  create_job.

    @returns The created Job id number.
    """
    control_file = rpc_utils.encode_ascii(control_file)
    if not control_file:
        raise model_logic.ValidationError({
                'control_file' : "Control file cannot be empty"})

    if image and hostless:
        builds = {}
        builds[provision.CROS_VERSION_PREFIX] = image
        if firmware_rw_build:
            builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build
        if firmware_ro_build:
            builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build
        return site_rpc_interface.create_suite_job(
                name=name, control_file=control_file, priority=priority,
                builds=builds, test_source_build=test_source_build, **kwargs)
    return create_job(name, priority, control_file, control_type, image=image,
                      hostless=hostless, **kwargs)

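# Usage sketch for create_job_page_handler (illustrative only; the build name,
# control file text and priority value are hypothetical):
#
#     job_id = create_job_page_handler(
#             name='link-release/R43-6946.0.0/bvt-inline',
#             priority=30,  # integer priority; higher is more important
#             control_file=suite_control_file_text,
#             control_type='Server',
#             image='link-release/R43-6946.0.0',
#             hostless=True)
#     # With both image and hostless set, the request is routed to
#     # site_rpc_interface.create_suite_job() instead of create_job().
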
@rpc_utils.route_rpc_to_master
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, timeout_mins=None, max_runtime_mins=None,
               run_verify=False, email_list='', dependencies=(),
               reboot_before=None, reboot_after=None, parse_failed_repair=None,
               hostless=False, keyvals=None, drone_set=None, image=None,
               parent_job_id=None, test_retry=0, run_reset=True,
               require_ssp=None, args=(), **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
        synch_count == 1 means the job is asynchronous. If an atomic group is
        given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param timeout_mins Minutes after this call returns until the job times
        out.
    @param max_runtime_mins Minutes from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
        this job will be parsed as part of the job.
    @param hostless if true, create a hostless job
    @param keyvals dict of keyvals to associate with the job
    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
        one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.
    @param drone_set The name of the drone set to run this test on.
    @param image OS image to install before running job.
    @param parent_job_id id of a job considered to be parent of created job.
    @param test_retry Number of times to retry test if the test did not
        complete successfully. (optional, default: 0)
    @param run_reset Should the host be reset before running the test?
    @param require_ssp Set to True to require server-side packaging to run the
                       test. If it's set to None, drone will still try to run
                       the server side with server-side packaging. If the
                       autotest-server package doesn't exist for the build or
                       image is not set, drone will run the test without server-
                       side packaging. Default is None.
    @param args A list of args to be injected into control file.
    @param kwargs extra keyword args. NOT USED.

    @returns The created Job id number.
    """
    if args:
        control_file = tools.inject_vars({'args': args}, control_file)

    if image is None:
        return rpc_utils.create_job_common(
                **rpc_utils.get_create_job_common_args(locals()))

    # When image is supplied use a known parameterized test already in the
    # database to pass the OS image path from the front end, through the
    # scheduler, and finally to autoserv as the --image parameter.

    # The test autoupdate_ParameterizedJob is in afe_autotests and used to
    # instantiate a Test object and from there a ParameterizedJob.
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    known_parameterized_job = models.ParameterizedJob.objects.create(
            test=known_test_obj)

    # autoupdate_ParameterizedJob has a single parameter, the image parameter,
    # stored in the table afe_test_parameters. We retrieve and set this
    # instance of the parameter to the OS image path.
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    known_parameterized_job.parameterizedjobparameter_set.create(
            test_parameter=image_parameter, parameter_value=image,
            parameter_type='string')

    # TODO(crbug.com/502638): save firmware build etc to parameterized_job.

    # By passing a parameterized_job to create_job_common the job entry in
    # the afe_jobs table will have the field parameterized_job_id set.
    # The scheduler uses this id in the afe_parameterized_jobs table to
    # match this job to our known test, and then with the
    # afe_parameterized_job_parameters table to get the actual image path.
    return rpc_utils.create_job_common(
            parameterized_job=known_parameterized_job.id,
            **rpc_utils.get_create_job_common_args(locals()))

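# Usage sketch for create_job (illustrative only; the label names and priority
# value are hypothetical):
#
#     cf_info = generate_control_file(tests=['sleeptest'])
#     job_id = create_job(name='sleeptest-smoke',
#                         priority=20,
#                         control_file=cf_info['control_file'],
#                         control_type='Client',
#                         meta_hosts=['board:link'],
#                         dependencies=cf_info['dependencies'])
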
def abort_host_queue_entries(**filter_data):
    """\
    Abort a set of host queue entries.

    @return: A list of dictionaries, each containing information
             about an aborted HQE.
    """
    query = models.HostQueueEntry.query_objects(filter_data)

    # Don't allow aborts on:
    #   1. Jobs that have already completed (whether or not they were aborted)
    #   2. Jobs that have already been aborted (but may not have completed)
    query = query.filter(complete=False).filter(aborted=False)
    models.AclGroup.check_abort_permissions(query)
    host_queue_entries = list(query.select_related())
    rpc_utils.check_abort_synchronous_jobs(host_queue_entries)

    models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
    hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id,
                 'Job name': hqe.job.name} for hqe in host_queue_entries]
    return hqe_info

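# Usage sketch for abort_host_queue_entries (illustrative only; the job id and
# the 'job__id' filter spelling are hypothetical):
#
#     # Aborts every queue entry of job 123 that is not already complete and
#     # returns the affected HQE ids along with their job names.
#     aborted = abort_host_queue_entries(job__id=123)
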
def abort_special_tasks(**filter_data):
    """\
    Abort the special task, or tasks, specified in the filter.
    """
    query = models.SpecialTask.query_objects(filter_data)
    special_tasks = query.filter(is_active=True)
    for task in special_tasks:
        task.abort()

def _call_special_tasks_on_hosts(task, hosts):
    """\
    Schedules a set of hosts for a special task.

    @returns A list of hostnames that a special task was created for.
    """
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
    if shard_host_map and not utils.is_shard():
        raise ValueError('The following hosts are on shards, please '
                         'follow the link to the shards and create jobs '
                         'there instead. %s.' % shard_host_map)
    for host in hosts:
        models.SpecialTask.schedule_special_task(host, task)
    return list(sorted(host.hostname for host in hosts))

def _forward_special_tasks_on_hosts(task, rpc, **filter_data):
    """Forward special tasks to corresponding shards.

    For master, when special tasks are fired on hosts that are sharded,
    forward the RPC to corresponding shards.

    For shard, create special task records in local DB.

    @param task: Enum value of frontend.afe.models.SpecialTask.Task
    @param rpc: RPC name to forward.
    @param filter_data: Filter keywords to be used for DB query.

    @return: A list of hostnames that a special task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True)

    # Filter out hosts on a shard from those on the master, forward
    # rpcs to the shard with an additional hostname__in filter, and
    # create a local SpecialTask for each remaining host.
    if shard_host_map and not utils.is_shard():
        hosts = [h for h in hosts if h.shard is None]
        for shard, hostnames in shard_host_map.iteritems():

            # The main client of this module is the frontend website, and
            # it invokes it with an 'id' or an 'id__in' filter. Regardless,
            # the 'hostname' filter should narrow down the list of hosts on
            # each shard even though we supply all the ids in filter_data.
            # This method uses hostname instead of id because it fits better
            # with the overall architecture of redirection functions in
            # rpc_utils.
            shard_filter = filter_data.copy()
            shard_filter['hostname__in'] = hostnames
            rpc_utils.run_rpc_on_multiple_hostnames(
                    rpc, [shard], **shard_filter)

    # There is a race condition here if someone assigns a shard to one of these
    # hosts before we create the task. The host will stay on the master if:
    # 1. The host is not Ready
    # 2. The host is Ready but has a task
    # But if the host is Ready and doesn't have a task yet, it will get sent
    # to the shard as we're creating a task here.

    # Given that we only rarely verify Ready hosts it isn't worth putting this
    # entire method in a transaction. The worst case scenario is that we have
    # a verify running on a Ready host while the shard is using it, if the
    # verify fails no subsequent tasks will be created against the host on the
    # master, and verifies are safe enough that this is OK.
    return _call_special_tasks_on_hosts(task, hosts)

def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.VERIFY, 'reverify_hosts', **filter_data)


def repair_hosts(**filter_data):
    """\
    Schedules a set of hosts for repair.

    @returns A list of hostnames that a repair task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.REPAIR, 'repair_hosts', **filter_data)

def get_jobs(not_yet_run=False, running=False, finished=False,
             suite=False, sub=False, standalone=False, **filter_data):
    """\
    Extra status filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have started running but for which not
        all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
        aborted).
    At most one of these three fields should be specified.

    Extra type filter args for get_jobs:
    -suite: Include only jobs with child jobs.
    -sub: Include only jobs with a parent job.
    -standalone: Include only jobs with no child or parent jobs.
    At most one of these three fields should be specified.
    """
    extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
                                                    running,
                                                    finished)
    filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
                                                                 suite,
                                                                 sub,
                                                                 standalone)
    job_dicts = []
    jobs = list(models.Job.query_objects(filter_data))
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals')
    for job in jobs:
        job_dict = job.get_object_dict()
        job_dict['dependencies'] = ','.join(label.name
                                            for label in job.dependencies)
        job_dict['keyvals'] = dict((keyval.key, keyval.value)
                                   for keyval in job.keyvals)
        if job.parameterized_job:
            job_dict['image'] = get_parameterized_autoupdate_image_url(job)
        job_dicts.append(job_dict)
    return rpc_utils.prepare_for_serialization(job_dicts)

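# Usage sketch for get_jobs (illustrative only; the owner name is taken from
# the module docstring examples):
#
#     # Suite jobs owned by 'showard' that are still running; regular Job
#     # model filters combine freely with the status/type flags above.
#     running_suites = get_jobs(owner='showard', running=True, suite=True)
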
1087def get_num_jobs(not_yet_run=False, running=False, finished=False,
Jiaxi Luo15cbf372014-07-01 19:20:20 -07001088 suite=False, sub=False, standalone=False,
jadmanski0afbb632008-06-06 21:10:57 +00001089 **filter_data):
1090 """\
1091 See get_jobs() for documentation of extra filter parameters.
1092 """
Jiaxi Luo15cbf372014-07-01 19:20:20 -07001093 extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
1094 running,
1095 finished)
1096 filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
1097 suite,
1098 sub,
1099 standalone)
jadmanski0afbb632008-06-06 21:10:57 +00001100 return models.Job.query_count(filter_data)
mblighe8819cd2008-02-15 16:48:40 +00001101
1102
mblighe8819cd2008-02-15 16:48:40 +00001103def get_jobs_summary(**filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001104 """\
Jiaxi Luoaac54572014-06-04 13:57:02 -07001105 Like get_jobs(), but adds 'status_counts' and 'result_counts' field.
1106
1107 'status_counts' filed is a dictionary mapping status strings to the number
1108 of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}.
1109
1110 'result_counts' field is piped to tko's rpc_interface and has the return
1111 format specified under get_group_counts.
jadmanski0afbb632008-06-06 21:10:57 +00001112 """
1113 jobs = get_jobs(**filter_data)
1114 ids = [job['id'] for job in jobs]
1115 all_status_counts = models.Job.objects.get_status_counts(ids)
1116 for job in jobs:
1117 job['status_counts'] = all_status_counts[job['id']]
Jiaxi Luoaac54572014-06-04 13:57:02 -07001118 job['result_counts'] = tko_rpc_interface.get_status_counts(
1119 ['afe_job_id', 'afe_job_id'],
1120 header_groups=[['afe_job_id'], ['afe_job_id']],
1121 **{'afe_job_id': job['id']})
jadmanski0afbb632008-06-06 21:10:57 +00001122 return rpc_utils.prepare_for_serialization(jobs)
mblighe8819cd2008-02-15 16:48:40 +00001123
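# Sketch of one entry returned by get_jobs_summary() (the counts are invented;
# the key names come from the loop above):
#
#     {'id': 123, ...,                       # all regular get_jobs() fields
#      'status_counts': {'Queued': 4, 'Running': 2},
#      'result_counts': <tko get_status_counts() output for afe_job_id=123>}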
1124
showarda965cef2009-05-15 23:17:41 +00001125def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
showarda8709c52008-07-03 19:44:54 +00001126 """\
1127 Retrieves all the information needed to clone a job.
1128 """
showarda8709c52008-07-03 19:44:54 +00001129 job = models.Job.objects.get(id=id)
showard29f7cd22009-04-29 21:16:24 +00001130 job_info = rpc_utils.get_job_info(job,
showarda965cef2009-05-15 23:17:41 +00001131 preserve_metahosts,
1132 queue_entry_filter_data)
showard945072f2008-09-03 20:34:59 +00001133
showardd9992fe2008-07-31 02:15:03 +00001134 host_dicts = []
showard29f7cd22009-04-29 21:16:24 +00001135 for host in job_info['hosts']:
1136 host_dict = get_hosts(id=host.id)[0]
1137 other_labels = host_dict['labels']
1138 if host_dict['platform']:
1139 other_labels.remove(host_dict['platform'])
1140 host_dict['other_labels'] = ', '.join(other_labels)
showardd9992fe2008-07-31 02:15:03 +00001141 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001142
showard29f7cd22009-04-29 21:16:24 +00001143 for host in job_info['one_time_hosts']:
1144 host_dict = dict(hostname=host.hostname,
1145 id=host.id,
1146 platform='(one-time host)',
1147 locked_text='')
1148 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001149
showard4d077562009-05-08 18:24:36 +00001150 # convert keys from Label objects to strings (names of labels)
showard29f7cd22009-04-29 21:16:24 +00001151 meta_host_counts = dict((meta_host.name, count) for meta_host, count
showard4d077562009-05-08 18:24:36 +00001152 in job_info['meta_host_counts'].iteritems())
showard29f7cd22009-04-29 21:16:24 +00001153
1154 info = dict(job=job.get_object_dict(),
1155 meta_host_counts=meta_host_counts,
1156 hosts=host_dicts)
1157 info['job']['dependencies'] = job_info['dependencies']
1158 if job_info['atomic_group']:
1159 info['atomic_group_name'] = (job_info['atomic_group']).name
1160 else:
1161 info['atomic_group_name'] = None
jamesren2275ef12010-04-12 18:25:06 +00001162 info['hostless'] = job_info['hostless']
jamesren76fcf192010-04-21 20:39:50 +00001163 info['drone_set'] = job.drone_set and job.drone_set.name
showarda8709c52008-07-03 19:44:54 +00001164
Eric Lid23bc192011-02-09 14:38:57 -08001165 if job.parameterized_job:
1166 info['job']['image'] = get_parameterized_autoupdate_image_url(job)
1167
showarda8709c52008-07-03 19:44:54 +00001168 return rpc_utils.prepare_for_serialization(info)
1169
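# Example of consuming get_info_for_clone() (the job id is hypothetical; the
# key names follow directly from the dictionary built above):
#
#     info = get_info_for_clone(123, preserve_metahosts=True)
#     info['job']               # job fields plus 'dependencies' (and 'image'
#                               # when the job is parameterized)
#     info['hosts']             # host dicts, each with 'other_labels'
#     info['meta_host_counts']  # {label name: count}
#     info['atomic_group_name'], info['hostless'], info['drone_set']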
1170
showard34dc5fa2008-04-24 20:58:40 +00001171# host queue entries
1172
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001173def get_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001174 """\
showardc92da832009-04-07 18:14:34 +00001175 @returns A sequence of nested dictionaries of host and job information.
jadmanski0afbb632008-06-06 21:10:57 +00001176 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001177 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1178 'started_on__lte',
1179 start_time,
1180 end_time,
1181 **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001182 return rpc_utils.prepare_rows_as_nested_dicts(
1183 models.HostQueueEntry.query_objects(filter_data),
1184 ('host', 'atomic_group', 'job'))
showard34dc5fa2008-04-24 20:58:40 +00001185
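# Illustrative time-windowed query (the timestamps are made-up examples): the
# start_time/end_time arguments become 'started_on__gte'/'started_on__lte'
# filters via rpc_utils.inject_times_to_filter().
#
#     get_host_queue_entries(start_time='2015-01-01 00:00:00',
#                            end_time='2015-01-02 00:00:00')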
1186
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001187def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001188 """\
 1189 Get the number of host queue entries matching the given filter data.
1190 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001191 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1192 'started_on__lte',
1193 start_time,
1194 end_time,
1195 **filter_data)
jadmanski0afbb632008-06-06 21:10:57 +00001196 return models.HostQueueEntry.query_count(filter_data)
showard34dc5fa2008-04-24 20:58:40 +00001197
1198
showard1e935f12008-07-11 00:11:36 +00001199def get_hqe_percentage_complete(**filter_data):
1200 """
showardc92da832009-04-07 18:14:34 +00001201 Computes the fraction of host queue entries matching the given filter data
showard1e935f12008-07-11 00:11:36 +00001202 that are complete.
1203 """
1204 query = models.HostQueueEntry.query_objects(filter_data)
1205 complete_count = query.filter(complete=True).count()
1206 total_count = query.count()
1207 if total_count == 0:
1208 return 1
1209 return float(complete_count) / total_count
1210
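# Worked example for get_hqe_percentage_complete() (numbers invented): if the
# filter matches 10 host queue entries and 4 of them have complete=True, the
# result is 4 / 10 = 0.4.  An empty match is reported as 1 (fully complete).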
1211
showard1a5a4082009-07-28 20:01:37 +00001212# special tasks
1213
1214def get_special_tasks(**filter_data):
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001215 """Get special task entries from the local database.
1216
1217 Query the special tasks table for tasks matching the given
1218 `filter_data`, and return a list of the results. No attempt is
1219 made to forward the call to shards; the buck will stop here.
1220 The caller is expected to know the target shard for such reasons
1221 as:
1222 * The caller is a service (such as gs_offloader) configured
1223 to operate on behalf of one specific shard, and no other.
1224 * The caller has a host as a parameter, and knows that this is
1225 the shard assigned to that host.
1226
1227 @param filter_data Filter keywords to pass to the underlying
1228 database query.
1229
1230 """
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001231 return rpc_utils.prepare_rows_as_nested_dicts(
1232 models.SpecialTask.query_objects(filter_data),
1233 ('host', 'queue_entry'))
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001234
1235
1236def get_host_special_tasks(host_id, **filter_data):
1237 """Get special task entries for a given host.
1238
1239 Query the special tasks table for tasks that ran on the host
1240 given by `host_id` and matching the given `filter_data`.
1241 Return a list of the results. If the host is assigned to a
1242 shard, forward this call to that shard.
1243
1244 @param host_id Id in the database of the target host.
1245 @param filter_data Filter keywords to pass to the underlying
1246 database query.
1247
1248 """
MK Ryu0c1a37d2015-04-30 12:00:55 -07001249 # Retrieve host data even if the host is in an invalid state.
1250 host = models.Host.smart_get(host_id, False)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001251 if not host.shard:
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001252 return get_special_tasks(host_id=host_id, **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001253 else:
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001254 # The return values from AFE methods are post-processed
1255 # objects that aren't JSON-serializable. So, we have to
1256 # call AFE.run() to get the raw, serializable output from
1257 # the shard.
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001258 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1259 return shard_afe.run('get_special_tasks',
1260 host_id=host_id, **filter_data)
showard1a5a4082009-07-28 20:01:37 +00001261
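# Sketch of the dispatch above (host id and filter keyword are hypothetical):
# callers do not need to know whether the host is served locally or by a shard.
#
#     get_host_special_tasks(1234, is_complete=True)
#     # -> answered from the local database when the host has no shard,
#     #    otherwise re-issued as
#     #    shard_afe.run('get_special_tasks', host_id=1234, is_complete=True)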
1262
MK Ryu0c1a37d2015-04-30 12:00:55 -07001263def get_num_special_tasks(**kwargs):
1264 """Get the number of special task entries from the local database.
1265
1266 Query the special tasks table for tasks matching the given 'kwargs',
1267 and return the number of the results. No attempt is made to forward
1268 the call to shards; the buck will stop here.
1269
1270 @param kwargs Filter keywords to pass to the underlying database query.
1271
1272 """
1273 return models.SpecialTask.query_count(kwargs)
1274
1275
1276def get_host_num_special_tasks(host, **kwargs):
1277 """Get special task entries for a given host.
1278
1279 Query the special tasks table for tasks that ran on the host
1280 given by 'host' and matching the given 'kwargs'.
1281 Return a list of the results. If the host is assigned to a
1282 shard, forward this call to that shard.
1283
1284 @param host id or name of a host. More often a hostname.
1285 @param kwargs Filter keywords to pass to the underlying database query.
1286
1287 """
1288 # Retrieve host data even if the host is in an invalid state.
1289 host_model = models.Host.smart_get(host, False)
1290 if not host_model.shard:
1291 return get_num_special_tasks(host=host, **kwargs)
1292 else:
1293 shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname())
1294 return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
1295
1296
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001297def get_status_task(host_id, end_time):
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001298 """Get the "status task" for a host from the local shard.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001299
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001300 Returns a single special task representing the given host's
1301 "status task". The status task is a completed special task that
1302 identifies whether the corresponding host was working or broken
1303 when it completed. A successful task indicates a working host;
1304 a failed task indicates broken.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001305
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001306 This call will not be forwarded to a shard; the receiving server
1307 must be the shard that owns the host.
1308
1309 @param host_id Id in the database of the target host.
1310 @param end_time Time reference for the host's status.
1311
1312 @return A single task; its status (successful or not)
1313 corresponds to the status of the host (working or
1314 broken) at the given time. If no task is found, return
1315 `None`.
1316
1317 """
1318 tasklist = rpc_utils.prepare_rows_as_nested_dicts(
1319 status_history.get_status_task(host_id, end_time),
1320 ('host', 'queue_entry'))
1321 return tasklist[0] if tasklist else None
1322
1323
1324def get_host_status_task(host_id, end_time):
1325 """Get the "status task" for a host from its owning shard.
1326
1327 Finds the given host's owning shard, and forwards to it a call
1328 to `get_status_task()` (see above).
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001329
1330 @param host_id Id in the database of the target host.
1331 @param end_time Time reference for the host's status.
1332
1333 @return A single task; its status (successful or not)
1334 corresponds to the status of the host (working or
1335 broken) at the given time. If no task is found, return
1336 `None`.
1337
1338 """
1339 host = models.Host.smart_get(host_id)
1340 if not host.shard:
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001341 return get_status_task(host_id, end_time)
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001342 else:
1343 # The return values from AFE methods are post-processed
1344 # objects that aren't JSON-serializable. So, we have to
1345 # call AFE.run() to get the raw, serializable output from
1346 # the shard.
1347 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1348 return shard_afe.run('get_status_task',
1349 host_id=host_id, end_time=end_time)
1350
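# Illustrative interpretation of the "status task" (the host id, timestamp,
# and the 'success' field name are assumptions based on the docstrings above):
#
#     task = get_host_status_task(1234, '2015-06-01 00:00:00')
#     if task is None:
#         pass          # no completed status task before end_time
#     elif task['success']:
#         pass          # host was working when the task finished
#     else:
#         pass          # host was broken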
1351
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001352def get_host_diagnosis_interval(host_id, end_time, success):
1353 """Find a "diagnosis interval" for a given host.
1354
1355 A "diagnosis interval" identifies a start and end time where
1356 the host went from "working" to "broken", or vice versa. The
1357 interval's starting time is the starting time of the last status
1358 task with the old status; the end time is the finish time of the
1359 first status task with the new status.
1360
1361 This routine finds the most recent diagnosis interval for the
1362 given host prior to `end_time`, with a starting status matching
1363 `success`. If `success` is true, the interval will start with a
1364 successful status task; if false the interval will start with a
1365 failed status task.
1366
1367 @param host_id Id in the database of the target host.
1368 @param end_time Time reference for the diagnosis interval.
1369 @param success Whether the diagnosis interval should start
1370 with a successful or failed status task.
1371
1372 @return A list of two strings. The first is the timestamp for
1373 the beginning of the interval; the second is the
1374 timestamp for the end. If the host has never changed
1375 state, the list is empty.
1376
1377 """
1378 host = models.Host.smart_get(host_id)
J. Richard Barnette78f281a2015-06-29 13:24:51 -07001379 if not host.shard or utils.is_shard():
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001380 return status_history.get_diagnosis_interval(
1381 host_id, end_time, success)
1382 else:
1383 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1384 return shard_afe.get_host_diagnosis_interval(
1385 host_id, end_time, success)
1386
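# Example return values for get_host_diagnosis_interval() (timestamps are
# invented): a host that went from working to broken might yield
#
#     ['2015-05-01 10:00:00', '2015-05-01 10:30:00']
#
# while a host that has never changed state yields an empty list.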
1387
showardc0ac3a72009-07-08 21:14:45 +00001388# support for host detail view
1389
MK Ryu0c1a37d2015-04-30 12:00:55 -07001390def get_host_queue_entries_and_special_tasks(host, query_start=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001391 query_limit=None, start_time=None,
1392 end_time=None):
showardc0ac3a72009-07-08 21:14:45 +00001393 """
1394 @returns an interleaved list of HostQueueEntries and SpecialTasks,
 1395 in approximate run order. Each dict contains keys for type, host,
1396 job, status, started_on, execution_path, and ID.
1397 """
1398 total_limit = None
1399 if query_limit is not None:
 1400 total_limit = (query_start or 0) + query_limit
MK Ryu0c1a37d2015-04-30 12:00:55 -07001401 filter_data_common = {'host': host,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001402 'query_limit': total_limit,
1403 'sort_by': ['-id']}
showardc0ac3a72009-07-08 21:14:45 +00001404
MK Ryu0c1a37d2015-04-30 12:00:55 -07001405 filter_data_special_tasks = rpc_utils.inject_times_to_filter(
1406 'time_started__gte', 'time_started__lte', start_time, end_time,
1407 **filter_data_common)
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001408
MK Ryu0c1a37d2015-04-30 12:00:55 -07001409 queue_entries = get_host_queue_entries(
1410 start_time, end_time, **filter_data_common)
1411 special_tasks = get_host_special_tasks(host, **filter_data_special_tasks)
showardc0ac3a72009-07-08 21:14:45 +00001412
1413 interleaved_entries = rpc_utils.interleave_entries(queue_entries,
1414 special_tasks)
1415 if query_start is not None:
1416 interleaved_entries = interleaved_entries[query_start:]
1417 if query_limit is not None:
1418 interleaved_entries = interleaved_entries[:query_limit]
MK Ryu0c1a37d2015-04-30 12:00:55 -07001419 return rpc_utils.prepare_host_queue_entries_and_special_tasks(
1420 interleaved_entries, queue_entries)
showardc0ac3a72009-07-08 21:14:45 +00001421
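# Illustrative paging for the host detail view (host name and numbers are
# hypothetical): fetch the second page of 20 interleaved entries, newest first.
#
#     get_host_queue_entries_and_special_tasks('myhost', query_start=20,
#                                              query_limit=20)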
1422
MK Ryu0c1a37d2015-04-30 12:00:55 -07001423def get_num_host_queue_entries_and_special_tasks(host, start_time=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001424 end_time=None):
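    """Count the entries shown on a host's detail view.

    Sums the host queue entries and special tasks for `host` that fall
    within the optional [start_time, end_time] window, mirroring
    get_host_queue_entries_and_special_tasks() above.

    @param host Id or name of the host.
    @param start_time Optional lower bound for entry start times.
    @param end_time Optional upper bound for entry start times.

    @return Combined count of matching host queue entries and special tasks.
    """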
MK Ryu0c1a37d2015-04-30 12:00:55 -07001425 filter_data_common = {'host': host}
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001426
1427 filter_data_queue_entries, filter_data_special_tasks = (
1428 rpc_utils.inject_times_to_hqe_special_tasks_filters(
1429 filter_data_common, start_time, end_time))
1430
1431 return (models.HostQueueEntry.query_count(filter_data_queue_entries)
MK Ryu0c1a37d2015-04-30 12:00:55 -07001432 + get_host_num_special_tasks(**filter_data_special_tasks))
showardc0ac3a72009-07-08 21:14:45 +00001433
1434
showard29f7cd22009-04-29 21:16:24 +00001435# recurring run
1436
1437def get_recurring(**filter_data):
1438 return rpc_utils.prepare_rows_as_nested_dicts(
1439 models.RecurringRun.query_objects(filter_data),
1440 ('job', 'owner'))
1441
1442
1443def get_num_recurring(**filter_data):
1444 return models.RecurringRun.query_count(filter_data)
1445
1446
1447def delete_recurring_runs(**filter_data):
1448 to_delete = models.RecurringRun.query_objects(filter_data)
1449 to_delete.delete()
1450
1451
1452def create_recurring_run(job_id, start_date, loop_period, loop_count):
showard64a95952010-01-13 21:27:16 +00001453 owner = models.User.current_user().login
showard29f7cd22009-04-29 21:16:24 +00001454 job = models.Job.objects.get(id=job_id)
1455 return job.create_recurring_job(start_date=start_date,
1456 loop_period=loop_period,
1457 loop_count=loop_count,
1458 owner=owner)
1459
1460
mblighe8819cd2008-02-15 16:48:40 +00001461# other
1462
showarde0b63622008-08-04 20:58:47 +00001463def echo(data=""):
1464 """\
 1465 Returns the passed-in string. Useful as a basic check that RPC calls
 1466 can be made successfully.
1467 """
1468 return data
1469
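# Smoke-test example: echo() returns its argument unchanged, so
#
#     echo('ping') == 'ping'
#
# makes a cheap end-to-end check that the RPC interface is reachable.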
1470
showardb7a52fd2009-04-27 20:10:56 +00001471def get_motd():
1472 """\
1473 Returns the message of the day as a string.
1474 """
1475 return rpc_utils.get_motd()
1476
1477
mblighe8819cd2008-02-15 16:48:40 +00001478def get_static_data():
jadmanski0afbb632008-06-06 21:10:57 +00001479 """\
1480 Returns a dictionary containing a bunch of data that shouldn't change
1481 often and is otherwise inaccessible. This includes:
showardc92da832009-04-07 18:14:34 +00001482
1483 priorities: List of job priority choices.
1484 default_priority: Default priority value for new jobs.
1485 users: Sorted list of all users.
Jiaxi Luo31874592014-06-11 10:36:35 -07001486 labels: Sorted list of labels, excluding those whose names start
 1487 with 'cros-version', 'fw-version', 'fwrw-version' or 'fwro-version'.
showardc92da832009-04-07 18:14:34 +00001488 atomic_groups: Sorted list of all atomic groups.
1489 tests: Sorted list of all tests.
1490 profilers: Sorted list of all profilers.
 1491 current_user: Object dict for the currently logged-in user.
1492 host_statuses: Sorted list of possible Host statuses.
1493 job_statuses: Sorted list of possible HostQueueEntry statuses.
Simran Basi7e605742013-11-12 13:43:36 -08001494 job_timeout_mins_default: The default job timeout length in minutes.
showarda1e74b32009-05-12 17:32:04 +00001495 parse_failed_repair_default: Default value for the parse_failed_repair job
Jiaxi Luo31874592014-06-11 10:36:35 -07001496 option.
showardc92da832009-04-07 18:14:34 +00001497 reboot_before_options: A list of valid RebootBefore string enums.
1498 reboot_after_options: A list of valid RebootAfter string enums.
1499 motd: Server's message of the day.
1500 status_dictionary: A mapping from one word job status names to a more
1501 informative description.
jadmanski0afbb632008-06-06 21:10:57 +00001502 """
showard21baa452008-10-21 00:08:39 +00001503
1504 job_fields = models.Job.get_field_dict()
jamesren76fcf192010-04-21 20:39:50 +00001505 default_drone_set_name = models.DroneSet.default_drone_set_name()
1506 drone_sets = ([default_drone_set_name] +
1507 sorted(drone_set.name for drone_set in
1508 models.DroneSet.objects.exclude(
1509 name=default_drone_set_name)))
showard21baa452008-10-21 00:08:39 +00001510
jadmanski0afbb632008-06-06 21:10:57 +00001511 result = {}
Alex Miller7d658cf2013-09-04 16:00:35 -07001512 result['priorities'] = priorities.Priority.choices()
1513 default_priority = priorities.Priority.DEFAULT
1514 result['default_priority'] = 'Default'
1515 result['max_schedulable_priority'] = priorities.Priority.DEFAULT
jadmanski0afbb632008-06-06 21:10:57 +00001516 result['users'] = get_users(sort_by=['login'])
Jiaxi Luo31874592014-06-11 10:36:35 -07001517
1518 label_exclude_filters = [{'name__startswith': 'cros-version'},
Dan Shi65351d62015-08-03 12:03:23 -07001519 {'name__startswith': 'fw-version'},
1520 {'name__startswith': 'fwrw-version'},
1521 {'name__startswith': 'fwro-version'}]
Jiaxi Luo31874592014-06-11 10:36:35 -07001522 result['labels'] = get_labels(
1523 label_exclude_filters,
1524 sort_by=['-platform', 'name'])
1525
showardc92da832009-04-07 18:14:34 +00001526 result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
jadmanski0afbb632008-06-06 21:10:57 +00001527 result['tests'] = get_tests(sort_by=['name'])
showard2b9a88b2008-06-13 20:55:03 +00001528 result['profilers'] = get_profilers(sort_by=['name'])
showard0fc38302008-10-23 00:44:07 +00001529 result['current_user'] = rpc_utils.prepare_for_serialization(
showard64a95952010-01-13 21:27:16 +00001530 models.User.current_user().get_object_dict())
showard2b9a88b2008-06-13 20:55:03 +00001531 result['host_statuses'] = sorted(models.Host.Status.names)
mbligh5a198b92008-12-11 19:33:29 +00001532 result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
Simran Basi7e605742013-11-12 13:43:36 -08001533 result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
Simran Basi34217022012-11-06 13:43:15 -08001534 result['job_max_runtime_mins_default'] = (
1535 models.Job.DEFAULT_MAX_RUNTIME_MINS)
showarda1e74b32009-05-12 17:32:04 +00001536 result['parse_failed_repair_default'] = bool(
1537 models.Job.DEFAULT_PARSE_FAILED_REPAIR)
jamesrendd855242010-03-02 22:23:44 +00001538 result['reboot_before_options'] = model_attributes.RebootBefore.names
1539 result['reboot_after_options'] = model_attributes.RebootAfter.names
showard8fbae652009-01-20 23:23:10 +00001540 result['motd'] = rpc_utils.get_motd()
jamesren76fcf192010-04-21 20:39:50 +00001541 result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
1542 result['drone_sets'] = drone_sets
jamesren4a41e012010-07-16 22:33:48 +00001543 result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
showard8ac29b42008-07-17 17:01:55 +00001544
showardd3dc1992009-04-22 21:01:40 +00001545 result['status_dictionary'] = {"Aborted": "Aborted",
showard8ac29b42008-07-17 17:01:55 +00001546 "Verifying": "Verifying Host",
Alex Millerdfff2fd2013-05-28 13:05:06 -07001547 "Provisioning": "Provisioning Host",
showard8ac29b42008-07-17 17:01:55 +00001548 "Pending": "Waiting on other hosts",
1549 "Running": "Running autoserv",
1550 "Completed": "Autoserv completed",
1551 "Failed": "Failed to complete",
showardd823b362008-07-24 16:35:46 +00001552 "Queued": "Queued",
showard5deb6772008-11-04 21:54:33 +00001553 "Starting": "Next in host's queue",
1554 "Stopped": "Other host(s) failed verify",
showardd3dc1992009-04-22 21:01:40 +00001555 "Parsing": "Awaiting parse of final results",
showard29f7cd22009-04-29 21:16:24 +00001556 "Gathering": "Gathering log files",
showard8cc058f2009-09-08 16:26:33 +00001557 "Template": "Template job for recurring run",
mbligh4608b002010-01-05 18:22:35 +00001558 "Waiting": "Waiting for scheduler action",
Dan Shi07e09af2013-04-12 09:31:29 -07001559 "Archiving": "Archiving results",
1560 "Resetting": "Resetting hosts"}
Jiaxi Luo421608e2014-07-07 14:38:00 -07001561
1562 result['wmatrix_url'] = rpc_utils.get_wmatrix_url()
Simran Basi71206ef2014-08-13 13:51:18 -07001563 result['is_moblab'] = bool(utils.is_moblab())
Jiaxi Luo421608e2014-07-07 14:38:00 -07001564
jadmanski0afbb632008-06-06 21:10:57 +00001565 return result
showard29f7cd22009-04-29 21:16:24 +00001566
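# Illustrative client-side use of get_static_data() (only a few of the keys
# assigned above are shown; values depend on the server's configuration):
#
#     static = get_static_data()
#     static['priorities']         # job priority choices
#     static['status_dictionary']  # one-word status -> description
#     static['is_moblab']          # True only on a Moblab installation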
1567
1568def get_server_time():
1569 return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")