# pylint: disable-msg=C0111

"""\
Functions to expose over the RPC interface.

For all modify* and delete* functions that ask for an 'id' parameter to
identify the object to operate on, the id may be either
 * the database row ID
 * the name of the object (label name, hostname, user login, etc.)
 * a dictionary containing a uniquely identifying field (this option should
   seldom be used)

When specifying foreign key fields (i.e. adding hosts to a label, or adding
users to an ACL group), the given value may be either the database row ID or the
name of the object.

All get* functions return lists of dictionaries.  Each dictionary represents one
object and maps field names to values.

Some examples:
modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
modify_test('sleeptest', test_type='Client', params=', seconds=60')
delete_acl_group(1) # delete by ID
delete_acl_group('Everyone') # delete by name
acl_group_add_users('Everyone', ['mbligh', 'showard'])
get_jobs(owner='showard', status='Queued')

See doctests/001_rpc_test.txt for (lots) more examples.
"""
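
# Illustrative sketch only of the seldom-used dictionary identifier form above,
# reusing the hypothetical host 'ipaj2' from the docstring examples; any
# uniquely identifying field accepted by smart_get() works:
#   modify_host({'hostname': 'ipaj2'}, locked=True, lock_reason='example')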

__author__ = 'showard@google.com (Steve Howard)'

import sys
import datetime

from django.db.models import Count
import common
from autotest_lib.client.common_lib import priorities
from autotest_lib.client.common_lib.cros import dev_server
from autotest_lib.client.common_lib.cros.graphite import autotest_stats
from autotest_lib.frontend.afe import control_file, rpc_utils
from autotest_lib.frontend.afe import models, model_logic, model_attributes
from autotest_lib.frontend.afe import site_rpc_interface
from autotest_lib.frontend.tko import models as tko_models
from autotest_lib.frontend.tko import rpc_interface as tko_rpc_interface
from autotest_lib.server import frontend
from autotest_lib.server import utils
from autotest_lib.server.cros import provision
from autotest_lib.server.cros.dynamic_suite import tools
from autotest_lib.site_utils import status_history


_timer = autotest_stats.Timer('rpc_interface')

def get_parameterized_autoupdate_image_url(job):
    """Get the parameterized autoupdate image url from a parameterized job."""
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    para_set = job.parameterized_job.parameterizedjobparameter_set
    job_test_para = para_set.get(test_parameter=image_parameter)
    return job_test_para.parameter_value


# labels

def modify_label(id, **data):
    """Modify a label.

    @param id: id or name of a label. More often a label name.
    @param data: New data for a label.
    """
    label_model = models.Label.smart_get(id)

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'modify_label', False,
                             id=id, **data)

    label_model.update_object(data)


def delete_label(id):
    """Delete a label.

    @param id: id or name of a label. More often a label name.
    """
    label_model = models.Label.smart_get(id)

    # Master forwards the RPC to shards
    if not utils.is_shard():
        rpc_utils.fanout_rpc(label_model.host_set.all(), 'delete_label', False,
                             id=id)

    label_model.delete()


def add_label(name, ignore_exception_if_exists=False, **kwargs):
    """Adds a new label of a given name.

    @param name: label name.
    @param ignore_exception_if_exists: If True and the exception was
        thrown due to the duplicated label name when adding a label,
        then suppress the exception. Default is False.
    @param kwargs: keyword args that store more info about a label
        other than the name.
    @return: int/long id of a new label.
    """
    # models.Label.add_object() throws model_logic.ValidationError
    # when it is given a label name that already exists.
    # However, ValidationError can be thrown with different errors,
    # and those errors should be thrown up to the call chain.
    try:
        label = models.Label.add_object(name=name, **kwargs)
    except:
        exc_info = sys.exc_info()
        if ignore_exception_if_exists:
            label = rpc_utils.get_label(name)
            # If the exception is raised not because of duplicated
            # "name", then raise the original exception.
            if label is None:
                raise exc_info[0], exc_info[1], exc_info[2]
        else:
            raise exc_info[0], exc_info[1], exc_info[2]
    return label.id
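
# Example (illustrative only; the label name below is hypothetical):
#   add_label('pool:my-pool', ignore_exception_if_exists=True)
# returns the id of the existing label instead of raising if 'pool:my-pool'
# was already added by a concurrent caller.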


def add_label_to_hosts(id, hosts):
    """Adds a label of the given id to the given hosts only in local DB.

    @param id: id or name of a label. More often a label name.
    @param hosts: The hostnames of hosts that need the label.

    @raises models.Label.DoesNotExist: If the label with id doesn't exist.
    """
    label = models.Label.smart_get(id)
    host_objs = models.Host.smart_get_bulk(hosts)
    if label.platform:
        models.Host.check_no_platform(host_objs)
    label.host_set.add(*host_objs)


@rpc_utils.route_rpc_to_master
def label_add_hosts(id, hosts):
    """Adds a label with the given id to the given hosts.

    This method should be run only on the master, not on shards.
    The given label will be created if it doesn't exist, provided the `id`
    supplied is a label name, not an int/long id.

    @param id: id or name of a label. More often a label name.
    @param hosts: A list of hostnames or ids. More often hostnames.

    @raises ValueError: If the id specified is an int/long (label id)
                        while the label does not exist.
    """
    try:
        label = models.Label.smart_get(id)
    except models.Label.DoesNotExist:
        # This matches the type checks in smart_get, which is a hack
        # in and of itself. The aim here is to create any non-existent
        # label, which we cannot do if the 'id' specified isn't a label name.
        if isinstance(id, basestring):
            label = models.Label.smart_get(add_label(id))
        else:
            raise ValueError('Label id (%s) does not exist. Please specify '
                             'the argument, id, as a string (label name).'
                             % id)

    host_objs = models.Host.smart_get_bulk(hosts)
    # Make sure the label exists on the shard with the same id
    # as it is on the master.
    # It is possible that the label is already on a shard, because
    # we add a new label only to the shards of hosts that the label
    # is going to be attached to.
    # For example, we add a label L1 to a host in shard S1.
    # Master and S1 will have L1 but other shards won't.
    # Later, when we add the same label L1 to hosts in shards S1 and S2,
    # S1 already has the label but S2 doesn't.
    # S2 should have the new label without any problem.
    # We ignore the exception in such a case.
    rpc_utils.fanout_rpc(
            host_objs, 'add_label', include_hostnames=False,
            name=label.name, ignore_exception_if_exists=True,
            id=label.id, platform=label.platform)
    rpc_utils.fanout_rpc(host_objs, 'add_label_to_hosts', id=id)

    add_label_to_hosts(id, hosts)
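
# Illustrative call (hypothetical label and hostnames), run against the master:
#   label_add_hosts('needs_cleanup', ['host1.cros.corp', 'host2.cros.corp'])
# The label is created if 'needs_cleanup' does not exist yet, mirrored to the
# shards owning those hosts via fanout_rpc, attached to the hosts on those
# shards, and finally attached in the local (master) database.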


def remove_label_from_hosts(id, hosts):
    """Removes a label of the given id from the given hosts only in local DB.

    @param id: id or name of a label.
    @param hosts: The hostnames of hosts from which the label should be
        removed.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    models.Label.smart_get(id).host_set.remove(*host_objs)


@rpc_utils.route_rpc_to_master
def label_remove_hosts(id, hosts):
    """Removes a label of the given id from the given hosts.

    This method should be run only on the master, not on shards.

    @param id: id or name of a label.
    @param hosts: A list of hostnames or ids. More often hostnames.
    """
    host_objs = models.Host.smart_get_bulk(hosts)
    rpc_utils.fanout_rpc(host_objs, 'remove_label_from_hosts', id=id)

    remove_label_from_hosts(id, hosts)


def get_labels(exclude_filters=(), **filter_data):
    """\
    @param exclude_filters: A sequence of dictionaries of filters.

    @returns A sequence of nested dictionaries of label information.
    """
    labels = models.Label.query_objects(filter_data)
    for exclude_filter in exclude_filters:
        labels = labels.exclude(**exclude_filter)
    return rpc_utils.prepare_rows_as_nested_dicts(labels, ('atomic_group',))


# atomic groups

def add_atomic_group(name, max_number_of_machines=None, description=None):
    return models.AtomicGroup.add_object(
            name=name, max_number_of_machines=max_number_of_machines,
            description=description).id


def modify_atomic_group(id, **data):
    models.AtomicGroup.smart_get(id).update_object(data)


def delete_atomic_group(id):
    models.AtomicGroup.smart_get(id).delete()


def atomic_group_add_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.add(*label_objs)


def atomic_group_remove_labels(id, labels):
    label_objs = models.Label.smart_get_bulk(labels)
    models.AtomicGroup.smart_get(id).label_set.remove(*label_objs)


def get_atomic_groups(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.AtomicGroup.list_objects(filter_data))


# hosts

def add_host(hostname, status=None, locked=None, lock_reason='', protection=None):
    if locked and not lock_reason:
        raise model_logic.ValidationError(
            {'locked': 'Please provide a reason for locking when adding host.'})

    return models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, lock_reason=lock_reason,
                                  protection=protection).id


@rpc_utils.route_rpc_to_master
def modify_host(id, **kwargs):
    """Modify local attributes of a host.

    If this is called on the master, but the host is assigned to a shard, this
    will call the `modify_host_local` RPC on the responsible shard. This means
    that if a host is being locked using this function, the change will also
    propagate to the shard.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    @param id: id of the host to modify.
    @param kwargs: key=value pairs of values to set on the host.
    """
    rpc_utils.check_modify_host(kwargs)
    host = models.Host.smart_get(id)
    rpc_utils.check_modify_host_locking(host, kwargs)

    rpc_utils.fanout_rpc([host], 'modify_host_local',
                         include_hostnames=False, id=id, **kwargs)
    host.update_object(kwargs)


def modify_host_local(id, **kwargs):
    """Modify host attributes in local DB.

    @param id: Host id.
    @param kwargs: key=value pairs of values to set on the host.
    """
    models.Host.smart_get(id).update_object(kwargs)


@rpc_utils.route_rpc_to_master
def modify_hosts(host_filter_data, update_data):
    """Modify local attributes of multiple hosts.

    If this is called on the master, but one of the hosts that match the
    filters is assigned to a shard, this will call the `modify_hosts_local`
    RPC on the responsible shard.
    When this is called on a shard, the shard just routes the RPC to the master
    and does nothing.

    The filters are always applied on the master, not on the shards. This means
    if the states of a host differ on the master and a shard, the state on the
    master will be used. I.e. this means:
    A host was synced to Shard 1. On Shard 1 the status of the host was set to
    'Repair Failed'.
    - A call to modify_hosts with host_filter_data={'status': 'Ready'} will
      update the host (both on the shard and on the master), because the state
      of the host as the master knows it is still 'Ready'.
    - A call to modify_hosts with host_filter_data={'status': 'Repair Failed'}
      will not update the host, because the filter doesn't apply on the master.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    rpc_utils.check_modify_host(update_data)
    hosts = models.Host.query_objects(host_filter_data)

    affected_shard_hostnames = set()
    affected_host_ids = []

    # Check all hosts before changing data for exception safety.
    for host in hosts:
        rpc_utils.check_modify_host_locking(host, update_data)
        if host.shard:
            affected_shard_hostnames.add(host.shard.rpc_hostname())
            affected_host_ids.append(host.id)

    # Caution: Changing the filter from the original here. See docstring.
    rpc_utils.run_rpc_on_multiple_hostnames(
            'modify_hosts_local', affected_shard_hostnames,
            host_filter_data={'id__in': affected_host_ids},
            update_data=update_data)

    for host in hosts:
        host.update_object(update_data)


def modify_hosts_local(host_filter_data, update_data):
    """Modify attributes of hosts in local DB.

    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    for host in models.Host.query_objects(host_filter_data):
        host.update_object(update_data)


def add_labels_to_host(id, labels):
    """Adds labels to a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.add(*label_objs)


@rpc_utils.route_rpc_to_master
def host_add_labels(id, labels):
    """Adds labels to a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.

    @raises ValidationError: If adding more than one platform label.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    platforms = [label.name for label in label_objs if label.platform]
    if len(platforms) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
                       ', '.join(platforms)})

    host_obj = models.Host.smart_get(id)
    if len(platforms) == 1:
        models.Host.check_no_platform([host_obj])

    rpc_utils.fanout_rpc([host_obj], 'add_labels_to_host', False,
                         id=id, labels=labels)
    add_labels_to_host(id, labels)


def remove_labels_from_host(id, labels):
    """Removes labels from a given host only in local DB.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    models.Host.smart_get(id).labels.remove(*label_objs)


@rpc_utils.route_rpc_to_master
def host_remove_labels(id, labels):
    """Removes labels from a given host.

    @param id: id or hostname for a host.
    @param labels: ids or names for labels.
    """
    host_obj = models.Host.smart_get(id)
    rpc_utils.fanout_rpc([host_obj], 'remove_labels_from_host', False,
                         id=id, labels=labels)
    remove_labels_from_host(id, labels)


def get_host_attribute(attribute, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    hosts = rpc_utils.get_host_query((), False, False, True, host_filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_attr_dicts = []
    for host_obj in hosts:
        for attr_obj in host_obj.attribute_list:
            if attr_obj.attribute == attribute:
                host_attr_dicts.append(attr_obj.get_object_dict())
    return rpc_utils.prepare_for_serialization(host_attr_dicts)


def set_host_attribute(attribute, value, **host_filter_data):
    """
    @param attribute: string name of attribute
    @param value: string, or None to delete an attribute
    @param host_filter_data: filter data to apply to Hosts to choose hosts to
                             act upon
    """
    assert host_filter_data # disallow accidental actions on all hosts
    hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)

    # Master forwards this RPC to shards.
    if not utils.is_shard():
        rpc_utils.fanout_rpc(hosts, 'set_host_attribute', False,
                attribute=attribute, value=value, **host_filter_data)

    for host in hosts:
        host.set_or_delete_attribute(attribute, value)
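
# Illustrative only (hypothetical attribute name, value and hostname); passing
# value=None deletes the attribute via set_or_delete_attribute() above:
#   set_host_attribute('example_attribute', 'some-value', hostname='host1')
#   set_host_attribute('example_attribute', None, hostname='host1')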


@rpc_utils.forward_single_host_rpc_to_shard
def delete_host(id):
    models.Host.smart_get(id).delete()


def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True,
              include_current_job=False, **filter_data):
    """Get a list of dictionaries, each containing information about one host.

    @param multiple_labels: match hosts in all of the labels given.  Should
            be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
            "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    @param include_current_job: Set to True to include the ids of the currently
            running job and special task.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    hosts = list(hosts)
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'], host_dict['atomic_group'] = (rpc_utils.
                find_platform_and_atomic_group(host_obj))
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        if include_current_job:
            host_dict['current_job'] = None
            host_dict['current_special_task'] = None
            entries = models.HostQueueEntry.objects.filter(
                    host_id=host_dict['id'], active=True, complete=False)
            if entries:
                host_dict['current_job'] = (
                        entries[0].get_object_dict()['job'])
            tasks = models.SpecialTask.objects.filter(
                    host_id=host_dict['id'], is_active=True, is_complete=False)
            if tasks:
                host_dict['current_special_task'] = (
                        '%d-%s' % (tasks[0].get_object_dict()['id'],
                                   tasks[0].get_object_dict()['task'].lower()))
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
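
# Sketch of a typical returned element (illustrative values only; the exact
# key set comes from Host.get_object_dict() plus the fields added above):
#   {'hostname': 'host1', 'status': 'Ready', 'labels': ['board:link'],
#    'platform': 'link', 'atomic_group': None, 'acls': ['Everyone'],
#    'attributes': {}}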


def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     exclude_atomic_group_hosts,
                                     valid_only, filter_data)
    return hosts.count()


# tests

def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    return models.Test.add_object(name=name, test_type=test_type, path=path,
                                  author=author, dependencies=dependencies,
                                  experimental=experimental,
                                  run_verify=run_verify, test_time=test_time,
                                  test_category=test_category,
                                  sync_count=sync_count,
                                  test_class=test_class,
                                  description=description).id


def modify_test(id, **data):
    models.Test.smart_get(id).update_object(data)


def delete_test(id):
    models.Test.smart_get(id).delete()


def get_tests(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Test.list_objects(filter_data))


@_timer.decorate
def get_tests_status_counts_by_job_name_label(job_name_prefix, label_name):
    """Gets the counts of all passed and failed tests from the matching jobs.

    @param job_name_prefix: Name prefix of the jobs to get the summary from,
            e.g., 'butterfly-release/R40-6457.21.0/bvt-cq/'.
    @param label_name: Label that must be set in the jobs, e.g.,
            'cros-version:butterfly-release/R40-6457.21.0'.

    @returns A summary of the counts of all the passed and failed tests.
    """
    job_ids = list(models.Job.objects.filter(
            name__startswith=job_name_prefix,
            dependency_labels__name=label_name).values_list(
                'pk', flat=True))
    summary = {'passed': 0, 'failed': 0}
    if not job_ids:
        return summary

    counts = (tko_models.TestView.objects.filter(
            afe_job_id__in=job_ids).exclude(
                test_name='SERVER_JOB').exclude(
                    test_name__startswith='CLIENT_JOB').values(
                        'status').annotate(
                            count=Count('status')))
    for status in counts:
        if status['status'] == 'GOOD':
            summary['passed'] += status['count']
        else:
            summary['failed'] += status['count']
    return summary
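
# Illustrative call, reusing the hypothetical build from the docstring above:
#   get_tests_status_counts_by_job_name_label(
#           'butterfly-release/R40-6457.21.0/bvt-cq/',
#           'cros-version:butterfly-release/R40-6457.21.0')
# might return something like {'passed': 120, 'failed': 3}.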


# profilers

def add_profiler(name, description=None):
    return models.Profiler.add_object(name=name, description=description).id


def modify_profiler(id, **data):
    models.Profiler.smart_get(id).update_object(data)


def delete_profiler(id):
    models.Profiler.smart_get(id).delete()


def get_profilers(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.Profiler.list_objects(filter_data))


# users

def add_user(login, access_level=None):
    return models.User.add_object(login=login, access_level=access_level).id


def modify_user(id, **data):
    models.User.smart_get(id).update_object(data)


def delete_user(id):
    models.User.smart_get(id).delete()


def get_users(**filter_data):
    return rpc_utils.prepare_for_serialization(
        models.User.list_objects(filter_data))


# acl groups

def add_acl_group(name, description=None):
    group = models.AclGroup.add_object(name=name, description=description)
    group.users.add(models.User.current_user())
    return group.id


def modify_acl_group(id, **data):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    group.update_object(data)
    group.add_current_user_if_empty()


def acl_group_add_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.add(*users)


def acl_group_remove_users(id, users):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    users = models.User.smart_get_bulk(users)
    group.users.remove(*users)
    group.add_current_user_if_empty()


def acl_group_add_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.add(*hosts)
    group.on_host_membership_change()


def acl_group_remove_hosts(id, hosts):
    group = models.AclGroup.smart_get(id)
    group.check_for_acl_violation_acl_group()
    hosts = models.Host.smart_get_bulk(hosts)
    group.hosts.remove(*hosts)
    group.on_host_membership_change()


def delete_acl_group(id):
    models.AclGroup.smart_get(id).delete()


def get_acl_groups(**filter_data):
    acl_groups = models.AclGroup.list_objects(filter_data)
    for acl_group in acl_groups:
        acl_group_obj = models.AclGroup.objects.get(id=acl_group['id'])
        acl_group['users'] = [user.login
                              for user in acl_group_obj.users.all()]
        acl_group['hosts'] = [host.hostname
                              for host in acl_group_obj.hosts.all()]
    return rpc_utils.prepare_for_serialization(acl_groups)


# jobs

def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, upload_kernel_config=False,
                          db_tests=True):
    """
    Generates a client-side control file to load a kernel and run tests.

    @param tests List of tests to run. See db_tests for more information.
    @param kernel A list of kernel info dictionaries configuring which kernels
        to boot for this job and other options for them
    @param label Name of label to grab kernel config from.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explicitly set the default mode at all.
    @param upload_kernel_config: if enabled it will generate server control
            file code that uploads the kernel config file to the client and
            tells the client of the new (local) path when compiling the kernel;
            the tests must be server side tests
    @param db_tests: if True, the test object can be found in the database
                     backing the test model. In this case, tests is a tuple
                     of test IDs which are used to retrieve the test objects
                     from the database. If False, tests is a tuple of test
                     dictionaries stored client-side in the AFE.

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers, db_tests))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernels=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file, profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info
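
# Illustrative only ('sleeptest' as in the module docstring; whether names or
# ids are accepted for `tests` is decided by prepare_generate_control_file()):
#   info = generate_control_file(tests=['sleeptest'])
#   info['control_file']   # generated control file text
#   info['is_server']      # True if it must run as a server-side job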


def create_parameterized_job(name, priority, test, parameters, kernel=None,
                             label=None, profilers=(), profiler_parameters=None,
                             use_container=False, profile_only=None,
                             upload_kernel_config=False, hosts=(),
                             meta_hosts=(), one_time_hosts=(),
                             atomic_group_name=None, synch_count=None,
                             is_template=False, timeout=None,
                             timeout_mins=None, max_runtime_mins=None,
                             run_verify=False, email_list='', dependencies=(),
                             reboot_before=None, reboot_after=None,
                             parse_failed_repair=None, hostless=False,
                             keyvals=None, drone_set=None, run_reset=True,
                             require_ssp=None):
    """
    Creates and enqueues a parameterized job.

    Most parameters are a combination of the parameters for
    generate_control_file() and create_job(), with the exception of:

    @param test name or ID of the test to run
    @param parameters a map of parameter name ->
                          tuple of (param value, param type)
    @param profiler_parameters a dictionary of parameters for the profilers:
                                   key: profiler name
                                   value: dict of param name -> tuple of
                                                                    (param value,
                                                                     param type)
    """
776 # Save the values of the passed arguments here. What we're going to do with
777 # them is pass them all to rpc_utils.get_create_job_common_args(), which
778 # will extract the subset of these arguments that apply for
779 # rpc_utils.create_job_common(), which we then pass in to that function.
780 args = locals()
781
782 # Set up the parameterized job configs
783 test_obj = models.Test.smart_get(test)
Aviv Keshet3dd8beb2013-05-13 17:36:04 -0700784 control_type = test_obj.test_type
jamesren4a41e012010-07-16 22:33:48 +0000785
786 try:
787 label = models.Label.smart_get(label)
788 except models.Label.DoesNotExist:
789 label = None
790
791 kernel_objs = models.Kernel.create_kernels(kernel)
792 profiler_objs = [models.Profiler.smart_get(profiler)
793 for profiler in profilers]
794
795 parameterized_job = models.ParameterizedJob.objects.create(
796 test=test_obj, label=label, use_container=use_container,
797 profile_only=profile_only,
798 upload_kernel_config=upload_kernel_config)
799 parameterized_job.kernels.add(*kernel_objs)
800
801 for profiler in profiler_objs:
802 parameterized_profiler = models.ParameterizedJobProfiler.objects.create(
803 parameterized_job=parameterized_job,
804 profiler=profiler)
805 profiler_params = profiler_parameters.get(profiler.name, {})
806 for name, (value, param_type) in profiler_params.iteritems():
807 models.ParameterizedJobProfilerParameter.objects.create(
808 parameterized_job_profiler=parameterized_profiler,
809 parameter_name=name,
810 parameter_value=value,
811 parameter_type=param_type)
812
813 try:
814 for parameter in test_obj.testparameter_set.all():
815 if parameter.name in parameters:
816 param_value, param_type = parameters.pop(parameter.name)
817 parameterized_job.parameterizedjobparameter_set.create(
818 test_parameter=parameter, parameter_value=param_value,
819 parameter_type=param_type)
820
821 if parameters:
822 raise Exception('Extra parameters remain: %r' % parameters)
823
824 return rpc_utils.create_job_common(
825 parameterized_job=parameterized_job.id,
826 control_type=control_type,
827 **rpc_utils.get_create_job_common_args(args))
828 except:
829 parameterized_job.delete()
830 raise
831
832
def create_job_page_handler(name, priority, control_file, control_type,
                            image=None, hostless=False, firmware_rw_build=None,
                            firmware_ro_build=None, test_source_build=None,
                            **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param image: ChromeOS build to be installed on the DUT. Defaults to None.
    @param firmware_rw_build: Firmware build to update RW firmware. Defaults to
                              None, i.e., RW firmware will not be updated.
    @param firmware_ro_build: Firmware build to update RO firmware. Defaults to
                              None, i.e., RO firmware will not be updated.
    @param test_source_build: Build to be used to retrieve test code. Defaults
                              to None.
    @param kwargs extra args that will be required by create_suite_job or
                  create_job.

    @returns The created Job id number.
    """
    control_file = rpc_utils.encode_ascii(control_file)
    if not control_file:
        raise model_logic.ValidationError({
                'control_file' : "Control file cannot be empty"})

    if image and hostless:
        builds = {}
        builds[provision.CROS_VERSION_PREFIX] = image
        if firmware_rw_build:
            builds[provision.FW_RW_VERSION_PREFIX] = firmware_rw_build
        if firmware_ro_build:
            builds[provision.FW_RO_VERSION_PREFIX] = firmware_ro_build
        return site_rpc_interface.create_suite_job(
                name=name, control_file=control_file, priority=priority,
                builds=builds, test_source_build=test_source_build, **kwargs)
    return create_job(name, priority, control_file, control_type, image=image,
                      hostless=hostless, **kwargs)
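
# Sketch of the builds dict assembled above for a hostless suite job, using a
# hypothetical ChromeOS build name (the real keys come from provision.*):
#   {provision.CROS_VERSION_PREFIX: 'lumpy-release/R40-6457.21.0',
#    provision.FW_RW_VERSION_PREFIX: 'lumpy-firmware/R40-6457.21.0'}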


@rpc_utils.route_rpc_to_master
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, timeout_mins=None, max_runtime_mins=None,
               run_verify=False, email_list='', dependencies=(),
               reboot_before=None, reboot_after=None, parse_failed_repair=None,
               hostless=False, keyvals=None, drone_set=None, image=None,
               parent_job_id=None, test_retry=0, run_reset=True,
               require_ssp=None, args=(), **kwargs):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Integer priority of this job. Higher is more important.
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
        synch_count == 1 means the job is asynchronous.  If an atomic group is
        given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param timeout_mins Minutes after this call returns until the job times
        out.
    @param max_runtime_mins Minutes from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
        this job will be parsed as part of the job.
    @param hostless if true, create a hostless job
    @param keyvals dict of keyvals to associate with the job
    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
        one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.
    @param drone_set The name of the drone set to run this test on.
    @param image OS image to install before running job.
    @param parent_job_id id of a job considered to be parent of created job.
    @param test_retry Number of times to retry test if the test did not
        complete successfully. (optional, default: 0)
    @param run_reset Should the host be reset before running the test?
    @param require_ssp Set to True to require server-side packaging to run the
                       test. If it's set to None, drone will still try to run
                       the server side with server-side packaging. If the
                       autotest-server package doesn't exist for the build or
                       image is not set, drone will run the test without server-
                       side packaging. Default is None.
    @param args A list of args to be injected into control file.
    @param kwargs extra keyword args. NOT USED.

    @returns The created Job id number.
    """
    if args:
        control_file = tools.inject_vars({'args': args}, control_file)

    if image is None:
        return rpc_utils.create_job_common(
                **rpc_utils.get_create_job_common_args(locals()))

    # Translate the image name, in case it's a relative build name.
    ds = dev_server.ImageServer.resolve(image)
    image = ds.translate(image)

    # When image is supplied use a known parameterized test already in the
    # database to pass the OS image path from the front end, through the
    # scheduler, and finally to autoserv as the --image parameter.

    # The test autoupdate_ParameterizedJob is in afe_autotests and used to
    # instantiate a Test object and from there a ParameterizedJob.
    known_test_obj = models.Test.smart_get('autoupdate_ParameterizedJob')
    known_parameterized_job = models.ParameterizedJob.objects.create(
            test=known_test_obj)

    # autoupdate_ParameterizedJob has a single parameter, the image parameter,
    # stored in the table afe_test_parameters. We retrieve and set this
    # instance of the parameter to the OS image path.
    image_parameter = known_test_obj.testparameter_set.get(test=known_test_obj,
                                                           name='image')
    known_parameterized_job.parameterizedjobparameter_set.create(
            test_parameter=image_parameter, parameter_value=image,
            parameter_type='string')

    # TODO(crbug.com/502638): save firmware build etc to parameterized_job.

    # By passing a parameterized_job to create_job_common the job entry in
    # the afe_jobs table will have the field parameterized_job_id set.
    # The scheduler uses this id in the afe_parameterized_jobs table to
    # match this job to our known test, and then with the
    # afe_parameterized_job_parameters table to get the actual image path.
    return rpc_utils.create_job_common(
            parameterized_job=known_parameterized_job.id,
            **rpc_utils.get_create_job_common_args(locals()))


def abort_host_queue_entries(**filter_data):
    """\
    Abort a set of host queue entries.

    @return: A list of dictionaries, each containing information
             about an aborted HQE.
    """
    query = models.HostQueueEntry.query_objects(filter_data)

    # Don't allow aborts on:
    # 1. Jobs that have already completed (whether or not they were aborted)
    # 2. Jobs that have already been aborted (but may not have completed)
    query = query.filter(complete=False).filter(aborted=False)
    models.AclGroup.check_abort_permissions(query)
    host_queue_entries = list(query.select_related())
    rpc_utils.check_abort_synchronous_jobs(host_queue_entries)

    models.HostQueueEntry.abort_host_queue_entries(host_queue_entries)
    hqe_info = [{'HostQueueEntry': hqe.id, 'Job': hqe.job_id,
                 'Job name': hqe.job.name} for hqe in host_queue_entries]
    return hqe_info
showard9d821ab2008-07-11 16:54:29 +0000994
995
beeps8bb1f7d2013-08-05 01:30:09 -0700996def abort_special_tasks(**filter_data):
997 """\
998 Abort the special task, or tasks, specified in the filter.
999 """
1000 query = models.SpecialTask.query_objects(filter_data)
1001 special_tasks = query.filter(is_active=True)
1002 for task in special_tasks:
1003 task.abort()
1004
1005
Simran Basi73dae552013-02-25 14:57:46 -08001006def _call_special_tasks_on_hosts(task, hosts):
1007 """\
1008 Schedules a set of hosts for a special task.
1009
1010 @returns A list of hostnames that a special task was created for.
1011 """
1012 models.AclGroup.check_for_acl_violation_hosts(hosts)
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -08001013 shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts)
Prashanth Balasubramanian8c98ac12014-12-23 11:26:44 -08001014 if shard_host_map and not utils.is_shard():
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -08001015 raise ValueError('The following hosts are on shards, please '
1016 'follow the link to the shards and create jobs '
1017 'there instead. %s.' % shard_host_map)
Simran Basi73dae552013-02-25 14:57:46 -08001018 for host in hosts:
1019 models.SpecialTask.schedule_special_task(host, task)
1020 return list(sorted(host.hostname for host in hosts))
1021
1022
def _forward_special_tasks_on_hosts(task, rpc, **filter_data):
    """Forward special tasks to corresponding shards.

    On the master, when special tasks are fired on hosts that are sharded,
    forward the RPC to the corresponding shards.

    On a shard, create special task records in the local DB.

    @param task: Enum value of frontend.afe.models.SpecialTask.Task
    @param rpc: RPC name to forward.
    @param filter_data: Filter keywords to be used for DB query.

    @return: A list of hostnames that a special task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    shard_host_map = rpc_utils.bucket_hosts_by_shard(hosts, rpc_hostnames=True)

    # Filter out hosts on a shard from those on the master, forward
    # rpcs to the shard with an additional hostname__in filter, and
    # create a local SpecialTask for each remaining host.
    if shard_host_map and not utils.is_shard():
        hosts = [h for h in hosts if h.shard is None]
        for shard, hostnames in shard_host_map.iteritems():

            # The main client of this module is the frontend website, and
            # it invokes it with an 'id' or an 'id__in' filter. Regardless,
            # the 'hostname' filter should narrow down the list of hosts on
            # each shard even though we supply all the ids in filter_data.
            # This method uses hostname instead of id because it fits better
            # with the overall architecture of redirection functions in
            # rpc_utils.
            shard_filter = filter_data.copy()
            shard_filter['hostname__in'] = hostnames
            rpc_utils.run_rpc_on_multiple_hostnames(
                    rpc, [shard], **shard_filter)

    # There is a race condition here if someone assigns a shard to one of these
    # hosts before we create the task. The host will stay on the master if:
    # 1. The host is not Ready
    # 2. The host is Ready but has a task
    # But if the host is Ready and doesn't have a task yet, it will get sent
    # to the shard as we're creating a task here.

    # Given that we only rarely verify Ready hosts it isn't worth putting this
    # entire method in a transaction. The worst case scenario is that we have
    # a verify running on a Ready host while the shard is using it; if the
    # verify fails, no subsequent tasks will be created against the host on the
    # master, and verifies are safe enough that this is OK.
    return _call_special_tasks_on_hosts(task, hosts)


def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.VERIFY, 'reverify_hosts', **filter_data)


def repair_hosts(**filter_data):
    """\
    Schedules a set of hosts for repair.

    @returns A list of hostnames that a repair task was created for.
    """
    return _forward_special_tasks_on_hosts(
            models.SpecialTask.Task.REPAIR, 'repair_hosts', **filter_data)


def get_jobs(not_yet_run=False, running=False, finished=False,
             suite=False, sub=False, standalone=False, **filter_data):
    """\
    Extra status filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have started running but for which not
        all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
        aborted).

    Extra type filter args for get_jobs:
    -suite: Include only jobs with child jobs.
    -sub: Include only jobs with a parent job.
    -standalone: Include only jobs with no child or parent jobs.
    At most one of these three fields should be specified.
    """
Jiaxi Luo15cbf372014-07-01 19:20:20 -07001110 extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
1111 running,
1112 finished)
1113 filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
1114 suite,
1115 sub,
1116 standalone)
showard0957a842009-05-11 19:25:08 +00001117 job_dicts = []
1118 jobs = list(models.Job.query_objects(filter_data))
1119 models.Job.objects.populate_relationships(jobs, models.Label,
1120 'dependencies')
showardc1a98d12010-01-15 00:22:22 +00001121 models.Job.objects.populate_relationships(jobs, models.JobKeyval, 'keyvals')
showard0957a842009-05-11 19:25:08 +00001122 for job in jobs:
1123 job_dict = job.get_object_dict()
1124 job_dict['dependencies'] = ','.join(label.name
1125 for label in job.dependencies)
showardc1a98d12010-01-15 00:22:22 +00001126 job_dict['keyvals'] = dict((keyval.key, keyval.value)
1127 for keyval in job.keyvals)
Eric Lid23bc192011-02-09 14:38:57 -08001128 if job.parameterized_job:
1129 job_dict['image'] = get_parameterized_autoupdate_image_url(job)
showard0957a842009-05-11 19:25:08 +00001130 job_dicts.append(job_dict)
1131 return rpc_utils.prepare_for_serialization(job_dicts)
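# Illustrative usage (hypothetical owner value): the extra boolean arguments
# combine with ordinary Django-style filter keywords.
#
#   get_jobs(not_yet_run=True, owner='showard')   # queued jobs for one user
#   get_jobs(finished=True, suite=True)           # completed suite jobs
#   get_num_jobs(running=True)                    # count instead of a list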
mblighe8819cd2008-02-15 16:48:40 +00001132
1133
1134def get_num_jobs(not_yet_run=False, running=False, finished=False,
Jiaxi Luo15cbf372014-07-01 19:20:20 -07001135 suite=False, sub=False, standalone=False,
jadmanski0afbb632008-06-06 21:10:57 +00001136 **filter_data):
1137 """\
1138 See get_jobs() for documentation of extra filter parameters.
1139 """
Jiaxi Luo15cbf372014-07-01 19:20:20 -07001140 extra_args = rpc_utils.extra_job_status_filters(not_yet_run,
1141 running,
1142 finished)
1143 filter_data['extra_args'] = rpc_utils.extra_job_type_filters(extra_args,
1144 suite,
1145 sub,
1146 standalone)
jadmanski0afbb632008-06-06 21:10:57 +00001147 return models.Job.query_count(filter_data)
mblighe8819cd2008-02-15 16:48:40 +00001148
1149
mblighe8819cd2008-02-15 16:48:40 +00001150def get_jobs_summary(**filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001151 """\
Jiaxi Luoaac54572014-06-04 13:57:02 -07001152 Like get_jobs(), but adds 'status_counts' and 'result_counts' fields.
1153
1154 'status_counts' field is a dictionary mapping status strings to the number
1155 of hosts currently with that status, i.e. {'Queued' : 4, 'Running' : 2}.
1156
1157 'result_counts' field is piped to tko's rpc_interface and has the return
1158 format specified under get_group_counts.
jadmanski0afbb632008-06-06 21:10:57 +00001159 """
1160 jobs = get_jobs(**filter_data)
1161 ids = [job['id'] for job in jobs]
1162 all_status_counts = models.Job.objects.get_status_counts(ids)
1163 for job in jobs:
1164 job['status_counts'] = all_status_counts[job['id']]
Jiaxi Luoaac54572014-06-04 13:57:02 -07001165 job['result_counts'] = tko_rpc_interface.get_status_counts(
1166 ['afe_job_id', 'afe_job_id'],
1167 header_groups=[['afe_job_id'], ['afe_job_id']],
1168 **{'afe_job_id': job['id']})
jadmanski0afbb632008-06-06 21:10:57 +00001169 return rpc_utils.prepare_for_serialization(jobs)
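# Illustrative usage (hypothetical job name prefix): each returned dict carries
# the per-host status breakdown alongside the usual job fields.
#
#   for job in get_jobs_summary(name__startswith='dummy_Pass'):
#       print job['id'], job['status_counts'], job['result_counts']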
mblighe8819cd2008-02-15 16:48:40 +00001170
1171
showarda965cef2009-05-15 23:17:41 +00001172def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
showarda8709c52008-07-03 19:44:54 +00001173 """\
1174 Retrieves all the information needed to clone a job.
1175 """
showarda8709c52008-07-03 19:44:54 +00001176 job = models.Job.objects.get(id=id)
showard29f7cd22009-04-29 21:16:24 +00001177 job_info = rpc_utils.get_job_info(job,
showarda965cef2009-05-15 23:17:41 +00001178 preserve_metahosts,
1179 queue_entry_filter_data)
showard945072f2008-09-03 20:34:59 +00001180
showardd9992fe2008-07-31 02:15:03 +00001181 host_dicts = []
showard29f7cd22009-04-29 21:16:24 +00001182 for host in job_info['hosts']:
1183 host_dict = get_hosts(id=host.id)[0]
1184 other_labels = host_dict['labels']
1185 if host_dict['platform']:
1186 other_labels.remove(host_dict['platform'])
1187 host_dict['other_labels'] = ', '.join(other_labels)
showardd9992fe2008-07-31 02:15:03 +00001188 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001189
showard29f7cd22009-04-29 21:16:24 +00001190 for host in job_info['one_time_hosts']:
1191 host_dict = dict(hostname=host.hostname,
1192 id=host.id,
1193 platform='(one-time host)',
1194 locked_text='')
1195 host_dicts.append(host_dict)
showarda8709c52008-07-03 19:44:54 +00001196
showard4d077562009-05-08 18:24:36 +00001197 # convert keys from Label objects to strings (names of labels)
showard29f7cd22009-04-29 21:16:24 +00001198 meta_host_counts = dict((meta_host.name, count) for meta_host, count
showard4d077562009-05-08 18:24:36 +00001199 in job_info['meta_host_counts'].iteritems())
showard29f7cd22009-04-29 21:16:24 +00001200
1201 info = dict(job=job.get_object_dict(),
1202 meta_host_counts=meta_host_counts,
1203 hosts=host_dicts)
1204 info['job']['dependencies'] = job_info['dependencies']
1205 if job_info['atomic_group']:
1206 info['atomic_group_name'] = (job_info['atomic_group']).name
1207 else:
1208 info['atomic_group_name'] = None
jamesren2275ef12010-04-12 18:25:06 +00001209 info['hostless'] = job_info['hostless']
jamesren76fcf192010-04-21 20:39:50 +00001210 info['drone_set'] = job.drone_set and job.drone_set.name
showarda8709c52008-07-03 19:44:54 +00001211
Eric Lid23bc192011-02-09 14:38:57 -08001212 if job.parameterized_job:
1213 info['job']['image'] = get_parameterized_autoupdate_image_url(job)
1214
showarda8709c52008-07-03 19:44:54 +00001215 return rpc_utils.prepare_for_serialization(info)
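# Illustrative usage (hypothetical job id): the returned dict is what the
# frontend needs to pre-populate the "create job" form when cloning.
#
#   info = get_info_for_clone(42, preserve_metahosts=True)
#   print info['job']['name'], info['meta_host_counts'], info['hostless']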
1216
1217
showard34dc5fa2008-04-24 20:58:40 +00001218# host queue entries
1219
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001220def get_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001221 """\
showardc92da832009-04-07 18:14:34 +00001222 @returns A sequence of nested dictionaries of host and job information.
jadmanski0afbb632008-06-06 21:10:57 +00001223 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001224 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1225 'started_on__lte',
1226 start_time,
1227 end_time,
1228 **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001229 return rpc_utils.prepare_rows_as_nested_dicts(
1230 models.HostQueueEntry.query_objects(filter_data),
1231 ('host', 'atomic_group', 'job'))
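# Illustrative usage (hypothetical job id and dates): start_time/end_time are
# injected as 'started_on__gte'/'started_on__lte' filters.
#
#   entries = get_host_queue_entries(job__id=42,
#                                    start_time='2015-01-01',
#                                    end_time='2015-01-31')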
showard34dc5fa2008-04-24 20:58:40 +00001232
1233
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001234def get_num_host_queue_entries(start_time=None, end_time=None, **filter_data):
jadmanski0afbb632008-06-06 21:10:57 +00001235 """\
1236 Get the number of host queue entries associated with this job.
1237 """
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001238 filter_data = rpc_utils.inject_times_to_filter('started_on__gte',
1239 'started_on__lte',
1240 start_time,
1241 end_time,
1242 **filter_data)
jadmanski0afbb632008-06-06 21:10:57 +00001243 return models.HostQueueEntry.query_count(filter_data)
showard34dc5fa2008-04-24 20:58:40 +00001244
1245
showard1e935f12008-07-11 00:11:36 +00001246def get_hqe_percentage_complete(**filter_data):
1247 """
showardc92da832009-04-07 18:14:34 +00001248 Computes the fraction of host queue entries matching the given filter data
showard1e935f12008-07-11 00:11:36 +00001249 that are complete.
1250 """
1251 query = models.HostQueueEntry.query_objects(filter_data)
1252 complete_count = query.filter(complete=True).count()
1253 total_count = query.count()
1254 if total_count == 0:
1255 return 1
1256 return float(complete_count) / total_count
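# Illustrative usage (hypothetical job id): returns a float in [0, 1], and 1
# when no entries match the filter.
#
#   get_hqe_percentage_complete(job__id=42)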
1257
1258
showard1a5a4082009-07-28 20:01:37 +00001259# special tasks
1260
1261def get_special_tasks(**filter_data):
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001262 """Get special task entries from the local database.
1263
1264 Query the special tasks table for tasks matching the given
1265 `filter_data`, and return a list of the results. No attempt is
1266 made to forward the call to shards; the buck will stop here.
1267 The caller is expected to know the target shard for such reasons
1268 as:
1269 * The caller is a service (such as gs_offloader) configured
1270 to operate on behalf of one specific shard, and no other.
1271 * The caller has a host as a parameter, and knows that this is
1272 the shard assigned to that host.
1273
1274 @param filter_data Filter keywords to pass to the underlying
1275 database query.
1276
1277 """
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001278 return rpc_utils.prepare_rows_as_nested_dicts(
1279 models.SpecialTask.query_objects(filter_data),
1280 ('host', 'queue_entry'))
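# Illustrative usage (hypothetical filter values): this queries only the local
# database, so shard-aware callers should prefer the host-specific wrappers
# below.
#
#   get_special_tasks(task='Repair', success=False, query_limit=10)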
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001281
1282
1283def get_host_special_tasks(host_id, **filter_data):
1284 """Get special task entries for a given host.
1285
1286 Query the special tasks table for tasks that ran on the host
1287 given by `host_id` and matching the given `filter_data`.
1288 Return a list of the results. If the host is assigned to a
1289 shard, forward this call to that shard.
1290
1291 @param host_id Id in the database of the target host.
1292 @param filter_data Filter keywords to pass to the underlying
1293 database query.
1294
1295 """
MK Ryu0c1a37d2015-04-30 12:00:55 -07001296 # Retrieve host data even if the host is in an invalid state.
1297 host = models.Host.smart_get(host_id, False)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001298 if not host.shard:
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001299 return get_special_tasks(host_id=host_id, **filter_data)
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001300 else:
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001301 # The return values from AFE methods are post-processed
1302 # objects that aren't JSON-serializable. So, we have to
1303 # call AFE.run() to get the raw, serializable output from
1304 # the shard.
J. Richard Barnetteb5164d62015-04-13 12:59:31 -07001305 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1306 return shard_afe.run('get_special_tasks',
1307 host_id=host_id, **filter_data)
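# Illustrative usage (hypothetical host id): same filters as
# get_special_tasks(), but the call is forwarded when the host lives on a
# shard.
#
#   get_host_special_tasks(1234, task='Verify', is_complete=True)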
showard1a5a4082009-07-28 20:01:37 +00001308
1309
MK Ryu0c1a37d2015-04-30 12:00:55 -07001310def get_num_special_tasks(**kwargs):
1311 """Get the number of special task entries from the local database.
1312
1313 Query the special tasks table for tasks matching the given 'kwargs',
1314 and return the number of the results. No attempt is made to forward
1315 the call to shards; the buck will stop here.
1316
1317 @param kwargs Filter keywords to pass to the underlying database query.
1318
1319 """
1320 return models.SpecialTask.query_count(kwargs)
1321
1322
1323def get_host_num_special_tasks(host, **kwargs):
1324 """Get special task entries for a given host.
1325
1326 Query the special tasks table for tasks that ran on the host
1327 given by 'host' and matching the given 'kwargs'.
1328 Return a list of the results. If the host is assigned to a
1329 shard, forward this call to that shard.
1330
1331 @param host Id or name of a host; more often a hostname.
1332 @param kwargs Filter keywords to pass to the underlying database query.
1333
1334 """
1335 # Retrieve host data even if the host is in an invalid state.
1336 host_model = models.Host.smart_get(host, False)
1337 if not host_model.shard:
1338 return get_num_special_tasks(host=host, **kwargs)
1339 else:
1340 shard_afe = frontend.AFE(server=host_model.shard.rpc_hostname())
1341 return shard_afe.run('get_num_special_tasks', host=host, **kwargs)
1342
1343
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001344def get_status_task(host_id, end_time):
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001345 """Get the "status task" for a host from the local shard.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001346
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001347 Returns a single special task representing the given host's
1348 "status task". The status task is a completed special task that
1349 identifies whether the corresponding host was working or broken
1350 when it completed. A successful task indicates a working host;
1351 a failed task indicates broken.
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001352
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001353 This call will not be forward to a shard; the receiving server
1354 must be the shard that owns the host.
1355
1356 @param host_id Id in the database of the target host.
1357 @param end_time Time reference for the host's status.
1358
1359 @return A single task; its status (successful or not)
1360 corresponds to the status of the host (working or
1361 broken) at the given time. If no task is found, return
1362 `None`.
1363
1364 """
1365 tasklist = rpc_utils.prepare_rows_as_nested_dicts(
1366 status_history.get_status_task(host_id, end_time),
1367 ('host', 'queue_entry'))
1368 return tasklist[0] if tasklist else None
1369
1370
1371def get_host_status_task(host_id, end_time):
1372 """Get the "status task" for a host from its owning shard.
1373
1374 Finds the given host's owning shard, and forwards to it a call
1375 to `get_status_task()` (see above).
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001376
1377 @param host_id Id in the database of the target host.
1378 @param end_time Time reference for the host's status.
1379
1380 @return A single task; its status (successful or not)
1381 corresponds to the status of the host (working or
1382 broken) at the given time. If no task is found, return
1383 `None`.
1384
1385 """
1386 host = models.Host.smart_get(host_id)
1387 if not host.shard:
J. Richard Barnette4d7e6e62015-05-01 10:47:34 -07001388 return get_status_task(host_id, end_time)
J. Richard Barnette39255fa2015-04-14 17:23:41 -07001389 else:
1390 # The return values from AFE methods are post-processed
1391 # objects that aren't JSON-serializable. So, we have to
1392 # call AFE.run() to get the raw, serializable output from
1393 # the shard.
1394 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1395 return shard_afe.run('get_status_task',
1396 host_id=host_id, end_time=end_time)
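# Illustrative usage (hypothetical host id and timestamp; assumes the
# serialized task exposes its 'success' field): callers normally go through
# get_host_status_task(), which routes to the owning shard.
#
#   task = get_host_status_task(1234, '2015-08-01 00:00:00')
#   working = task is not None and task['success']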
1397
1398
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001399def get_host_diagnosis_interval(host_id, end_time, success):
1400 """Find a "diagnosis interval" for a given host.
1401
1402 A "diagnosis interval" identifies a start and end time where
1403 the host went from "working" to "broken", or vice versa. The
1404 interval's starting time is the starting time of the last status
1405 task with the old status; the end time is the finish time of the
1406 first status task with the new status.
1407
1408 This routine finds the most recent diagnosis interval for the
1409 given host prior to `end_time`, with a starting status matching
1410 `success`. If `success` is true, the interval will start with a
1411 successful status task; if false the interval will start with a
1412 failed status task.
1413
1414 @param host_id Id in the database of the target host.
1415 @param end_time Time reference for the diagnosis interval.
1416 @param success Whether the diagnosis interval should start
1417 with a successful or failed status task.
1418
1419 @return A list of two strings. The first is the timestamp for
1420 the beginning of the interval; the second is the
1421 timestamp for the end. If the host has never changed
1422 state, the list is empty.
1423
1424 """
1425 host = models.Host.smart_get(host_id)
J. Richard Barnette78f281a2015-06-29 13:24:51 -07001426 if not host.shard or utils.is_shard():
J. Richard Barnette8abbfd62015-06-23 12:46:54 -07001427 return status_history.get_diagnosis_interval(
1428 host_id, end_time, success)
1429 else:
1430 shard_afe = frontend.AFE(server=host.shard.rpc_hostname())
1431 return shard_afe.get_host_diagnosis_interval(
1432 host_id, end_time, success)
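# Illustrative usage (hypothetical host id and timestamp): find when the host
# most recently went from working to broken before the given time.
#
#   interval = get_host_diagnosis_interval(1234, '2015-08-01 00:00:00', True)
#   if interval:
#       start, end = interval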
1433
1434
showardc0ac3a72009-07-08 21:14:45 +00001435# support for host detail view
1436
MK Ryu0c1a37d2015-04-30 12:00:55 -07001437def get_host_queue_entries_and_special_tasks(host, query_start=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001438 query_limit=None, start_time=None,
1439 end_time=None):
showardc0ac3a72009-07-08 21:14:45 +00001440 """
1441 @returns an interleaved list of HostQueueEntries and SpecialTasks,
1442 in approximate run order. Each dict contains keys for type, host,
1443 job, status, started_on, execution_path, and ID.
1444 """
1445 total_limit = None
1446 if query_limit is not None:
1447 total_limit = query_start + query_limit
MK Ryu0c1a37d2015-04-30 12:00:55 -07001448 filter_data_common = {'host': host,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001449 'query_limit': total_limit,
1450 'sort_by': ['-id']}
showardc0ac3a72009-07-08 21:14:45 +00001451
MK Ryu0c1a37d2015-04-30 12:00:55 -07001452 filter_data_special_tasks = rpc_utils.inject_times_to_filter(
1453 'time_started__gte', 'time_started__lte', start_time, end_time,
1454 **filter_data_common)
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001455
MK Ryu0c1a37d2015-04-30 12:00:55 -07001456 queue_entries = get_host_queue_entries(
1457 start_time, end_time, **filter_data_common)
1458 special_tasks = get_host_special_tasks(host, **filter_data_special_tasks)
showardc0ac3a72009-07-08 21:14:45 +00001459
1460 interleaved_entries = rpc_utils.interleave_entries(queue_entries,
1461 special_tasks)
1462 if query_start is not None:
1463 interleaved_entries = interleaved_entries[query_start:]
1464 if query_limit is not None:
1465 interleaved_entries = interleaved_entries[:query_limit]
MK Ryu0c1a37d2015-04-30 12:00:55 -07001466 return rpc_utils.prepare_host_queue_entries_and_special_tasks(
1467 interleaved_entries, queue_entries)
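# Illustrative usage (hypothetical hostname): this backs the host detail page,
# interleaving queue entries and special tasks in approximate run order.
#
#   entries = get_host_queue_entries_and_special_tasks('host1',
#                                                      query_start=0,
#                                                      query_limit=20)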
showardc0ac3a72009-07-08 21:14:45 +00001468
1469
MK Ryu0c1a37d2015-04-30 12:00:55 -07001470def get_num_host_queue_entries_and_special_tasks(host, start_time=None,
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001471 end_time=None):
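    """\
    Get the number of HQEs and special tasks for a host.

    Counts the host queue entries and special tasks that
    get_host_queue_entries_and_special_tasks() would interleave for the
    same host and optional start_time/end_time window.
    """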
MK Ryu0c1a37d2015-04-30 12:00:55 -07001472 filter_data_common = {'host': host}
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001473
1474 filter_data_queue_entries, filter_data_special_tasks = (
1475 rpc_utils.inject_times_to_hqe_special_tasks_filters(
1476 filter_data_common, start_time, end_time))
1477
1478 return (models.HostQueueEntry.query_count(filter_data_queue_entries)
MK Ryu0c1a37d2015-04-30 12:00:55 -07001479 + get_host_num_special_tasks(**filter_data_special_tasks))
showardc0ac3a72009-07-08 21:14:45 +00001480
1481
showard29f7cd22009-04-29 21:16:24 +00001482# recurring run
1483
1484def get_recurring(**filter_data):
1485 return rpc_utils.prepare_rows_as_nested_dicts(
1486 models.RecurringRun.query_objects(filter_data),
1487 ('job', 'owner'))
1488
1489
1490def get_num_recurring(**filter_data):
1491 return models.RecurringRun.query_count(filter_data)
1492
1493
1494def delete_recurring_runs(**filter_data):
1495 to_delete = models.RecurringRun.query_objects(filter_data)
1496 to_delete.delete()
1497
1498
1499def create_recurring_run(job_id, start_date, loop_period, loop_count):
showard64a95952010-01-13 21:27:16 +00001500 owner = models.User.current_user().login
showard29f7cd22009-04-29 21:16:24 +00001501 job = models.Job.objects.get(id=job_id)
1502 return job.create_recurring_job(start_date=start_date,
1503 loop_period=loop_period,
1504 loop_count=loop_count,
1505 owner=owner)
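# Illustrative usage (hypothetical values; loop_period units follow the
# scheduler's convention): schedule job 42 for ten recurring runs starting at
# the given date.
#
#   create_recurring_run(42, datetime.datetime(2015, 8, 1), 3600, 10)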
1506
1507
mblighe8819cd2008-02-15 16:48:40 +00001508# other
1509
showarde0b63622008-08-04 20:58:47 +00001510def echo(data=""):
1511 """\
1512 Returns the passed-in string. Useful as a basic test that RPC calls
1513 can successfully be made.
1514 """
1515 return data
1516
1517
showardb7a52fd2009-04-27 20:10:56 +00001518def get_motd():
1519 """\
1520 Returns the message of the day as a string.
1521 """
1522 return rpc_utils.get_motd()
1523
1524
mblighe8819cd2008-02-15 16:48:40 +00001525def get_static_data():
jadmanski0afbb632008-06-06 21:10:57 +00001526 """\
1527 Returns a dictionary containing a bunch of data that shouldn't change
1528 often and is otherwise inaccessible. This includes:
showardc92da832009-04-07 18:14:34 +00001529
1530 priorities: List of job priority choices.
1531 default_priority: Default priority value for new jobs.
1532 users: Sorted list of all users.
Jiaxi Luo31874592014-06-11 10:36:35 -07001533 labels: Sorted list of labels whose names do not start with
1534 'cros-version', 'fw-version', 'fwrw-version', or 'fwro-version'.
showardc92da832009-04-07 18:14:34 +00001535 atomic_groups: Sorted list of all atomic groups.
1536 tests: Sorted list of all tests.
1537 profilers: Sorted list of all profilers.
1538 current_user: Logged-in username.
1539 host_statuses: Sorted list of possible Host statuses.
1540 job_statuses: Sorted list of possible HostQueueEntry statuses.
Simran Basi7e605742013-11-12 13:43:36 -08001541 job_timeout_mins_default: The default job timeout length in minutes.
showarda1e74b32009-05-12 17:32:04 +00001542 parse_failed_repair_default: Default value for the parse_failed_repair job
Jiaxi Luo31874592014-06-11 10:36:35 -07001543 option.
showardc92da832009-04-07 18:14:34 +00001544 reboot_before_options: A list of valid RebootBefore string enums.
1545 reboot_after_options: A list of valid RebootAfter string enums.
1546 motd: Server's message of the day.
1547 status_dictionary: A mapping from one word job status names to a more
1548 informative description.
jadmanski0afbb632008-06-06 21:10:57 +00001549 """
showard21baa452008-10-21 00:08:39 +00001550
1551 job_fields = models.Job.get_field_dict()
jamesren76fcf192010-04-21 20:39:50 +00001552 default_drone_set_name = models.DroneSet.default_drone_set_name()
1553 drone_sets = ([default_drone_set_name] +
1554 sorted(drone_set.name for drone_set in
1555 models.DroneSet.objects.exclude(
1556 name=default_drone_set_name)))
showard21baa452008-10-21 00:08:39 +00001557
jadmanski0afbb632008-06-06 21:10:57 +00001558 result = {}
Alex Miller7d658cf2013-09-04 16:00:35 -07001559 result['priorities'] = priorities.Priority.choices()
1560 default_priority = priorities.Priority.DEFAULT
1561 result['default_priority'] = 'Default'
1562 result['max_schedulable_priority'] = priorities.Priority.DEFAULT
jadmanski0afbb632008-06-06 21:10:57 +00001563 result['users'] = get_users(sort_by=['login'])
Jiaxi Luo31874592014-06-11 10:36:35 -07001564
1565 label_exclude_filters = [{'name__startswith': 'cros-version'},
Dan Shi65351d62015-08-03 12:03:23 -07001566 {'name__startswith': 'fw-version'},
1567 {'name__startswith': 'fwrw-version'},
1568 {'name__startswith': 'fwro-version'}]
Jiaxi Luo31874592014-06-11 10:36:35 -07001569 result['labels'] = get_labels(
1570 label_exclude_filters,
1571 sort_by=['-platform', 'name'])
1572
showardc92da832009-04-07 18:14:34 +00001573 result['atomic_groups'] = get_atomic_groups(sort_by=['name'])
jadmanski0afbb632008-06-06 21:10:57 +00001574 result['tests'] = get_tests(sort_by=['name'])
showard2b9a88b2008-06-13 20:55:03 +00001575 result['profilers'] = get_profilers(sort_by=['name'])
showard0fc38302008-10-23 00:44:07 +00001576 result['current_user'] = rpc_utils.prepare_for_serialization(
showard64a95952010-01-13 21:27:16 +00001577 models.User.current_user().get_object_dict())
showard2b9a88b2008-06-13 20:55:03 +00001578 result['host_statuses'] = sorted(models.Host.Status.names)
mbligh5a198b92008-12-11 19:33:29 +00001579 result['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
Simran Basi7e605742013-11-12 13:43:36 -08001580 result['job_timeout_mins_default'] = models.Job.DEFAULT_TIMEOUT_MINS
Simran Basi34217022012-11-06 13:43:15 -08001581 result['job_max_runtime_mins_default'] = (
1582 models.Job.DEFAULT_MAX_RUNTIME_MINS)
showarda1e74b32009-05-12 17:32:04 +00001583 result['parse_failed_repair_default'] = bool(
1584 models.Job.DEFAULT_PARSE_FAILED_REPAIR)
jamesrendd855242010-03-02 22:23:44 +00001585 result['reboot_before_options'] = model_attributes.RebootBefore.names
1586 result['reboot_after_options'] = model_attributes.RebootAfter.names
showard8fbae652009-01-20 23:23:10 +00001587 result['motd'] = rpc_utils.get_motd()
jamesren76fcf192010-04-21 20:39:50 +00001588 result['drone_sets_enabled'] = models.DroneSet.drone_sets_enabled()
1589 result['drone_sets'] = drone_sets
jamesren4a41e012010-07-16 22:33:48 +00001590 result['parameterized_jobs'] = models.Job.parameterized_jobs_enabled()
showard8ac29b42008-07-17 17:01:55 +00001591
showardd3dc1992009-04-22 21:01:40 +00001592 result['status_dictionary'] = {"Aborted": "Aborted",
showard8ac29b42008-07-17 17:01:55 +00001593 "Verifying": "Verifying Host",
Alex Millerdfff2fd2013-05-28 13:05:06 -07001594 "Provisioning": "Provisioning Host",
showard8ac29b42008-07-17 17:01:55 +00001595 "Pending": "Waiting on other hosts",
1596 "Running": "Running autoserv",
1597 "Completed": "Autoserv completed",
1598 "Failed": "Failed to complete",
showardd823b362008-07-24 16:35:46 +00001599 "Queued": "Queued",
showard5deb6772008-11-04 21:54:33 +00001600 "Starting": "Next in host's queue",
1601 "Stopped": "Other host(s) failed verify",
showardd3dc1992009-04-22 21:01:40 +00001602 "Parsing": "Awaiting parse of final results",
showard29f7cd22009-04-29 21:16:24 +00001603 "Gathering": "Gathering log files",
showard8cc058f2009-09-08 16:26:33 +00001604 "Template": "Template job for recurring run",
mbligh4608b002010-01-05 18:22:35 +00001605 "Waiting": "Waiting for scheduler action",
Dan Shi07e09af2013-04-12 09:31:29 -07001606 "Archiving": "Archiving results",
1607 "Resetting": "Resetting hosts"}
Jiaxi Luo421608e2014-07-07 14:38:00 -07001608
1609 result['wmatrix_url'] = rpc_utils.get_wmatrix_url()
Simran Basi71206ef2014-08-13 13:51:18 -07001610 result['is_moblab'] = bool(utils.is_moblab())
Jiaxi Luo421608e2014-07-07 14:38:00 -07001611
jadmanski0afbb632008-06-06 21:10:57 +00001612 return result
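# Illustrative usage: the frontend calls this once at page load; individual
# keys can be inspected directly.
#
#   static = get_static_data()
#   print static['priorities'], static['motd'], static['is_moblab']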
showard29f7cd22009-04-29 21:16:24 +00001613
1614
1615def get_server_time():
1616 return datetime.datetime.now().strftime("%Y-%m-%d %H:%M")