blob: 0ea141841b7754960ed9c81228bcef65cb0ed5d2 [file] [log] [blame]
Aviv Keshet18308922013-02-19 17:49:49 -08001#pylint: disable-msg=C0111
xixuanba232a32016-08-25 17:01:59 -07002"""
mblighe8819cd2008-02-15 16:48:40 +00003Utility functions for rpc_interface.py. We keep them in a separate file so that
4only RPC interface functions go into that file.
5"""
6
7__author__ = 'showard@google.com (Steve Howard)'
8
Allen Li3d4e6112016-12-28 11:10:25 -08009import collections
MK Ryu84573e12015-02-18 15:54:09 -080010import datetime
MK Ryufbb002c2015-06-08 14:13:16 -070011from functools import wraps
MK Ryu84573e12015-02-18 15:54:09 -080012import inspect
13import os
14import sys
Fang Deng7051fe42015-10-20 14:57:28 -070015import django.db.utils
showard3d6ae112009-05-02 00:45:48 +000016import django.http
MK Ryu0a9c82e2015-09-17 17:54:01 -070017
18from autotest_lib.frontend import thread_local
Dan Shi07e09af2013-04-12 09:31:29 -070019from autotest_lib.frontend.afe import models, model_logic
Alex Miller4a193692013-08-21 13:59:01 -070020from autotest_lib.client.common_lib import control_data, error
Allen Li352b86a2016-12-14 12:11:27 -080021from autotest_lib.client.common_lib import global_config
MK Ryu0c1a37d2015-04-30 12:00:55 -070022from autotest_lib.client.common_lib import time_utils
Allen Li3d43e602016-12-08 15:09:51 -080023from autotest_lib.client.common_lib.cros import dev_server
Aviv Keshet14cac442016-11-20 21:44:11 -080024# TODO(akeshet): Replace with monarch once we know how to instrument rpc server
25# with ts_mon.
MK Ryu509516b2015-05-18 12:00:47 -070026from autotest_lib.client.common_lib.cros.graphite import autotest_stats
Prashanth Balasubramanian8c98ac12014-12-23 11:26:44 -080027from autotest_lib.server import utils as server_utils
MK Ryu9651ca52015-06-08 17:48:22 -070028from autotest_lib.server.cros import provision
29from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
mblighe8819cd2008-02-15 16:48:40 +000030
showarda62866b2008-07-28 21:27:41 +000031NULL_DATETIME = datetime.datetime.max
32NULL_DATE = datetime.date.max
Fang Deng7051fe42015-10-20 14:57:28 -070033DUPLICATE_KEY_MSG = 'Duplicate entry'
showarda62866b2008-07-28 21:27:41 +000034
mblighe8819cd2008-02-15 16:48:40 +000035def prepare_for_serialization(objects):
jadmanski0afbb632008-06-06 21:10:57 +000036 """
37 Prepare Python objects to be returned via RPC.
Aviv Keshet18308922013-02-19 17:49:49 -080038 @param objects: objects to be prepared.
jadmanski0afbb632008-06-06 21:10:57 +000039 """
40 if (isinstance(objects, list) and len(objects) and
41 isinstance(objects[0], dict) and 'id' in objects[0]):
42 objects = gather_unique_dicts(objects)
43 return _prepare_data(objects)
showardb8d34242008-04-25 18:11:16 +000044
45
showardc92da832009-04-07 18:14:34 +000046def prepare_rows_as_nested_dicts(query, nested_dict_column_names):
47 """
48 Prepare a Django query to be returned via RPC as a sequence of nested
49 dictionaries.
50
51 @param query - A Django model query object with a select_related() method.
52 @param nested_dict_column_names - A list of column/attribute names for the
53 rows returned by query to expand into nested dictionaries using
54 their get_object_dict() method when not None.
55
56 @returns An list suitable to returned in an RPC.
57 """
58 all_dicts = []
59 for row in query.select_related():
60 row_dict = row.get_object_dict()
61 for column in nested_dict_column_names:
62 if row_dict[column] is not None:
63 row_dict[column] = getattr(row, column).get_object_dict()
64 all_dicts.append(row_dict)
65 return prepare_for_serialization(all_dicts)
66
67
showardb8d34242008-04-25 18:11:16 +000068def _prepare_data(data):
jadmanski0afbb632008-06-06 21:10:57 +000069 """
70 Recursively process data structures, performing necessary type
71 conversions to values in data to allow for RPC serialization:
72 -convert datetimes to strings
showard2b9a88b2008-06-13 20:55:03 +000073 -convert tuples and sets to lists
jadmanski0afbb632008-06-06 21:10:57 +000074 """
75 if isinstance(data, dict):
76 new_data = {}
77 for key, value in data.iteritems():
78 new_data[key] = _prepare_data(value)
79 return new_data
showard2b9a88b2008-06-13 20:55:03 +000080 elif (isinstance(data, list) or isinstance(data, tuple) or
81 isinstance(data, set)):
jadmanski0afbb632008-06-06 21:10:57 +000082 return [_prepare_data(item) for item in data]
showard98659972008-07-17 17:00:07 +000083 elif isinstance(data, datetime.date):
showarda62866b2008-07-28 21:27:41 +000084 if data is NULL_DATETIME or data is NULL_DATE:
85 return None
jadmanski0afbb632008-06-06 21:10:57 +000086 return str(data)
87 else:
88 return data
mblighe8819cd2008-02-15 16:48:40 +000089
90
Moises Osorio2dda22e2014-09-16 15:56:24 -070091def fetchall_as_list_of_dicts(cursor):
92 """
93 Converts each row in the cursor to a dictionary so that values can be read
94 by using the column name.
95 @param cursor: The database cursor to read from.
96 @returns: A list of each row in the cursor as a dictionary.
97 """
98 desc = cursor.description
99 return [ dict(zip([col[0] for col in desc], row))
100 for row in cursor.fetchall() ]
101
102
showard3d6ae112009-05-02 00:45:48 +0000103def raw_http_response(response_data, content_type=None):
104 response = django.http.HttpResponse(response_data, mimetype=content_type)
105 response['Content-length'] = str(len(response.content))
106 return response
107
108
showardb0dfb9f2008-06-06 18:08:02 +0000109def gather_unique_dicts(dict_iterable):
jadmanski0afbb632008-06-06 21:10:57 +0000110 """\
111 Pick out unique objects (by ID) from an iterable of object dicts.
112 """
113 id_set = set()
114 result = []
115 for obj in dict_iterable:
116 if obj['id'] not in id_set:
117 id_set.add(obj['id'])
118 result.append(obj)
119 return result
showardb0dfb9f2008-06-06 18:08:02 +0000120
121
Jiaxi Luo15cbf372014-07-01 19:20:20 -0700122def extra_job_status_filters(not_yet_run=False, running=False, finished=False):
jadmanski0afbb632008-06-06 21:10:57 +0000123 """\
124 Generate a SQL WHERE clause for job status filtering, and return it in
Simran Basi01984f52015-10-12 15:36:45 -0700125 a dict of keyword args to pass to query.extra().
showard6c65d252009-10-01 18:45:22 +0000126 * not_yet_run: all HQEs are Queued
127 * finished: all HQEs are complete
128 * running: everything else
jadmanski0afbb632008-06-06 21:10:57 +0000129 """
Simran Basi01984f52015-10-12 15:36:45 -0700130 if not (not_yet_run or running or finished):
131 return {}
showardeab66ce2009-12-23 00:03:56 +0000132 not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
133 'WHERE status != "%s")'
showard6c65d252009-10-01 18:45:22 +0000134 % models.HostQueueEntry.Status.QUEUED)
showardeab66ce2009-12-23 00:03:56 +0000135 not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
136 'WHERE not complete)')
showard6c65d252009-10-01 18:45:22 +0000137
Simran Basi01984f52015-10-12 15:36:45 -0700138 where = []
jadmanski0afbb632008-06-06 21:10:57 +0000139 if not_yet_run:
Simran Basi01984f52015-10-12 15:36:45 -0700140 where.append('id NOT IN ' + not_queued)
141 if running:
142 where.append('(id IN %s) AND (id IN %s)' % (not_queued, not_finished))
143 if finished:
144 where.append('id NOT IN ' + not_finished)
145 return {'where': [' OR '.join(['(%s)' % x for x in where])]}
mblighe8819cd2008-02-15 16:48:40 +0000146
147
Jiaxi Luo15cbf372014-07-01 19:20:20 -0700148def extra_job_type_filters(extra_args, suite=False,
149 sub=False, standalone=False):
150 """\
151 Generate a SQL WHERE clause for job status filtering, and return it in
152 a dict of keyword args to pass to query.extra().
153
154 param extra_args: a dict of existing extra_args.
155
156 No more than one of the parameters should be passed as True:
157 * suite: job which is parent of other jobs
158 * sub: job with a parent job
159 * standalone: job with no child or parent jobs
160 """
161 assert not ((suite and sub) or
162 (suite and standalone) or
163 (sub and standalone)), ('Cannot specify more than one '
164 'filter to this function')
165
166 where = extra_args.get('where', [])
167 parent_job_id = ('DISTINCT parent_job_id')
168 child_job_id = ('id')
169 filter_common = ('(SELECT %s FROM afe_jobs '
170 'WHERE parent_job_id IS NOT NULL)')
171
172 if suite:
173 where.append('id IN ' + filter_common % parent_job_id)
174 elif sub:
175 where.append('id IN ' + filter_common % child_job_id)
176 elif standalone:
177 where.append('NOT EXISTS (SELECT 1 from afe_jobs AS sub_query '
178 'WHERE parent_job_id IS NOT NULL'
179 ' AND (sub_query.parent_job_id=afe_jobs.id'
180 ' OR sub_query.id=afe_jobs.id))')
181 else:
182 return extra_args
183
184 extra_args['where'] = where
185 return extra_args
186
187
188
showard87cc38f2009-08-20 23:37:04 +0000189def extra_host_filters(multiple_labels=()):
jadmanski0afbb632008-06-06 21:10:57 +0000190 """\
191 Generate SQL WHERE clauses for matching hosts in an intersection of
192 labels.
193 """
194 extra_args = {}
showardeab66ce2009-12-23 00:03:56 +0000195 where_str = ('afe_hosts.id in (select host_id from afe_hosts_labels '
jadmanski0afbb632008-06-06 21:10:57 +0000196 'where label_id=%s)')
197 extra_args['where'] = [where_str] * len(multiple_labels)
198 extra_args['params'] = [models.Label.smart_get(label).id
199 for label in multiple_labels]
200 return extra_args
showard8e3aa5e2008-04-08 19:42:32 +0000201
202
showard87cc38f2009-08-20 23:37:04 +0000203def get_host_query(multiple_labels, exclude_only_if_needed_labels,
showard8aa84fc2009-09-16 17:17:55 +0000204 exclude_atomic_group_hosts, valid_only, filter_data):
205 if valid_only:
206 query = models.Host.valid_objects.all()
207 else:
208 query = models.Host.objects.all()
209
showard43a3d262008-11-12 18:17:05 +0000210 if exclude_only_if_needed_labels:
211 only_if_needed_labels = models.Label.valid_objects.filter(
212 only_if_needed=True)
showardf7eac6f2008-11-13 21:18:01 +0000213 if only_if_needed_labels.count() > 0:
showard87cc38f2009-08-20 23:37:04 +0000214 only_if_needed_ids = ','.join(
215 str(label['id'])
216 for label in only_if_needed_labels.values('id'))
showardf7eac6f2008-11-13 21:18:01 +0000217 query = models.Host.objects.add_join(
showardeab66ce2009-12-23 00:03:56 +0000218 query, 'afe_hosts_labels', join_key='host_id',
219 join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
showard87cc38f2009-08-20 23:37:04 +0000220 % only_if_needed_ids),
221 suffix='_exclude_OIN', exclude=True)
showard8aa84fc2009-09-16 17:17:55 +0000222
showard87cc38f2009-08-20 23:37:04 +0000223 if exclude_atomic_group_hosts:
224 atomic_group_labels = models.Label.valid_objects.filter(
225 atomic_group__isnull=False)
226 if atomic_group_labels.count() > 0:
227 atomic_group_label_ids = ','.join(
228 str(atomic_group['id'])
229 for atomic_group in atomic_group_labels.values('id'))
230 query = models.Host.objects.add_join(
showardeab66ce2009-12-23 00:03:56 +0000231 query, 'afe_hosts_labels', join_key='host_id',
232 join_condition=(
233 'afe_hosts_labels_exclude_AG.label_id IN (%s)'
234 % atomic_group_label_ids),
showard87cc38f2009-08-20 23:37:04 +0000235 suffix='_exclude_AG', exclude=True)
Fang Deng04d30612013-04-10 18:13:13 -0700236 try:
237 assert 'extra_args' not in filter_data
238 filter_data['extra_args'] = extra_host_filters(multiple_labels)
239 return models.Host.query_objects(filter_data, initial_query=query)
240 except models.Label.DoesNotExist as e:
241 return models.Host.objects.none()
showard43a3d262008-11-12 18:17:05 +0000242
243
showard8fd58242008-03-10 21:29:07 +0000244class InconsistencyException(Exception):
jadmanski0afbb632008-06-06 21:10:57 +0000245 'Raised when a list of objects does not have a consistent value'
showard8fd58242008-03-10 21:29:07 +0000246
247
def get_consistent_value(objects, field):
    """Return the value of field shared by every object in objects.

    @param objects: sequence of objects to inspect.
    @param field: attribute name to read from each object.

    @returns The common value, or None for an empty sequence.
    @raises InconsistencyException: if two objects disagree on the value.
    """
    if not objects:
        # well a list of nothing is consistent
        return None

    expected = getattr(objects[0], field)
    for candidate in objects:
        if getattr(candidate, field) != expected:
            raise InconsistencyException(objects[0], candidate)
    return expected
showard8fd58242008-03-10 21:29:07 +0000259
260
Matthew Sartori10438092015-06-24 14:30:18 -0700261def afe_test_dict_to_test_object(test_dict):
262 if not isinstance(test_dict, dict):
263 return test_dict
264
265 numerized_dict = {}
266 for key, value in test_dict.iteritems():
267 try:
268 numerized_dict[key] = int(value)
269 except (ValueError, TypeError):
270 numerized_dict[key] = value
271
272 return type('TestObject', (object,), numerized_dict)
273
274
Michael Tang84a2ecf2016-06-07 15:10:53 -0700275def _check_is_server_test(test_type):
276 """Checks if the test type is a server test.
277
278 @param test_type The test type in enum integer or string.
279
280 @returns A boolean to identify if the test type is server test.
281 """
282 if test_type is not None:
283 if isinstance(test_type, basestring):
284 try:
285 test_type = control_data.CONTROL_TYPE.get_value(test_type)
286 except AttributeError:
287 return False
288 return (test_type == control_data.CONTROL_TYPE.SERVER)
289 return False
290
291
Richard Barnette8e33b4e2016-05-21 12:12:26 -0700292def prepare_generate_control_file(tests, profilers, db_tests=True):
Matthew Sartori10438092015-06-24 14:30:18 -0700293 if db_tests:
294 test_objects = [models.Test.smart_get(test) for test in tests]
295 else:
296 test_objects = [afe_test_dict_to_test_object(test) for test in tests]
297
showard2b9a88b2008-06-13 20:55:03 +0000298 profiler_objects = [models.Profiler.smart_get(profiler)
299 for profiler in profilers]
jadmanski0afbb632008-06-06 21:10:57 +0000300 # ensure tests are all the same type
301 try:
302 test_type = get_consistent_value(test_objects, 'test_type')
303 except InconsistencyException, exc:
304 test1, test2 = exc.args
mblighec5546d2008-06-16 16:51:28 +0000305 raise model_logic.ValidationError(
Matthew Sartori10438092015-06-24 14:30:18 -0700306 {'tests' : 'You cannot run both test_suites and server-side '
jadmanski0afbb632008-06-06 21:10:57 +0000307 'tests together (tests %s and %s differ' % (
308 test1.name, test2.name)})
showard8fd58242008-03-10 21:29:07 +0000309
Michael Tang84a2ecf2016-06-07 15:10:53 -0700310 is_server = _check_is_server_test(test_type)
showard14374b12009-01-31 00:11:54 +0000311 if test_objects:
312 synch_count = max(test.sync_count for test in test_objects)
313 else:
314 synch_count = 1
mblighe8819cd2008-02-15 16:48:40 +0000315
Matthew Sartori10438092015-06-24 14:30:18 -0700316 if db_tests:
317 dependencies = set(label.name for label
318 in models.Label.objects.filter(test__in=test_objects))
319 else:
320 dependencies = reduce(
321 set.union, [set(test.dependencies) for test in test_objects])
showard989f25d2008-10-01 11:38:11 +0000322
showard2bab8f42008-11-12 18:15:22 +0000323 cf_info = dict(is_server=is_server, synch_count=synch_count,
324 dependencies=list(dependencies))
Richard Barnette8e33b4e2016-05-21 12:12:26 -0700325 return cf_info, test_objects, profiler_objects
showard989f25d2008-10-01 11:38:11 +0000326
327
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.

    @param host_objects: list of models.Host objects.
    @param job_dependencies: list of names of labels.
    @raises model_logic.ValidationError: naming the hosts that do not carry
            every required (non-provisioning) dependency label.
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    hosts_in_job = models.Host.objects.filter(id__in=host_ids)
    ok_hosts = hosts_in_job
    # Iterate directly: the previous enumerate() index was never used.
    for dependency in job_dependencies:
        if not provision.is_for_special_action(dependency):
            ok_hosts = ok_hosts.filter(labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
            {'hosts' : 'Host(s) failed to meet job dependencies (' +
                       (', '.join(job_dependencies)) + '): ' +
                       (', '.join(failing_hosts))})
348
showard989f25d2008-10-01 11:38:11 +0000349
Alex Miller4a193692013-08-21 13:59:01 -0700350def check_job_metahost_dependencies(metahost_objects, job_dependencies):
351 """
352 Check that at least one machine within the metahost spec satisfies the job's
353 dependencies.
354
355 @param metahost_objects A list of label objects representing the metahosts.
356 @param job_dependencies A list of strings of the required label names.
357 @raises NoEligibleHostException If a metahost cannot run the job.
358 """
359 for metahost in metahost_objects:
360 hosts = models.Host.objects.filter(labels=metahost)
361 for label_name in job_dependencies:
Alex Milleraa772002014-04-10 17:51:21 -0700362 if not provision.is_for_special_action(label_name):
Alex Miller4a193692013-08-21 13:59:01 -0700363 hosts = hosts.filter(labels__name=label_name)
364 if not any(hosts):
365 raise error.NoEligibleHostException("No hosts within %s satisfy %s."
366 % (metahost.name, ', '.join(job_dependencies)))
367
showard2bab8f42008-11-12 18:15:22 +0000368
369def _execution_key_for(host_queue_entry):
370 return (host_queue_entry.job.id, host_queue_entry.execution_subdir)
371
372
def check_abort_synchronous_jobs(host_queue_entries):
    """Reject aborts that would cancel only part of a synchronous execution.

    @param host_queue_entries: HostQueueEntry objects the user wants aborted.
    @raises model_logic.ValidationError: if fewer entries of some execution
            are included than that job's synch_count requires.
    """
    # ensure user isn't aborting part of a synchronous autoserv execution
    count_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        count_per_execution[key] = count_per_execution.get(key, 0) + 1

    for queue_entry in host_queue_entries:
        if not queue_entry.execution_subdir:
            continue
        execution_count = count_per_execution[_execution_key_for(queue_entry)]
        if execution_count < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                {'' : 'You cannot abort part of a synchronous job execution '
                      '(%d/%s), %d included, %d expected'
                      % (queue_entry.job.id, queue_entry.execution_subdir,
                         execution_count, queue_entry.job.synch_count)})
showard8fbae652009-01-20 23:23:10 +0000391
392
showardc92da832009-04-07 18:14:34 +0000393def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
Alex Miller871291b2013-08-08 01:19:20 -0700394 dependencies, atomic_group):
showardc92da832009-04-07 18:14:34 +0000395 """
396 Attempt to reject create_job requests with an atomic group that
397 will be impossible to schedule. The checks are not perfect but
398 should catch the most obvious issues.
399
400 @param synch_count - The job's minimum synch count.
401 @param host_objects - A list of models.Host instances.
402 @param metahost_objects - A list of models.Label instances.
403 @param dependencies - A list of job dependency label names.
showardc92da832009-04-07 18:14:34 +0000404 @param labels_by_name - A dictionary mapping label names to models.Label
405 instance. Used to look up instances for dependencies.
406
407 @raises model_logic.ValidationError - When an issue is found.
408 """
Allen Li224aa552016-12-14 17:17:22 -0800409 if synch_count and synch_count > atomic_group.max_number_of_machines:
410 raise model_logic.ValidationError(
411 {'atomic_group_name' :
412 'You have requested a synch_count (%d) greater than the '
413 'maximum machines in the requested Atomic Group (%d).' %
414 (synch_count, atomic_group.max_number_of_machines)})
415
showardc92da832009-04-07 18:14:34 +0000416 # If specific host objects were supplied with an atomic group, verify
417 # that there are enough to satisfy the synch_count.
418 minimum_required = synch_count or 1
419 if (host_objects and not metahost_objects and
420 len(host_objects) < minimum_required):
421 raise model_logic.ValidationError(
422 {'hosts':
423 'only %d hosts provided for job with synch_count = %d' %
424 (len(host_objects), synch_count)})
425
426 # Check that the atomic group has a hope of running this job
427 # given any supplied metahosts and dependancies that may limit.
428
429 # Get a set of hostnames in the atomic group.
430 possible_hosts = set()
431 for label in atomic_group.label_set.all():
432 possible_hosts.update(h.hostname for h in label.host_set.all())
433
434 # Filter out hosts that don't match all of the job dependency labels.
Alex Miller871291b2013-08-08 01:19:20 -0700435 for label in models.Label.objects.filter(name__in=dependencies):
showardc92da832009-04-07 18:14:34 +0000436 hosts_in_label = (h.hostname for h in label.host_set.all())
437 possible_hosts.intersection_update(hosts_in_label)
438
showard225bdc12009-04-13 16:09:21 +0000439 if not host_objects and not metahost_objects:
440 # No hosts or metahosts are required to queue an atomic group Job.
441 # However, if they are given, we respect them below.
442 host_set = possible_hosts
443 else:
444 host_set = set(host.hostname for host in host_objects)
445 unusable_host_set = host_set.difference(possible_hosts)
446 if unusable_host_set:
447 raise model_logic.ValidationError(
448 {'hosts': 'Hosts "%s" are not in Atomic Group "%s"' %
449 (', '.join(sorted(unusable_host_set)), atomic_group.name)})
showardc92da832009-04-07 18:14:34 +0000450
451 # Lookup hosts provided by each meta host and merge them into the
452 # host_set for final counting.
453 for meta_host in metahost_objects:
454 meta_possible = possible_hosts.copy()
455 hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
456 meta_possible.intersection_update(hosts_in_meta_host)
457
458 # Count all hosts that this meta_host will provide.
459 host_set.update(meta_possible)
460
461 if len(host_set) < minimum_required:
462 raise model_logic.ValidationError(
463 {'atomic_group_name':
464 'Insufficient hosts in Atomic Group "%s" with the'
465 ' supplied dependencies and meta_hosts.' %
466 (atomic_group.name,)})
467
468
showardbe0d8692009-08-20 23:42:44 +0000469def check_modify_host(update_data):
470 """
471 Sanity check modify_host* requests.
472
473 @param update_data: A dictionary with the changes to make to a host
474 or hosts.
475 """
476 # Only the scheduler (monitor_db) is allowed to modify Host status.
477 # Otherwise race conditions happen as a hosts state is changed out from
478 # beneath tasks being run on a host.
479 if 'status' in update_data:
480 raise model_logic.ValidationError({
481 'status': 'Host status can not be modified by the frontend.'})
482
483
showardce7c0922009-09-11 18:39:24 +0000484def check_modify_host_locking(host, update_data):
485 """
486 Checks when locking/unlocking has been requested if the host is already
487 locked/unlocked.
488
489 @param host: models.Host object to be modified
490 @param update_data: A dictionary with the changes to make to the host.
491 """
492 locked = update_data.get('locked', None)
Matthew Sartori68186332015-04-27 17:19:53 -0700493 lock_reason = update_data.get('lock_reason', None)
showardce7c0922009-09-11 18:39:24 +0000494 if locked is not None:
495 if locked and host.locked:
496 raise model_logic.ValidationError({
Shuqian Zhao4c0d2902016-01-12 17:03:15 -0800497 'locked': 'Host %s already locked by %s on %s.' %
498 (host.hostname, host.locked_by, host.lock_time)})
showardce7c0922009-09-11 18:39:24 +0000499 if not locked and not host.locked:
500 raise model_logic.ValidationError({
Shuqian Zhao4c0d2902016-01-12 17:03:15 -0800501 'locked': 'Host %s already unlocked.' % host.hostname})
Matthew Sartori68186332015-04-27 17:19:53 -0700502 if locked and not lock_reason and not host.locked:
503 raise model_logic.ValidationError({
Shuqian Zhao4c0d2902016-01-12 17:03:15 -0800504 'locked': 'Please provide a reason for locking Host %s' %
505 host.hostname})
showardce7c0922009-09-11 18:39:24 +0000506
507
showard8fbae652009-01-20 23:23:10 +0000508def get_motd():
509 dirname = os.path.dirname(__file__)
510 filename = os.path.join(dirname, "..", "..", "motd.txt")
511 text = ''
512 try:
513 fp = open(filename, "r")
514 try:
515 text = fp.read()
516 finally:
517 fp.close()
518 except:
519 pass
520
521 return text
showard29f7cd22009-04-29 21:16:24 +0000522
523
524def _get_metahost_counts(metahost_objects):
525 metahost_counts = {}
526 for metahost in metahost_objects:
527 metahost_counts.setdefault(metahost, 0)
528 metahost_counts[metahost] += 1
529 return metahost_counts
530
531
showarda965cef2009-05-15 23:17:41 +0000532def get_job_info(job, preserve_metahosts=False, queue_entry_filter_data=None):
showard29f7cd22009-04-29 21:16:24 +0000533 hosts = []
534 one_time_hosts = []
535 meta_hosts = []
536 atomic_group = None
jamesren2275ef12010-04-12 18:25:06 +0000537 hostless = False
showard29f7cd22009-04-29 21:16:24 +0000538
showard4d077562009-05-08 18:24:36 +0000539 queue_entries = job.hostqueueentry_set.all()
showarda965cef2009-05-15 23:17:41 +0000540 if queue_entry_filter_data:
541 queue_entries = models.HostQueueEntry.query_objects(
542 queue_entry_filter_data, initial_query=queue_entries)
showard4d077562009-05-08 18:24:36 +0000543
544 for queue_entry in queue_entries:
showard29f7cd22009-04-29 21:16:24 +0000545 if (queue_entry.host and (preserve_metahosts or
546 not queue_entry.meta_host)):
547 if queue_entry.deleted:
548 continue
549 if queue_entry.host.invalid:
550 one_time_hosts.append(queue_entry.host)
551 else:
552 hosts.append(queue_entry.host)
jamesren2275ef12010-04-12 18:25:06 +0000553 elif queue_entry.meta_host:
showard29f7cd22009-04-29 21:16:24 +0000554 meta_hosts.append(queue_entry.meta_host)
jamesren2275ef12010-04-12 18:25:06 +0000555 else:
556 hostless = True
557
showard29f7cd22009-04-29 21:16:24 +0000558 if atomic_group is None:
559 if queue_entry.atomic_group is not None:
560 atomic_group = queue_entry.atomic_group
561 else:
562 assert atomic_group.name == queue_entry.atomic_group.name, (
563 'DB inconsistency. HostQueueEntries with multiple atomic'
564 ' groups on job %s: %s != %s' % (
565 id, atomic_group.name, queue_entry.atomic_group.name))
566
567 meta_host_counts = _get_metahost_counts(meta_hosts)
568
569 info = dict(dependencies=[label.name for label
570 in job.dependency_labels.all()],
571 hosts=hosts,
572 meta_hosts=meta_hosts,
573 meta_host_counts=meta_host_counts,
574 one_time_hosts=one_time_hosts,
jamesren2275ef12010-04-12 18:25:06 +0000575 atomic_group=atomic_group,
576 hostless=hostless)
showard29f7cd22009-04-29 21:16:24 +0000577 return info
578
579
showard09d80f92009-11-19 01:01:19 +0000580def check_for_duplicate_hosts(host_objects):
Allen Li8239c352016-12-14 17:53:01 -0800581 host_counts = collections.Counter(host_objects)
582 duplicate_hostnames = {host.hostname
583 for host, count in host_counts.iteritems()
584 if count > 1}
showard09d80f92009-11-19 01:01:19 +0000585 if duplicate_hostnames:
586 raise model_logic.ValidationError(
587 {'hosts' : 'Duplicate hosts: %s'
588 % ', '.join(duplicate_hostnames)})
589
590
showarda1e74b32009-05-12 17:32:04 +0000591def create_new_job(owner, options, host_objects, metahost_objects,
592 atomic_group=None):
showard29f7cd22009-04-29 21:16:24 +0000593 all_host_objects = host_objects + metahost_objects
showarda1e74b32009-05-12 17:32:04 +0000594 dependencies = options.get('dependencies', [])
595 synch_count = options.get('synch_count')
showard29f7cd22009-04-29 21:16:24 +0000596
showard29f7cd22009-04-29 21:16:24 +0000597 if atomic_group:
598 check_atomic_group_create_job(
599 synch_count, host_objects, metahost_objects,
Alex Miller871291b2013-08-08 01:19:20 -0700600 dependencies, atomic_group)
showard29f7cd22009-04-29 21:16:24 +0000601 else:
602 if synch_count is not None and synch_count > len(all_host_objects):
603 raise model_logic.ValidationError(
604 {'hosts':
605 'only %d hosts provided for job with synch_count = %d' %
606 (len(all_host_objects), synch_count)})
607 atomic_hosts = models.Host.objects.filter(
608 id__in=[host.id for host in host_objects],
609 labels__atomic_group=True)
610 unusable_host_names = [host.hostname for host in atomic_hosts]
611 if unusable_host_names:
612 raise model_logic.ValidationError(
613 {'hosts':
614 'Host(s) "%s" are atomic group hosts but no '
615 'atomic group was specified for this job.' %
616 (', '.join(unusable_host_names),)})
617
showard09d80f92009-11-19 01:01:19 +0000618 check_for_duplicate_hosts(host_objects)
showard29f7cd22009-04-29 21:16:24 +0000619
Aviv Keshetc68807e2013-07-31 16:13:01 -0700620 for label_name in dependencies:
Alex Milleraa772002014-04-10 17:51:21 -0700621 if provision.is_for_special_action(label_name):
Aviv Keshetc68807e2013-07-31 16:13:01 -0700622 # TODO: We could save a few queries
623 # if we had a bulk ensure-label-exists function, which used
624 # a bulk .get() call. The win is probably very small.
Alex Miller871291b2013-08-08 01:19:20 -0700625 _ensure_label_exists(label_name)
Aviv Keshetc68807e2013-07-31 16:13:01 -0700626
Alex Miller4a193692013-08-21 13:59:01 -0700627 # This only checks targeted hosts, not hosts eligible due to the metahost
628 check_job_dependencies(host_objects, dependencies)
629 check_job_metahost_dependencies(metahost_objects, dependencies)
630
Alex Miller871291b2013-08-08 01:19:20 -0700631 options['dependencies'] = list(
632 models.Label.objects.filter(name__in=dependencies))
showard29f7cd22009-04-29 21:16:24 +0000633
showarda1e74b32009-05-12 17:32:04 +0000634 for label in metahost_objects + options['dependencies']:
showard29f7cd22009-04-29 21:16:24 +0000635 if label.atomic_group and not atomic_group:
636 raise model_logic.ValidationError(
637 {'atomic_group_name':
showardc8730322009-06-30 01:56:38 +0000638 'Dependency %r requires an atomic group but no '
639 'atomic_group_name or meta_host in an atomic group was '
640 'specified for this job.' % label.name})
showard29f7cd22009-04-29 21:16:24 +0000641 elif (label.atomic_group and
642 label.atomic_group.name != atomic_group.name):
643 raise model_logic.ValidationError(
644 {'atomic_group_name':
showardc8730322009-06-30 01:56:38 +0000645 'meta_hosts or dependency %r requires atomic group '
646 '%r instead of the supplied atomic_group_name=%r.' %
647 (label.name, label.atomic_group.name, atomic_group.name)})
showard29f7cd22009-04-29 21:16:24 +0000648
showarda1e74b32009-05-12 17:32:04 +0000649 job = models.Job.create(owner=owner, options=options,
650 hosts=all_host_objects)
showard29f7cd22009-04-29 21:16:24 +0000651 job.queue(all_host_objects, atomic_group=atomic_group,
showarda1e74b32009-05-12 17:32:04 +0000652 is_template=options.get('is_template', False))
showard29f7cd22009-04-29 21:16:24 +0000653 return job.id
showard0957a842009-05-11 19:25:08 +0000654
655
Aviv Keshetc68807e2013-07-31 16:13:01 -0700656def _ensure_label_exists(name):
657 """
658 Ensure that a label called |name| exists in the Django models.
659
660 This function is to be called from within afe rpcs only, as an
661 alternative to server.cros.provision.ensure_label_exists(...). It works
662 by Django model manipulation, rather than by making another create_label
663 rpc call.
664
665 @param name: the label to check for/create.
666 @raises ValidationError: There was an error in the response that was
667 not because the label already existed.
668 @returns True is a label was created, False otherwise.
669 """
MK Ryu73be9862015-07-06 12:25:00 -0700670 # Make sure this function is not called on shards but only on master.
671 assert not server_utils.is_shard()
Aviv Keshetc68807e2013-07-31 16:13:01 -0700672 try:
673 models.Label.objects.get(name=name)
674 except models.Label.DoesNotExist:
Fang Deng7051fe42015-10-20 14:57:28 -0700675 try:
676 new_label = models.Label.objects.create(name=name)
677 new_label.save()
678 return True
679 except django.db.utils.IntegrityError as e:
680 # It is possible that another suite/test already
681 # created the label between the check and save.
682 if DUPLICATE_KEY_MSG in str(e):
683 return False
684 else:
685 raise
Aviv Keshetc68807e2013-07-31 16:13:01 -0700686 return False
687
688
showard909c9142009-07-07 20:54:42 +0000689def find_platform_and_atomic_group(host):
690 """
691 Figure out the platform name and atomic group name for the given host
692 object. If none, the return value for either will be None.
693
694 @returns (platform name, atomic group name) for the given host.
695 """
showard0957a842009-05-11 19:25:08 +0000696 platforms = [label.name for label in host.label_list if label.platform]
697 if not platforms:
showard909c9142009-07-07 20:54:42 +0000698 platform = None
699 else:
700 platform = platforms[0]
showard0957a842009-05-11 19:25:08 +0000701 if len(platforms) > 1:
702 raise ValueError('Host %s has more than one platform: %s' %
703 (host.hostname, ', '.join(platforms)))
showard909c9142009-07-07 20:54:42 +0000704 for label in host.label_list:
705 if label.atomic_group:
706 atomic_group_name = label.atomic_group.name
707 break
708 else:
709 atomic_group_name = None
710 # Don't check for multiple atomic groups on a host here. That is an
711 # error but should not trip up the RPC interface. monitor_db_cleanup
712 # deals with it. This just returns the first one found.
713 return platform, atomic_group_name
showardc0ac3a72009-07-08 21:14:45 +0000714
715
716# support for get_host_queue_entries_and_special_tasks()
717
MK Ryu0c1a37d2015-04-30 12:00:55 -0700718def _common_entry_to_dict(entry, type, job_dict, exec_path, status, started_on):
showardc0ac3a72009-07-08 21:14:45 +0000719 return dict(type=type,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700720 host=entry['host'],
showardc0ac3a72009-07-08 21:14:45 +0000721 job=job_dict,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700722 execution_path=exec_path,
723 status=status,
724 started_on=started_on,
725 id=str(entry['id']) + type,
726 oid=entry['id'])
showardc0ac3a72009-07-08 21:14:45 +0000727
728
MK Ryu0c1a37d2015-04-30 12:00:55 -0700729def _special_task_to_dict(task, queue_entries):
730 """Transforms a special task dictionary to another form of dictionary.
731
732 @param task Special task as a dictionary type
733 @param queue_entries Host queue entries as a list of dictionaries.
734
735 @return Transformed dictionary for a special task.
736 """
showardc0ac3a72009-07-08 21:14:45 +0000737 job_dict = None
MK Ryu0c1a37d2015-04-30 12:00:55 -0700738 if task['queue_entry']:
739 # Scan queue_entries to get the job detail info.
740 for qentry in queue_entries:
741 if task['queue_entry']['id'] == qentry['id']:
742 job_dict = qentry['job']
743 break
744 # If not found, get it from DB.
745 if job_dict is None:
746 job = models.Job.objects.get(id=task['queue_entry']['job'])
747 job_dict = job.get_object_dict()
748
749 exec_path = server_utils.get_special_task_exec_path(
750 task['host']['hostname'], task['id'], task['task'],
751 time_utils.time_string_to_datetime(task['time_requested']))
752 status = server_utils.get_special_task_status(
753 task['is_complete'], task['success'], task['is_active'])
754 return _common_entry_to_dict(task, task['task'], job_dict,
755 exec_path, status, task['time_started'])
showardc0ac3a72009-07-08 21:14:45 +0000756
757
def _queue_entry_to_dict(queue_entry):
    """Serialize a host queue entry for the entries-and-tasks view.

    @param queue_entry: Host queue entry as a dictionary.

    @returns the transformed dictionary for the queue entry.
    """
    job_dict = queue_entry['job']
    job_tag = server_utils.get_job_tag(job_dict['id'], job_dict['owner'])
    exec_path = server_utils.get_hqe_exec_path(
            job_tag, queue_entry['execution_subdir'])
    return _common_entry_to_dict(queue_entry, 'Job', job_dict, exec_path,
                                 queue_entry['status'],
                                 queue_entry['started_on'])
765
766
def prepare_host_queue_entries_and_special_tasks(interleaved_entries,
                                                 queue_entries):
    """
    Prepare for serialization the interleaved entries of host queue entries
    and special tasks.
    Each element in the entries is a dictionary type.
    The special task dictionary has only a job id for a job and lacks
    the detail of the job while the host queue entry dictionary has.
    queue_entries is used to look up the job detail info.

    @param interleaved_entries Host queue entries and special tasks as a list
                               of dictionaries.
    @param queue_entries Host queue entries as a list of dictionaries.

    @return A post-processed list of dictionaries that is to be serialized.
    """
    # Entries carrying a 'task' key are special tasks; everything else is
    # a host queue entry.
    dict_list = [
            _special_task_to_dict(entry, queue_entries) if 'task' in entry
            else _queue_entry_to_dict(entry)
            for entry in interleaved_entries]
    return prepare_for_serialization(dict_list)
showardc0ac3a72009-07-08 21:14:45 +0000793
794
def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done using two pieces of information:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
    started_on field of queue_entries. this isn't guaranteed to work perfectly
    since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
    last computed job ID.

    Each task dict is mutated in place: a 'next_job_id' key is added.
    Both lists are expected to be ordered by descending ID (see
    interleave_entries).

    @param queue_entries Host queue entries as a list of dictionaries.
    @param special_tasks Special tasks as a list of dictionaries.
    """
    next_job_id = None # most recently computed next job
    hqe_index = 0 # index for scanning by started_on times
    for task in special_tasks:
        if task['queue_entry']:
            # Direct link: the task belongs to this queue entry's job.
            next_job_id = task['queue_entry']['job']
        elif task['time_started'] is not None:
            # Walk entries (newest first) until one started before the
            # task; the last entry seen that started at/after the task's
            # start time becomes the "next" job.
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['started_on'] is None:
                    continue
                t1 = time_utils.time_string_to_datetime(
                        queue_entry['started_on'])
                t2 = time_utils.time_string_to_datetime(task['time_started'])
                if t1 < t2:
                    break
                next_job_id = queue_entry['job']['id']

        task['next_job_id'] = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['job']['id'] < next_job_id:
                    break
                hqe_index += 1
833
834
def interleave_entries(queue_entries, special_tasks):
    """
    Both lists should be ordered by descending ID.

    Merges the two lists so that each job's queue entry is directly
    preceded by the special tasks that ran after that job (i.e. the tasks
    whose computed 'next_job_id' is that job's id).

    @param queue_entries: Host queue entries as a list of dictionaries.
    @param special_tasks: Special tasks as a list of dictionaries. As a
            side effect, _compute_next_job_for_tasks() adds a
            'next_job_id' key to every task dict.

    @returns the merged list of queue entry and special task dictionaries.
    """
    _compute_next_job_for_tasks(queue_entries, special_tasks)

    # start with all special tasks that've run since the last job
    interleaved_entries = []
    for task in special_tasks:
        if task['next_job_id'] is not None:
            break
        interleaved_entries.append(task)

    # now interleave queue entries with the remaining special tasks
    special_task_index = len(interleaved_entries)
    for queue_entry in queue_entries:
        interleaved_entries.append(queue_entry)
        # add all tasks that ran between this job and the previous one
        for task in special_tasks[special_task_index:]:
            if task['next_job_id'] < queue_entry['job']['id']:
                break
            interleaved_entries.append(task)
            special_task_index += 1

    return interleaved_entries
jamesren4a41e012010-07-16 22:33:48 +0000860
861
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -0800862def bucket_hosts_by_shard(host_objs, rpc_hostnames=False):
863 """Figure out which hosts are on which shards.
864
865 @param host_objs: A list of host objects.
866 @param rpc_hostnames: If True, the rpc_hostnames of a shard are returned
867 instead of the 'real' shard hostnames. This only matters for testing
868 environments.
869
870 @return: A map of shard hostname: list of hosts on the shard.
871 """
Allen Li3d4e6112016-12-28 11:10:25 -0800872 shard_host_map = collections.defaultdict(list)
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -0800873 for host in host_objs:
874 if host.shard:
875 shard_name = (host.shard.rpc_hostname() if rpc_hostnames
876 else host.shard.hostname)
Allen Li3d4e6112016-12-28 11:10:25 -0800877 shard_host_map[shard_name].append(host.hostname)
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -0800878 return shard_host_map
879
880
Allen Li1453fdf2016-12-14 12:16:37 -0800881def create_job_common(
882 name,
883 priority,
884 control_type,
885 control_file=None,
886 hosts=(),
887 meta_hosts=(),
888 one_time_hosts=(),
889 atomic_group_name=None,
890 synch_count=None,
891 is_template=False,
892 timeout=None,
893 timeout_mins=None,
894 max_runtime_mins=None,
895 run_verify=True,
896 email_list='',
897 dependencies=(),
898 reboot_before=None,
899 reboot_after=None,
900 parse_failed_repair=None,
901 hostless=False,
902 keyvals=None,
903 drone_set=None,
904 parameterized_job=None,
905 parent_job_id=None,
906 test_retry=0,
907 run_reset=True,
908 require_ssp=None):
Aviv Keshet18308922013-02-19 17:49:49 -0800909 #pylint: disable-msg=C0111
jamesren4a41e012010-07-16 22:33:48 +0000910 """
911 Common code between creating "standard" jobs and creating parameterized jobs
912 """
jamesren4a41e012010-07-16 22:33:48 +0000913 # input validation
Allen Lie6203192016-12-14 13:05:53 -0800914 host_args_passed = any((
915 hosts, meta_hosts, one_time_hosts, atomic_group_name))
jamesren4a41e012010-07-16 22:33:48 +0000916 if hostless:
Allen Lie6203192016-12-14 13:05:53 -0800917 if host_args_passed:
jamesren4a41e012010-07-16 22:33:48 +0000918 raise model_logic.ValidationError({
919 'hostless': 'Hostless jobs cannot include any hosts!'})
Allen Lie6203192016-12-14 13:05:53 -0800920 if control_type != control_data.CONTROL_TYPE_NAMES.SERVER:
jamesren4a41e012010-07-16 22:33:48 +0000921 raise model_logic.ValidationError({
922 'control_type': 'Hostless jobs cannot use client-side '
923 'control files'})
Allen Lie6203192016-12-14 13:05:53 -0800924 elif not host_args_passed:
925 raise model_logic.ValidationError({
926 'arguments' : "For host jobs, you must pass at least one of"
927 " 'hosts', 'meta_hosts', 'one_time_hosts',"
928 " 'atomic_group_name'."
929 })
jamesren4a41e012010-07-16 22:33:48 +0000930
Allen Lie6203192016-12-14 13:05:53 -0800931 atomic_groups_by_name = {
932 group.name: group for group in models.AtomicGroup.objects.all()
933 }
Alex Miller871291b2013-08-08 01:19:20 -0700934 label_objects = list(models.Label.objects.filter(name__in=meta_hosts))
jamesren4a41e012010-07-16 22:33:48 +0000935
936 # Schedule on an atomic group automagically if one of the labels given
937 # is an atomic group label and no explicit atomic_group_name was supplied.
938 if not atomic_group_name:
Allen Lie6203192016-12-14 13:05:53 -0800939 atomic_group_name = _get_atomic_group_name_from_labels(label_objects)
940
jamesren4a41e012010-07-16 22:33:48 +0000941 # convert hostnames & meta hosts to host/label objects
942 host_objects = models.Host.smart_get_bulk(hosts)
Allen Lie6203192016-12-14 13:05:53 -0800943 _validate_host_job_sharding(host_objects)
Allen Lidbc08662016-12-14 16:38:21 -0800944 for host in one_time_hosts:
945 this_host = models.Host.create_one_time_host(host)
946 host_objects.append(this_host)
Allen Lie6203192016-12-14 13:05:53 -0800947
jamesren4a41e012010-07-16 22:33:48 +0000948 metahost_objects = []
Alex Miller871291b2013-08-08 01:19:20 -0700949 meta_host_labels_by_name = {label.name: label for label in label_objects}
Allen Lie6203192016-12-14 13:05:53 -0800950 for label_name in meta_hosts:
Alex Miller871291b2013-08-08 01:19:20 -0700951 if label_name in meta_host_labels_by_name:
952 metahost_objects.append(meta_host_labels_by_name[label_name])
jamesren4a41e012010-07-16 22:33:48 +0000953 elif label_name in atomic_groups_by_name:
954 # If given a metahost name that isn't a Label, check to
955 # see if the user was specifying an Atomic Group instead.
956 atomic_group = atomic_groups_by_name[label_name]
957 if atomic_group_name and atomic_group_name != atomic_group.name:
958 raise model_logic.ValidationError({
959 'meta_hosts': (
960 'Label "%s" not found. If assumed to be an '
961 'atomic group it would conflict with the '
962 'supplied atomic group "%s".' % (
963 label_name, atomic_group_name))})
964 atomic_group_name = atomic_group.name
965 else:
966 raise model_logic.ValidationError(
967 {'meta_hosts' : 'Label "%s" not found' % label_name})
968
969 # Create and sanity check an AtomicGroup object if requested.
970 if atomic_group_name:
971 if one_time_hosts:
972 raise model_logic.ValidationError(
973 {'one_time_hosts':
974 'One time hosts cannot be used with an Atomic Group.'})
975 atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
jamesren4a41e012010-07-16 22:33:48 +0000976 else:
977 atomic_group = None
978
jamesren4a41e012010-07-16 22:33:48 +0000979 options = dict(name=name,
980 priority=priority,
981 control_file=control_file,
982 control_type=control_type,
983 is_template=is_template,
984 timeout=timeout,
Simran Basi7e605742013-11-12 13:43:36 -0800985 timeout_mins=timeout_mins,
Simran Basi34217022012-11-06 13:43:15 -0800986 max_runtime_mins=max_runtime_mins,
jamesren4a41e012010-07-16 22:33:48 +0000987 synch_count=synch_count,
988 run_verify=run_verify,
989 email_list=email_list,
990 dependencies=dependencies,
991 reboot_before=reboot_before,
992 reboot_after=reboot_after,
993 parse_failed_repair=parse_failed_repair,
994 keyvals=keyvals,
995 drone_set=drone_set,
Aviv Keshet18308922013-02-19 17:49:49 -0800996 parameterized_job=parameterized_job,
Aviv Keshetcd1ff9b2013-03-01 14:55:19 -0800997 parent_job_id=parent_job_id,
Dan Shi07e09af2013-04-12 09:31:29 -0700998 test_retry=test_retry,
Dan Shiec1d47d2015-02-13 11:38:13 -0800999 run_reset=run_reset,
1000 require_ssp=require_ssp)
Allen Lie6203192016-12-14 13:05:53 -08001001
1002 return create_new_job(owner=models.User.current_user().login,
jamesren4a41e012010-07-16 22:33:48 +00001003 options=options,
1004 host_objects=host_objects,
1005 metahost_objects=metahost_objects,
1006 atomic_group=atomic_group)
Simran Basib6ec8ae2014-04-23 12:05:08 -07001007
1008
Allen Lie6203192016-12-14 13:05:53 -08001009def _get_atomic_group_name_from_labels(label_objects):
1010 """Get atomic group name from label objects.
1011
1012 @returns: atomic group name string or None
1013 """
1014 for label in label_objects:
1015 if label and label.atomic_group:
1016 return label.atomic_group.name
1017
1018
def _validate_host_job_sharding(host_objects):
    """Check that the hosts obey job sharding rules.

    A job created on the master may only use hosts that satisfy
    _allowed_hosts_for_master_job(); jobs for hosts living on shards must
    be created per shard. On a shard itself no check is needed.

    @param host_objects: Host objects the job is being created for.

    @raises ValueError: if the hosts span one or more shards.
    """
    if not (server_utils.is_shard()
            or _allowed_hosts_for_master_job(host_objects)):
        shard_host_map = bucket_hosts_by_shard(host_objects)
        # Fixed typo in the user-facing message: 'seperate' -> 'separate'.
        raise ValueError(
                'The following hosts are on shard(s), please create '
                'separate jobs for hosts on each shard: %s ' %
                shard_host_map)
1028
1029
def _allowed_hosts_for_master_job(host_objects):
    """Check that the hosts are allowed for a job on master."""
    # We disallow the following jobs on master:
    #   num_shards > 1: this is a job spanning across multiple shards.
    #   num_shards == 1 but number of hosts on shard is less
    #   than total number of hosts: this is a job that spans across
    #   one shard and the master.
    shard_host_map = bucket_hosts_by_shard(host_objects)
    num_shards = len(shard_host_map)
    if num_shards == 0:
        # Every host lives on the master.
        return True
    if num_shards > 1:
        return False
    # Exactly one shard: allowed only if ALL hosts are on that shard.
    hosts_on_shard = shard_host_map.values()[0]
    assert len(hosts_on_shard) <= len(host_objects)
    return len(hosts_on_shard) == len(host_objects)
Allen Lie6203192016-12-14 13:05:53 -08001047
1048
Simran Basib6ec8ae2014-04-23 12:05:08 -07001049def encode_ascii(control_file):
1050 """Force a control file to only contain ascii characters.
1051
1052 @param control_file: Control file to encode.
1053
1054 @returns the control file in an ascii encoding.
1055
1056 @raises error.ControlFileMalformed: if encoding fails.
1057 """
1058 try:
1059 return control_file.encode('ascii')
1060 except UnicodeDecodeError as e:
Jiaxi Luo421608e2014-07-07 14:38:00 -07001061 raise error.ControlFileMalformed(str(e))
1062
1063
def get_wmatrix_url():
    """Get wmatrix url from config file.

    @returns the wmatrix url or an empty string.
    """
    config = global_config.global_config
    # Fall back to '' when the AUTOTEST_WEB/wmatrix_url key is unset.
    return config.get_config_value('AUTOTEST_WEB', 'wmatrix_url', default='')
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001072
1073
def inject_times_to_filter(start_time_key=None, end_time_key=None,
                           start_time_value=None, end_time_value=None,
                           **filter_data):
    """Inject the key value pairs of start and end time if provided.

    @param start_time_key: A string represents the filter key of start_time.
    @param end_time_key: A string represents the filter key of end_time.
    @param start_time_value: Start_time value.
    @param end_time_value: End_time value.

    @returns the injected filter_data.
    """
    injected = dict(filter_data)
    # Falsy time values (None, '') are simply not injected.
    if start_time_value:
        injected[start_time_key] = start_time_value
    if end_time_value:
        injected[end_time_key] = end_time_value
    return injected
1091
1092
def inject_times_to_hqe_special_tasks_filters(filter_data_common,
                                              start_time, end_time):
    """Inject start and end time to hqe and special tasks filters.

    HQEs use started_on for their time columns while special tasks use
    time_started, so the shared filter is specialized twice.

    @param filter_data_common: Common filter for hqe and special tasks.
    @param start_time: Start time value to inject (may be None).
    @param end_time: End time value to inject (may be None).

    @returns a pair of hqe and special tasks filters.
    """
    special_tasks_common = filter_data_common.copy()
    hqe_filter = inject_times_to_filter(
            'started_on__gte', 'started_on__lte', start_time, end_time,
            **filter_data_common)
    special_tasks_filter = inject_times_to_filter(
            'time_started__gte', 'time_started__lte', start_time, end_time,
            **special_tasks_common)
    return hqe_filter, special_tasks_filter
1109
1110
def retrieve_shard(shard_hostname):
    """
    Retrieves the shard with the given hostname from the database.

    @param shard_hostname: Hostname of the shard to retrieve

    @raises models.Shard.DoesNotExist, if no shard with this hostname was found.

    @returns: Shard object
    """
    # Time the lookup so shard heartbeat latency can be monitored.
    with autotest_stats.Timer('shard_heartbeat.retrieve_shard'):
        return models.Shard.smart_get(shard_hostname)
Jakob Juelich59cfe542014-09-02 16:37:46 -07001124
1125
Jakob Juelich1b525742014-09-30 13:08:07 -07001126def find_records_for_shard(shard, known_job_ids, known_host_ids):
Jakob Juelich59cfe542014-09-02 16:37:46 -07001127 """Find records that should be sent to a shard.
1128
Jakob Juelicha94efe62014-09-18 16:02:49 -07001129 @param shard: Shard to find records for.
Jakob Juelich1b525742014-09-30 13:08:07 -07001130 @param known_job_ids: List of ids of jobs the shard already has.
1131 @param known_host_ids: List of ids of hosts the shard already has.
Jakob Juelicha94efe62014-09-18 16:02:49 -07001132
Fang Dengf3705992014-12-16 17:32:18 -08001133 @returns: Tuple of three lists for hosts, jobs, and suite job keyvals:
1134 (hosts, jobs, suite_job_keyvals).
Jakob Juelich59cfe542014-09-02 16:37:46 -07001135 """
MK Ryu509516b2015-05-18 12:00:47 -07001136 timer = autotest_stats.Timer('shard_heartbeat')
1137 with timer.get_client('find_hosts'):
1138 hosts = models.Host.assign_to_shard(shard, known_host_ids)
1139 with timer.get_client('find_jobs'):
1140 jobs = models.Job.assign_to_shard(shard, known_job_ids)
1141 with timer.get_client('find_suite_job_keyvals'):
1142 parent_job_ids = [job.parent_job_id for job in jobs]
1143 suite_job_keyvals = models.JobKeyval.objects.filter(
1144 job_id__in=parent_job_ids)
Fang Dengf3705992014-12-16 17:32:18 -08001145 return hosts, jobs, suite_job_keyvals
Jakob Juelicha94efe62014-09-18 16:02:49 -07001146
1147
def _persist_records_with_type_sent_from_shard(
        shard, records, record_type, *args, **kwargs):
    """
    Handle records of a specified type that were sent to the shard master.

    @param shard: The shard the records were sent from.
    @param records: The records sent in their serialized format.
    @param record_type: Type of the objects represented by records.
    @param args: Additional arguments that will be passed on to the sanity
                 checks.
    @param kwargs: Additional arguments that will be passed on to the sanity
                   checks.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.

    @returns: List of primary keys of the processed records.
    """
    persisted_pks = []
    for serialized in records:
        pk = serialized['id']
        try:
            record = record_type.objects.get(pk=pk)
        except record_type.DoesNotExist:
            raise error.UnallowedRecordsSentToMaster(
                    'Object with pk %s of type %s does not exist on master.' % (
                            pk, record_type))

        # Validate the shard's update before overwriting the master copy.
        record.sanity_check_update_from_shard(shard, serialized, *args,
                                              **kwargs)
        record.update_from_serialized(serialized)
        persisted_pks.append(pk)
    return persisted_pks
1181
1182
def persist_records_sent_from_shard(shard, jobs, hqes):
    """
    Sanity check and save serialized records uploaded by a shard.

    During heartbeats shards upload jobs and hostqueuentries. Each record
    is sanity checked (it must already exist on the master, must have been
    assigned to the uploading shard, and hostqueueentries must be sent
    together with their jobs) and the master's copy is then updated with
    the uploaded state.

    @param shard: The shard the records were sent from.
    @param jobs: The jobs the shard sent.
    @param hqes: The hostqueuentries the shard sent.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks
            fail.
    """
    heartbeat_timer = autotest_stats.Timer('shard_heartbeat')
    with heartbeat_timer.get_client('persist_jobs'):
        persisted_job_ids = _persist_records_with_type_sent_from_shard(
                shard, jobs, models.Job)

    # HQEs may only reference jobs uploaded in the same heartbeat.
    with heartbeat_timer.get_client('persist_hqes'):
        _persist_records_with_type_sent_from_shard(
                shard, hqes, models.HostQueueEntry,
                job_ids_sent=persisted_job_ids)
Jakob Juelich50e91f72014-10-01 12:43:23 -07001210
1211
Jakob Juelich50e91f72014-10-01 12:43:23 -07001212def forward_single_host_rpc_to_shard(func):
1213 """This decorator forwards rpc calls that modify a host to a shard.
1214
1215 If a host is assigned to a shard, rpcs that change his attributes should be
1216 forwarded to the shard.
1217
1218 This assumes the first argument of the function represents a host id.
1219
1220 @param func: The function to decorate
1221
1222 @returns: The function to replace func with.
1223 """
1224 def replacement(**kwargs):
1225 # Only keyword arguments can be accepted here, as we need the argument
1226 # names to send the rpc. serviceHandler always provides arguments with
1227 # their keywords, so this is not a problem.
MK Ryu8e2c2d02016-01-06 15:24:38 -08001228
1229 # A host record (identified by kwargs['id']) can be deleted in
1230 # func(). Therefore, we should save the data that can be needed later
1231 # before func() is called.
1232 shard_hostname = None
Jakob Juelich50e91f72014-10-01 12:43:23 -07001233 host = models.Host.smart_get(kwargs['id'])
MK Ryu8e2c2d02016-01-06 15:24:38 -08001234 if host and host.shard:
1235 shard_hostname = host.shard.rpc_hostname()
1236 ret = func(**kwargs)
1237 if shard_hostname and not server_utils.is_shard():
MK Ryu26f0c932015-05-28 18:14:33 -07001238 run_rpc_on_multiple_hostnames(func.func_name,
MK Ryu8e2c2d02016-01-06 15:24:38 -08001239 [shard_hostname],
Jakob Juelich50e91f72014-10-01 12:43:23 -07001240 **kwargs)
MK Ryu8e2c2d02016-01-06 15:24:38 -08001241 return ret
Prashanth Balasubramanian5949b4a2014-11-23 12:58:30 -08001242
1243 return replacement
1244
1245
MK Ryufb5e3a82015-07-01 12:21:20 -07001246def fanout_rpc(host_objs, rpc_name, include_hostnames=True, **kwargs):
1247 """Fanout the given rpc to shards of given hosts.
1248
1249 @param host_objs: Host objects for the rpc.
1250 @param rpc_name: The name of the rpc.
1251 @param include_hostnames: If True, include the hostnames in the kwargs.
1252 Hostnames are not always necessary, this functions is designed to
1253 send rpcs to the shard a host is on, the rpcs themselves could be
1254 related to labels, acls etc.
1255 @param kwargs: The kwargs for the rpc.
1256 """
1257 # Figure out which hosts are on which shards.
1258 shard_host_map = bucket_hosts_by_shard(
1259 host_objs, rpc_hostnames=True)
1260
1261 # Execute the rpc against the appropriate shards.
1262 for shard, hostnames in shard_host_map.iteritems():
1263 if include_hostnames:
1264 kwargs['hosts'] = hostnames
1265 try:
1266 run_rpc_on_multiple_hostnames(rpc_name, [shard], **kwargs)
1267 except:
1268 ei = sys.exc_info()
1269 new_exc = error.RPCException('RPC %s failed on shard %s due to '
1270 '%s: %s' % (rpc_name, shard, ei[0].__name__, ei[1]))
1271 raise new_exc.__class__, new_exc, ei[2]
1272
1273
Jakob Juelich50e91f72014-10-01 12:43:23 -07001274def run_rpc_on_multiple_hostnames(rpc_call, shard_hostnames, **kwargs):
1275 """Runs an rpc to multiple AFEs
1276
1277 This is i.e. used to propagate changes made to hosts after they are assigned
1278 to a shard.
1279
1280 @param rpc_call: Name of the rpc endpoint to call.
1281 @param shard_hostnames: List of hostnames to run the rpcs on.
1282 @param **kwargs: Keyword arguments to pass in the rpcs.
1283 """
MK Ryufb5e3a82015-07-01 12:21:20 -07001284 # Make sure this function is not called on shards but only on master.
1285 assert not server_utils.is_shard()
Jakob Juelich50e91f72014-10-01 12:43:23 -07001286 for shard_hostname in shard_hostnames:
MK Ryu0a9c82e2015-09-17 17:54:01 -07001287 afe = frontend_wrappers.RetryingAFE(server=shard_hostname,
1288 user=thread_local.get_user())
Jakob Juelich50e91f72014-10-01 12:43:23 -07001289 afe.run(rpc_call, **kwargs)
MK Ryu9c5fbbe2015-02-11 15:46:22 -08001290
1291
def get_label(name):
    """Gets a label object using a given name.

    @param name: Label name.
    @return: a label object matching the given name, or None when no such
             label exists (the underlying models.Label.DoesNotExist is
             swallowed rather than raised).
    """
    try:
        label = models.Label.smart_get(name)
    except models.Label.DoesNotExist:
        return None
    return label
1305
1306
xixuanba232a32016-08-25 17:01:59 -07001307# TODO: hide the following rpcs under is_moblab
def moblab_only(func):
    """Ensure moblab specific functions only run on Moblab devices.

    @param func: The rpc function to decorate.

    @returns: A wrapper that raises error.RPCException when invoked on a
            non-Moblab system and otherwise calls through to func.
    """
    @wraps(func)
    def verify(*args, **kwargs):
        if not server_utils.is_moblab():
            # Interpolate the rpc name into the message. The previous code
            # passed the format string and the name as two separate
            # exception arguments, so the '%s' was never substituted.
            raise error.RPCException('RPC: %s can only run on Moblab Systems!'
                                     % func.__name__)
        return func(*args, **kwargs)
    return verify
1316
1317
MK Ryufbb002c2015-06-08 14:13:16 -07001318def route_rpc_to_master(func):
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001319 """Route RPC to master AFE.
MK Ryu2d107562015-02-24 17:45:02 -08001320
MK Ryu6f5eadb2015-09-04 10:50:47 -07001321 When a shard receives an RPC decorated by this, the RPC is just
1322 forwarded to the master.
1323 When the master gets the RPC, the RPC function is executed.
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001324
MK Ryu6f5eadb2015-09-04 10:50:47 -07001325 @param func: An RPC function to decorate
1326
1327 @returns: A function replacing the RPC func.
MK Ryu2d107562015-02-24 17:45:02 -08001328 """
Allen Li54121d02016-12-12 17:35:53 -08001329 argspec = inspect.getargspec(func)
1330 if argspec.varargs is not None:
1331 raise Exception('RPC function must not have *args.')
1332
MK Ryufbb002c2015-06-08 14:13:16 -07001333 @wraps(func)
MK Ryuf6ab8a72015-07-06 10:19:48 -07001334 def replacement(*args, **kwargs):
Allen Lice51f372016-12-12 17:48:51 -08001335 """We need special handling when decorating an RPC that can be called
1336 directly using positional arguments.
1337
1338 One example is rpc_interface.create_job().
1339 rpc_interface.create_job_page_handler() calls the function using both
1340 positional and keyword arguments. Since frontend.RpcClient.run()
1341 takes only keyword arguments for an RPC, positional arguments of the
1342 RPC function need to be transformed into keyword arguments.
MK Ryu6f5eadb2015-09-04 10:50:47 -07001343 """
Allen Li416c4052016-12-12 17:46:46 -08001344 kwargs = _convert_to_kwargs_only(func, args, kwargs)
MK Ryufbb002c2015-06-08 14:13:16 -07001345 if server_utils.is_shard():
MK Ryu9651ca52015-06-08 17:48:22 -07001346 afe = frontend_wrappers.RetryingAFE(
Fang Deng0cb2a3b2015-12-10 17:59:00 -08001347 server=server_utils.get_global_afe_hostname(),
MK Ryu0a9c82e2015-09-17 17:54:01 -07001348 user=thread_local.get_user())
MK Ryu9651ca52015-06-08 17:48:22 -07001349 return afe.run(func.func_name, **kwargs)
MK Ryufbb002c2015-06-08 14:13:16 -07001350 return func(**kwargs)
Allen Li54121d02016-12-12 17:35:53 -08001351
MK Ryufbb002c2015-06-08 14:13:16 -07001352 return replacement
Dan Shi5e8fa182016-04-15 11:04:36 -07001353
1354
Allen Li416c4052016-12-12 17:46:46 -08001355def _convert_to_kwargs_only(func, args, kwargs):
1356 """Convert a function call's arguments to a kwargs dict.
1357
1358 This is best illustrated with an example. Given:
1359
Allen Liab8d3792016-12-12 18:00:31 -08001360 def foo(a, b, **kwargs):
1361 pass
1362 _to_kwargs(foo, (1, 2), {'c': 3}) # corresponding to foo(1, 2, c=3)
Allen Li416c4052016-12-12 17:46:46 -08001363
1364 foo(**kwargs)
1365
1366 @param func: function whose signature to use
1367 @param args: positional arguments of call
1368 @param kwargs: keyword arguments of call
1369
1370 @returns: kwargs dict
1371 """
Allen Li416c4052016-12-12 17:46:46 -08001372 argspec = inspect.getargspec(func)
Allen Liab8d3792016-12-12 18:00:31 -08001373 # callargs looks like {'a': 1, 'b': 2, 'kwargs': {'c': 3}}
1374 callargs = inspect.getcallargs(func, *args, **kwargs)
1375 if argspec.keywords is None:
1376 kwargs = {}
1377 else:
1378 kwargs = callargs.pop(argspec.keywords)
1379 kwargs.update(callargs)
Allen Li416c4052016-12-12 17:46:46 -08001380 return kwargs
1381
1382
Dan Shi5e8fa182016-04-15 11:04:36 -07001383def get_sample_dut(board, pool):
1384 """Get a dut with the given board and pool.
1385
1386 This method is used to help to locate a dut with the given board and pool.
1387 The dut then can be used to identify a devserver in the same subnet.
1388
1389 @param board: Name of the board.
1390 @param pool: Name of the pool.
1391
1392 @return: Name of a dut with the given board and pool.
1393 """
Allen Li3d43e602016-12-08 15:09:51 -08001394 if not (dev_server.PREFER_LOCAL_DEVSERVER and pool and board):
Dan Shi5e8fa182016-04-15 11:04:36 -07001395 return None
Dan Shic3d702b2016-12-21 03:05:09 +00001396
1397 hosts = get_host_query(
1398 ('pool:%s' % pool, 'board:%s' % board), False, False, True, {})
1399 if not hosts:
Dan Shi5e8fa182016-04-15 11:04:36 -07001400 return None
Dan Shic3d702b2016-12-21 03:05:09 +00001401
1402 return list(hosts)[0].get_object_dict()['hostname']