blob: 92643be2bee380f9ead3a31eaf282f1e6fc9dbed [file] [log] [blame]
Aviv Keshet18308922013-02-19 17:49:49 -08001#pylint: disable-msg=C0111
xixuanba232a32016-08-25 17:01:59 -07002"""
mblighe8819cd2008-02-15 16:48:40 +00003Utility functions for rpc_interface.py. We keep them in a separate file so that
4only RPC interface functions go into that file.
5"""
6
7__author__ = 'showard@google.com (Steve Howard)'
8
Allen Li3d4e6112016-12-28 11:10:25 -08009import collections
MK Ryu84573e12015-02-18 15:54:09 -080010import datetime
MK Ryufbb002c2015-06-08 14:13:16 -070011from functools import wraps
MK Ryu84573e12015-02-18 15:54:09 -080012import inspect
13import os
14import sys
Fang Deng7051fe42015-10-20 14:57:28 -070015import django.db.utils
showard3d6ae112009-05-02 00:45:48 +000016import django.http
MK Ryu0a9c82e2015-09-17 17:54:01 -070017
18from autotest_lib.frontend import thread_local
Dan Shi07e09af2013-04-12 09:31:29 -070019from autotest_lib.frontend.afe import models, model_logic
Alex Miller4a193692013-08-21 13:59:01 -070020from autotest_lib.client.common_lib import control_data, error
Allen Li352b86a2016-12-14 12:11:27 -080021from autotest_lib.client.common_lib import global_config
MK Ryu0c1a37d2015-04-30 12:00:55 -070022from autotest_lib.client.common_lib import time_utils
Allen Li3d43e602016-12-08 15:09:51 -080023from autotest_lib.client.common_lib.cros import dev_server
Aviv Keshet14cac442016-11-20 21:44:11 -080024# TODO(akeshet): Replace with monarch once we know how to instrument rpc server
25# with ts_mon.
MK Ryu509516b2015-05-18 12:00:47 -070026from autotest_lib.client.common_lib.cros.graphite import autotest_stats
Prashanth Balasubramanian8c98ac12014-12-23 11:26:44 -080027from autotest_lib.server import utils as server_utils
MK Ryu9651ca52015-06-08 17:48:22 -070028from autotest_lib.server.cros import provision
29from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
mblighe8819cd2008-02-15 16:48:40 +000030
NULL_DATETIME = datetime.datetime.max  # DB sentinel for "no datetime"; serialized as None.
NULL_DATE = datetime.date.max  # DB sentinel for "no date"; serialized as None.
DUPLICATE_KEY_MSG = 'Duplicate entry'  # Substring of MySQL duplicate-key IntegrityError text.
showarda62866b2008-07-28 21:27:41 +000034
def prepare_for_serialization(objects):
    """
    Prepare Python objects to be returned via RPC.
    @param objects: objects to be prepared.
    """
    # A list of dicts carrying an 'id' key may contain duplicate rows from a
    # join; collapse them before serializing.
    looks_like_id_dicts = (isinstance(objects, list) and objects and
                           isinstance(objects[0], dict) and
                           'id' in objects[0])
    if looks_like_id_dicts:
        objects = _gather_unique_dicts(objects)
    return _prepare_data(objects)
showardb8d34242008-04-25 18:11:16 +000044
45
def prepare_rows_as_nested_dicts(query, nested_dict_column_names):
    """
    Prepare a Django query to be returned via RPC as a sequence of nested
    dictionaries.

    @param query - A Django model query object with a select_related() method.
    @param nested_dict_column_names - A list of column/attribute names for the
            rows returned by query to expand into nested dictionaries using
            their get_object_dict() method when not None.

    @returns An list suitable to returned in an RPC.
    """
    rows = []
    for model_row in query.select_related():
        row_as_dict = model_row.get_object_dict()
        for column_name in nested_dict_column_names:
            if row_as_dict[column_name] is not None:
                nested = getattr(model_row, column_name)
                row_as_dict[column_name] = nested.get_object_dict()
        rows.append(row_as_dict)
    return prepare_for_serialization(rows)
66
67
showardb8d34242008-04-25 18:11:16 +000068def _prepare_data(data):
jadmanski0afbb632008-06-06 21:10:57 +000069 """
70 Recursively process data structures, performing necessary type
71 conversions to values in data to allow for RPC serialization:
72 -convert datetimes to strings
showard2b9a88b2008-06-13 20:55:03 +000073 -convert tuples and sets to lists
jadmanski0afbb632008-06-06 21:10:57 +000074 """
75 if isinstance(data, dict):
76 new_data = {}
77 for key, value in data.iteritems():
78 new_data[key] = _prepare_data(value)
79 return new_data
showard2b9a88b2008-06-13 20:55:03 +000080 elif (isinstance(data, list) or isinstance(data, tuple) or
81 isinstance(data, set)):
jadmanski0afbb632008-06-06 21:10:57 +000082 return [_prepare_data(item) for item in data]
showard98659972008-07-17 17:00:07 +000083 elif isinstance(data, datetime.date):
showarda62866b2008-07-28 21:27:41 +000084 if data is NULL_DATETIME or data is NULL_DATE:
85 return None
jadmanski0afbb632008-06-06 21:10:57 +000086 return str(data)
87 else:
88 return data
mblighe8819cd2008-02-15 16:48:40 +000089
90
def fetchall_as_list_of_dicts(cursor):
    """
    Converts each row in the cursor to a dictionary so that values can be read
    by using the column name.
    @param cursor: The database cursor to read from.
    @returns: A list of each row in the cursor as a dictionary.
    """
    column_names = [column[0] for column in cursor.description]
    return [dict(zip(column_names, row)) for row in cursor.fetchall()]
101
102
def raw_http_response(response_data, content_type=None):
    """Wrap response_data in a Django HttpResponse with explicit length."""
    http_response = django.http.HttpResponse(response_data,
                                             mimetype=content_type)
    http_response['Content-length'] = str(len(http_response.content))
    return http_response
107
108
Allen Liafb7d322016-12-14 18:40:20 -0800109def _gather_unique_dicts(dict_iterable):
jadmanski0afbb632008-06-06 21:10:57 +0000110 """\
111 Pick out unique objects (by ID) from an iterable of object dicts.
112 """
Allen Li07478c02016-12-14 18:39:30 -0800113 objects = collections.OrderedDict()
jadmanski0afbb632008-06-06 21:10:57 +0000114 for obj in dict_iterable:
Allen Li07478c02016-12-14 18:39:30 -0800115 objects.setdefault(obj['id'], obj)
116 return objects.values()
showardb0dfb9f2008-06-06 18:08:02 +0000117
118
def extra_job_status_filters(not_yet_run=False, running=False, finished=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra().
    * not_yet_run: all HQEs are Queued
    * finished: all HQEs are complete
    * running: everything else
    """
    if not any((not_yet_run, running, finished)):
        return {}
    not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
                  'WHERE status != "%s")'
                  % models.HostQueueEntry.Status.QUEUED)
    not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
                    'WHERE not complete)')

    clauses = []
    if not_yet_run:
        clauses.append('id NOT IN ' + not_queued)
    if running:
        clauses.append('(id IN %s) AND (id IN %s)' % (not_queued, not_finished))
    if finished:
        clauses.append('id NOT IN ' + not_finished)
    return {'where': [' OR '.join('(%s)' % clause for clause in clauses)]}
mblighe8819cd2008-02-15 16:48:40 +0000143
144
def extra_job_type_filters(extra_args, suite=False,
                           sub=False, standalone=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra().

    param extra_args: a dict of existing extra_args.

    No more than one of the parameters should be passed as True:
    * suite: job which is parent of other jobs
    * sub: job with a parent job
    * standalone: job with no child or parent jobs
    """
    assert sum(map(bool, (suite, sub, standalone))) <= 1, (
            'Cannot specify more than one filter to this function')

    where_clauses = extra_args.get('where', [])
    subquery_template = ('(SELECT %s FROM afe_jobs '
                         'WHERE parent_job_id IS NOT NULL)')

    if suite:
        # Suites are the distinct parents of other jobs.
        where_clauses.append(
                'id IN ' + subquery_template % 'DISTINCT parent_job_id')
    elif sub:
        # Sub-jobs are jobs that have a parent.
        where_clauses.append('id IN ' + subquery_template % 'id')
    elif standalone:
        # Standalone jobs appear in neither role.
        where_clauses.append('NOT EXISTS (SELECT 1 from afe_jobs AS sub_query '
                             'WHERE parent_job_id IS NOT NULL'
                             ' AND (sub_query.parent_job_id=afe_jobs.id'
                             ' OR sub_query.id=afe_jobs.id))')
    else:
        return extra_args

    extra_args['where'] = where_clauses
    return extra_args
183
184
185
def extra_host_filters(multiple_labels=()):
    """\
    Generate SQL WHERE clauses for matching hosts in an intersection of
    labels.
    """
    membership_clause = ('afe_hosts.id in (select host_id from '
                         'afe_hosts_labels where label_id=%s)')
    return {
        'where': [membership_clause] * len(multiple_labels),
        'params': [models.Label.smart_get(label).id
                   for label in multiple_labels],
    }
showard8e3aa5e2008-04-08 19:42:32 +0000198
199
def get_host_query(multiple_labels, exclude_only_if_needed_labels,
                   exclude_atomic_group_hosts, valid_only, filter_data):
    """
    Build and run a models.Host query combining several common host filters.

    @param multiple_labels: Iterable of label names; hosts must have them all.
    @param exclude_only_if_needed_labels: If True, exclude hosts carrying any
            "only_if_needed" label.
    @param exclude_atomic_group_hosts: If True, exclude hosts carrying any
            atomic-group label.
    @param valid_only: If True, restrict to valid (non-deleted) hosts.
    @param filter_data: Filter dict passed to models.Host.query_objects; must
            not already contain an 'extra_args' key.

    @returns A queryset of matching hosts, or an empty queryset if one of the
            requested labels does not exist.
    """
    if valid_only:
        query = models.Host.valid_objects.all()
    else:
        query = models.Host.objects.all()

    if exclude_only_if_needed_labels:
        only_if_needed_labels = models.Label.valid_objects.filter(
            only_if_needed=True)
        if only_if_needed_labels.count() > 0:
            only_if_needed_ids = ','.join(
                    str(label['id'])
                    for label in only_if_needed_labels.values('id'))
            # Anti-join (exclude=True) against afe_hosts_labels to drop hosts
            # that carry any only_if_needed label.
            query = models.Host.objects.add_join(
                query, 'afe_hosts_labels', join_key='host_id',
                join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
                                % only_if_needed_ids),
                suffix='_exclude_OIN', exclude=True)

    if exclude_atomic_group_hosts:
        atomic_group_labels = models.Label.valid_objects.filter(
                atomic_group__isnull=False)
        if atomic_group_labels.count() > 0:
            atomic_group_label_ids = ','.join(
                    str(atomic_group['id'])
                    for atomic_group in atomic_group_labels.values('id'))
            # Same anti-join technique for atomic-group labels.
            query = models.Host.objects.add_join(
                    query, 'afe_hosts_labels', join_key='host_id',
                    join_condition=(
                            'afe_hosts_labels_exclude_AG.label_id IN (%s)'
                            % atomic_group_label_ids),
                    suffix='_exclude_AG', exclude=True)
    try:
        assert 'extra_args' not in filter_data
        filter_data['extra_args'] = extra_host_filters(multiple_labels)
        return models.Host.query_objects(filter_data, initial_query=query)
    except models.Label.DoesNotExist:
        # An unknown label matches no hosts rather than being an error.
        return models.Host.objects.none()
showard43a3d262008-11-12 18:17:05 +0000239
240
class InconsistencyException(Exception):
    """Raised when a list of objects does not have a consistent value."""
showard8fd58242008-03-10 21:29:07 +0000243
244
def get_consistent_value(objects, field):
    """Return the value of ``field`` shared by every object in ``objects``.

    An empty list is trivially consistent and yields None.

    @raises InconsistencyException if any object disagrees with the first.
    """
    if not objects:
        return None

    expected = getattr(objects[0], field)
    for candidate in objects:
        if getattr(candidate, field) != expected:
            raise InconsistencyException(objects[0], candidate)
    return expected
showard8fd58242008-03-10 21:29:07 +0000256
257
def afe_test_dict_to_test_object(test_dict):
    """
    Convert a test dict into an object with one attribute per dict key.

    Values that look like integers are coerced to int; everything else is
    kept as-is. Non-dict inputs are returned unchanged.

    @param test_dict: dict describing a test, or an already-converted object.
    @returns A new 'TestObject' type exposing the dict entries as attributes.
    """
    if not isinstance(test_dict, dict):
        return test_dict

    numerized_dict = {}
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and forward-compatible with Python 3.
    for key, value in test_dict.items():
        try:
            numerized_dict[key] = int(value)
        except (ValueError, TypeError):
            # Not an integer-like value (or None); keep it unchanged.
            numerized_dict[key] = value

    return type('TestObject', (object,), numerized_dict)
270
271
Michael Tang84a2ecf2016-06-07 15:10:53 -0700272def _check_is_server_test(test_type):
273 """Checks if the test type is a server test.
274
275 @param test_type The test type in enum integer or string.
276
277 @returns A boolean to identify if the test type is server test.
278 """
279 if test_type is not None:
280 if isinstance(test_type, basestring):
281 try:
282 test_type = control_data.CONTROL_TYPE.get_value(test_type)
283 except AttributeError:
284 return False
285 return (test_type == control_data.CONTROL_TYPE.SERVER)
286 return False
287
288
def prepare_generate_control_file(tests, profilers, db_tests=True):
    """
    Resolve tests and profilers and compute control-file generation info.

    @param tests: list of test names/ids (db_tests=True) or of test dicts.
    @param profilers: list of profiler names/ids.
    @param db_tests: If True, look tests up in the AFE database; otherwise
            convert the supplied dicts via afe_test_dict_to_test_object.

    @returns A (cf_info dict, test objects, profiler objects) tuple.
    @raises model_logic.ValidationError if the tests mix client and server
            test types.
    """
    if db_tests:
        test_objects = [models.Test.smart_get(test) for test in tests]
    else:
        test_objects = [afe_test_dict_to_test_object(test) for test in tests]

    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # Ensure tests are all the same type; a single control file cannot mix
    # client-side and server-side tests.
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    except InconsistencyException as exc:
        # 'as' syntax (not 'except X, e') works on Python 2.6+ and Python 3.
        test1, test2 = exc.args
        raise model_logic.ValidationError(
                {'tests' : 'You cannot run both test_suites and server-side '
                 'tests together (tests %s and %s differ' % (
                test1.name, test2.name)})

    is_server = _check_is_server_test(test_type)
    if test_objects:
        synch_count = max(test.sync_count for test in test_objects)
    else:
        synch_count = 1

    if db_tests:
        dependencies = set(label.name for label
                           in models.Label.objects.filter(test__in=test_objects))
    else:
        # The set() initializer keeps reduce() from raising TypeError when
        # test_objects is empty.
        dependencies = reduce(
                set.union, [set(test.dependencies) for test in test_objects],
                set())

    cf_info = dict(is_server=is_server, synch_count=synch_count,
                   dependencies=list(dependencies))
    return cf_info, test_objects, profiler_objects
showard989f25d2008-10-01 11:38:11 +0000323
324
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.
    host_objects: list of models.Host objects
    job_dependencies: list of names of labels

    @raises model_logic.ValidationError naming the hosts that do not carry
            every (non-special-action) dependency label.
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    hosts_in_job = models.Host.objects.filter(id__in=host_ids)
    ok_hosts = hosts_in_job
    for index, dependency in enumerate(job_dependencies):
        # Labels handled by special actions (e.g. provisioning) need not be
        # present on the host up front, so they are not filtered on here.
        if not provision.is_for_special_action(dependency):
            ok_hosts = ok_hosts.filter(labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
            {'hosts' : 'Host(s) failed to meet job dependencies (' +
                       (', '.join(job_dependencies)) + '): ' +
                       (', '.join(failing_hosts))})
345
showard989f25d2008-10-01 11:38:11 +0000346
def check_job_metahost_dependencies(metahost_objects, job_dependencies):
    """
    Check that at least one machine within the metahost spec satisfies the job's
    dependencies.

    @param metahost_objects A list of label objects representing the metahosts.
    @param job_dependencies A list of strings of the required label names.
    @raises NoEligibleHostException If a metahost cannot run the job.
    """
    for metahost in metahost_objects:
        hosts = models.Host.objects.filter(labels=metahost)
        for label_name in job_dependencies:
            # Special-action labels (e.g. provisioning) are applied later
            # and so are not required on the hosts here.
            if not provision.is_for_special_action(label_name):
                hosts = hosts.filter(labels__name=label_name)
        if not any(hosts):
            raise error.NoEligibleHostException("No hosts within %s satisfy %s."
                    % (metahost.name, ', '.join(job_dependencies)))
364
showard2bab8f42008-11-12 18:15:22 +0000365
366def _execution_key_for(host_queue_entry):
367 return (host_queue_entry.job.id, host_queue_entry.execution_subdir)
368
369
def check_abort_synchronous_jobs(host_queue_entries):
    """Reject aborts that would cancel only part of a synchronous execution."""
    # Count how many of the to-be-aborted entries share each execution.
    entries_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        entries_per_execution[key] = entries_per_execution.get(key, 0) + 1

    for queue_entry in host_queue_entries:
        if not queue_entry.execution_subdir:
            continue
        aborted_count = entries_per_execution[_execution_key_for(queue_entry)]
        if aborted_count < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                {'' : 'You cannot abort part of a synchronous job execution '
                      '(%d/%s), %d included, %d expected'
                      % (queue_entry.job.id, queue_entry.execution_subdir,
                         aborted_count, queue_entry.job.synch_count)})
showard8fbae652009-01-20 23:23:10 +0000388
389
def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
                                  dependencies, atomic_group):
    """
    Attempt to reject create_job requests with an atomic group that
    will be impossible to schedule. The checks are not perfect but
    should catch the most obvious issues.

    @param synch_count - The job's minimum synch count.
    @param host_objects - A list of models.Host instances.
    @param metahost_objects - A list of models.Label instances.
    @param dependencies - A list of job dependency label names.
    @param atomic_group - The models.AtomicGroup instance the job targets.

    @raises model_logic.ValidationError - When an issue is found.
    """
    if synch_count and synch_count > atomic_group.max_number_of_machines:
        raise model_logic.ValidationError(
                {'atomic_group_name' :
                 'You have requested a synch_count (%d) greater than the '
                 'maximum machines in the requested Atomic Group (%d).' %
                 (synch_count, atomic_group.max_number_of_machines)})

    # If specific host objects were supplied with an atomic group, verify
    # that there are enough to satisfy the synch_count.
    minimum_required = synch_count or 1
    if (host_objects and not metahost_objects and
        len(host_objects) < minimum_required):
        raise model_logic.ValidationError(
                {'hosts':
                 'only %d hosts provided for job with synch_count = %d' %
                 (len(host_objects), synch_count)})

    # Check that the atomic group has a hope of running this job
    # given any supplied metahosts and dependencies that may limit.

    # Get a set of hostnames in the atomic group.
    possible_hosts = set()
    for label in atomic_group.label_set.all():
        possible_hosts.update(h.hostname for h in label.host_set.all())

    # Filter out hosts that don't match all of the job dependency labels.
    for label in models.Label.objects.filter(name__in=dependencies):
        hosts_in_label = (h.hostname for h in label.host_set.all())
        possible_hosts.intersection_update(hosts_in_label)

    if not host_objects and not metahost_objects:
        # No hosts or metahosts are required to queue an atomic group Job.
        # However, if they are given, we respect them below.
        host_set = possible_hosts
    else:
        host_set = set(host.hostname for host in host_objects)
        unusable_host_set = host_set.difference(possible_hosts)
        if unusable_host_set:
            raise model_logic.ValidationError(
                {'hosts': 'Hosts "%s" are not in Atomic Group "%s"' %
                 (', '.join(sorted(unusable_host_set)), atomic_group.name)})

    # Lookup hosts provided by each meta host and merge them into the
    # host_set for final counting.
    for meta_host in metahost_objects:
        meta_possible = possible_hosts.copy()
        hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
        meta_possible.intersection_update(hosts_in_meta_host)

        # Count all hosts that this meta_host will provide.
        host_set.update(meta_possible)

    if len(host_set) < minimum_required:
        raise model_logic.ValidationError(
                {'atomic_group_name':
                 'Insufficient hosts in Atomic Group "%s" with the'
                 ' supplied dependencies and meta_hosts.' %
                 (atomic_group.name,)})
464
465
def check_modify_host(update_data):
    """
    Sanity check modify_host* requests.

    @param update_data: A dictionary with the changes to make to a host
            or hosts.
    @raises model_logic.ValidationError if 'status' is being modified.
    """
    # Only the scheduler (monitor_db) may change Host.status; letting the
    # frontend do so would race with tasks currently running on the host.
    if 'status' not in update_data:
        return
    raise model_logic.ValidationError({
            'status': 'Host status can not be modified by the frontend.'})
479
480
def check_modify_host_locking(host, update_data):
    """
    Checks when locking/unlocking has been requested if the host is already
    locked/unlocked.

    @param host: models.Host object to be modified
    @param update_data: A dictionary with the changes to make to the host.
    @raises model_logic.ValidationError on a redundant lock/unlock or a lock
            request without a lock_reason.
    """
    locked = update_data.get('locked', None)
    lock_reason = update_data.get('lock_reason', None)
    if locked is not None:
        if locked and host.locked:
            raise model_logic.ValidationError({
                'locked': 'Host %s already locked by %s on %s.' %
                (host.hostname, host.locked_by, host.lock_time)})
        if not locked and not host.locked:
            raise model_logic.ValidationError({
                'locked': 'Host %s already unlocked.' % host.hostname})
        # A new lock (on a currently unlocked host) must carry a reason.
        if locked and not lock_reason and not host.locked:
            raise model_logic.ValidationError({
                'locked': 'Please provide a reason for locking Host %s' %
                host.hostname})
showardce7c0922009-09-11 18:39:24 +0000503
504
def get_motd():
    """
    Return the message-of-the-day text, or '' if it cannot be read.

    The text is read from motd.txt two directories above this module.
    """
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, "..", "..", "motd.txt")
    try:
        # 'with' guarantees the handle is closed even if read() fails.
        with open(filename, "r") as fp:
            return fp.read()
    except EnvironmentError:
        # A missing/unreadable motd is not an error. The previous bare
        # 'except:' silently swallowed *all* exceptions, hiding real bugs.
        return ''
showard29f7cd22009-04-29 21:16:24 +0000519
520
521def _get_metahost_counts(metahost_objects):
522 metahost_counts = {}
523 for metahost in metahost_objects:
524 metahost_counts.setdefault(metahost, 0)
525 metahost_counts[metahost] += 1
526 return metahost_counts
527
528
def get_job_info(job, preserve_metahosts=False, queue_entry_filter_data=None):
    """
    Gather host, metahost and atomic-group information for a job.

    @param job: models.Job instance to inspect.
    @param preserve_metahosts: If True, entries created from a metahost are
            still reported as metahosts even once a host was assigned.
    @param queue_entry_filter_data: Optional filter dict applied to the job's
            HostQueueEntries before gathering.

    @returns dict with keys: dependencies, hosts, meta_hosts,
            meta_host_counts, one_time_hosts, atomic_group, hostless.
    """
    hosts = []
    one_time_hosts = []
    meta_hosts = []
    atomic_group = None
    hostless = False

    queue_entries = job.hostqueueentry_set.all()
    if queue_entry_filter_data:
        queue_entries = models.HostQueueEntry.query_objects(
            queue_entry_filter_data, initial_query=queue_entries)

    for queue_entry in queue_entries:
        if (queue_entry.host and (preserve_metahosts or
                                  not queue_entry.meta_host)):
            if queue_entry.deleted:
                continue
            # Invalid hosts were added as one-time hosts for this job.
            if queue_entry.host.invalid:
                one_time_hosts.append(queue_entry.host)
            else:
                hosts.append(queue_entry.host)
        elif queue_entry.meta_host:
            meta_hosts.append(queue_entry.meta_host)
        else:
            hostless = True

        if atomic_group is None:
            if queue_entry.atomic_group is not None:
                atomic_group = queue_entry.atomic_group
        else:
            # Bug fix: the message previously interpolated the builtin
            # function 'id' instead of the job's id.
            assert atomic_group.name == queue_entry.atomic_group.name, (
                'DB inconsistency. HostQueueEntries with multiple atomic'
                ' groups on job %s: %s != %s' % (
                    job.id, atomic_group.name, queue_entry.atomic_group.name))

    meta_host_counts = _get_metahost_counts(meta_hosts)

    info = dict(dependencies=[label.name for label
                              in job.dependency_labels.all()],
                hosts=hosts,
                meta_hosts=meta_hosts,
                meta_host_counts=meta_host_counts,
                one_time_hosts=one_time_hosts,
                atomic_group=atomic_group,
                hostless=hostless)
    return info
575
576
def check_for_duplicate_hosts(host_objects):
    """
    Raise if any host appears more than once in host_objects.

    @param host_objects: iterable of (hashable) host objects exposing a
            .hostname attribute.
    @raises model_logic.ValidationError listing the duplicated hostnames.
    """
    host_counts = collections.Counter(host_objects)
    # .items() instead of the Python-2-only .iteritems(): identical behavior
    # on Python 2, and forward-compatible with Python 3.
    duplicate_hostnames = {host.hostname
                           for host, count in host_counts.items()
                           if count > 1}
    if duplicate_hostnames:
        raise model_logic.ValidationError(
                {'hosts' : 'Duplicate hosts: %s'
                 % ', '.join(duplicate_hostnames)})
586
587
def create_new_job(owner, options, host_objects, metahost_objects,
                   atomic_group=None):
    """
    Validate, create and queue a new job.

    @param owner: username of the job owner.
    @param options: dict of job options (see models.Job.create), notably
            'dependencies' (label names), 'synch_count' and 'is_template'.
            NOTE: mutated here - 'dependencies' is replaced with the
            corresponding models.Label objects.
    @param host_objects: list of models.Host objects to run the job on.
    @param metahost_objects: list of models.Label objects used as metahosts.
    @param atomic_group: optional models.AtomicGroup the job should run in.

    @returns The id of the newly created job.
    @raises model_logic.ValidationError when the request is inconsistent.
    """
    all_host_objects = host_objects + metahost_objects
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')

    if atomic_group:
        check_atomic_group_create_job(
                synch_count, host_objects, metahost_objects,
                dependencies, atomic_group)
    else:
        if synch_count is not None and synch_count > len(all_host_objects):
            raise model_logic.ValidationError(
                    {'hosts':
                     'only %d hosts provided for job with synch_count = %d' %
                     (len(all_host_objects), synch_count)})
        # Hosts belonging to an atomic group may only be used by jobs that
        # name that atomic group.
        atomic_hosts = models.Host.objects.filter(
                id__in=[host.id for host in host_objects],
                labels__atomic_group=True)
        unusable_host_names = [host.hostname for host in atomic_hosts]
        if unusable_host_names:
            raise model_logic.ValidationError(
                    {'hosts':
                     'Host(s) "%s" are atomic group hosts but no '
                     'atomic group was specified for this job.' %
                     (', '.join(unusable_host_names),)})

    check_for_duplicate_hosts(host_objects)

    for label_name in dependencies:
        if provision.is_for_special_action(label_name):
            # TODO: We could save a few queries
            # if we had a bulk ensure-label-exists function, which used
            # a bulk .get() call. The win is probably very small.
            _ensure_label_exists(label_name)

    # This only checks targeted hosts, not hosts eligible due to the metahost
    check_job_dependencies(host_objects, dependencies)
    check_job_metahost_dependencies(metahost_objects, dependencies)

    options['dependencies'] = list(
            models.Label.objects.filter(name__in=dependencies))

    for label in metahost_objects + options['dependencies']:
        if label.atomic_group and not atomic_group:
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'Dependency %r requires an atomic group but no '
                     'atomic_group_name or meta_host in an atomic group was '
                     'specified for this job.' % label.name})
        elif (label.atomic_group and
              label.atomic_group.name != atomic_group.name):
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'meta_hosts or dependency %r requires atomic group '
                     '%r instead of the supplied atomic_group_name=%r.' %
                     (label.name, label.atomic_group.name, atomic_group.name)})

    job = models.Job.create(owner=owner, options=options,
                            hosts=all_host_objects)
    job.queue(all_host_objects, atomic_group=atomic_group,
              is_template=options.get('is_template', False))
    return job.id
showard0957a842009-05-11 19:25:08 +0000651
652
def _ensure_label_exists(name):
    """
    Ensure that a label called |name| exists in the Django models.

    This function is to be called from within afe rpcs only, as an
    alternative to server.cros.provision.ensure_label_exists(...). It works
    by Django model manipulation, rather than by making another create_label
    rpc call.

    @param name: the label to check for/create.
    @raises ValidationError: There was an error in the response that was
                             not because the label already existed.
    @returns True is a label was created, False otherwise.
    """
    # Make sure this function is not called on shards but only on master.
    assert not server_utils.is_shard()
    try:
        models.Label.objects.get(name=name)
    except models.Label.DoesNotExist:
        try:
            new_label = models.Label.objects.create(name=name)
            new_label.save()
            return True
        except django.db.utils.IntegrityError as e:
            # It is possible that another suite/test already
            # created the label between the check and save.
            if DUPLICATE_KEY_MSG in str(e):
                return False
            else:
                raise
    # Label already existed; nothing was created.
    return False
684
685
def find_platform_and_atomic_group(host):
    """
    Figure out the platform name and atomic group name for the given host
    object. If none, the return value for either will be None.

    @param host: A host object carrying a label_list attribute.
    @returns (platform name, atomic group name) for the given host.
    @raises ValueError: if the host carries more than one platform label.
    """
    platform_names = [label.name for label in host.label_list
                      if label.platform]
    platform = platform_names[0] if platform_names else None
    if len(platform_names) > 1:
        raise ValueError('Host %s has more than one platform: %s' %
                         (host.hostname, ', '.join(platform_names)))
    # Pick the first atomic-group label, if any. Multiple atomic groups on
    # one host is an error, but monitor_db_cleanup handles that; the RPC
    # interface just reports the first one it sees.
    atomic_group_name = next(
            (label.atomic_group.name for label in host.label_list
             if label.atomic_group),
            None)
    return platform, atomic_group_name
showardc0ac3a72009-07-08 21:14:45 +0000711
712
713# support for get_host_queue_entries_and_special_tasks()
714
MK Ryu0c1a37d2015-04-30 12:00:55 -0700715def _common_entry_to_dict(entry, type, job_dict, exec_path, status, started_on):
showardc0ac3a72009-07-08 21:14:45 +0000716 return dict(type=type,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700717 host=entry['host'],
showardc0ac3a72009-07-08 21:14:45 +0000718 job=job_dict,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700719 execution_path=exec_path,
720 status=status,
721 started_on=started_on,
722 id=str(entry['id']) + type,
723 oid=entry['id'])
showardc0ac3a72009-07-08 21:14:45 +0000724
725
def _special_task_to_dict(task, queue_entries):
    """Transforms a special task dictionary to another form of dictionary.

    @param task Special task as a dictionary type
    @param queue_entries Host queue entries as a list of dictionaries.

    @return Transformed dictionary for a special task.
    """
    task_queue_entry = task['queue_entry']
    job_dict = None
    if task_queue_entry:
        # Prefer the job detail already present in queue_entries.
        job_dict = next(
                (qentry['job'] for qentry in queue_entries
                 if qentry['id'] == task_queue_entry['id']),
                None)
        if job_dict is None:
            # Not in the supplied entries; fall back to the database.
            job_dict = models.Job.objects.get(
                    id=task_queue_entry['job']).get_object_dict()

    requested_at = time_utils.time_string_to_datetime(task['time_requested'])
    exec_path = server_utils.get_special_task_exec_path(
            task['host']['hostname'], task['id'], task['task'], requested_at)
    status = server_utils.get_special_task_status(
            task['is_complete'], task['success'], task['is_active'])
    return _common_entry_to_dict(task, task['task'], job_dict, exec_path,
                                 status, task['time_started'])
showardc0ac3a72009-07-08 21:14:45 +0000753
754
def _queue_entry_to_dict(queue_entry):
    """Transform a host queue entry for the interleaved entry listing.

    @param queue_entry: Host queue entry as a dictionary.

    @return Transformed dictionary for the host queue entry.
    """
    job_dict = queue_entry['job']
    job_tag = server_utils.get_job_tag(job_dict['id'], job_dict['owner'])
    execution_path = server_utils.get_hqe_exec_path(
            job_tag, queue_entry['execution_subdir'])
    return _common_entry_to_dict(
            queue_entry, 'Job', job_dict, execution_path,
            queue_entry['status'], queue_entry['started_on'])
762
763
def prepare_host_queue_entries_and_special_tasks(interleaved_entries,
                                                 queue_entries):
    """
    Prepare for serialization the interleaved entries of host queue entries
    and special tasks.
    Each element in the entries is a dictionary type.
    The special task dictionary has only a job id for a job and lacks
    the detail of the job while the host queue entry dictionary has.
    queue_entries is used to look up the job detail info.

    @param interleaved_entries Host queue entries and special tasks as a list
                               of dictionaries.
    @param queue_entries Host queue entries as a list of dictionaries.

    @return A post-processed list of dictionaries that is to be serialized.
    """
    # An entry with a 'task' key is a special task; anything else is a
    # host queue entry.
    converted = [
            _special_task_to_dict(entry, queue_entries) if 'task' in entry
            else _queue_entry_to_dict(entry)
            for entry in interleaved_entries]
    return prepare_for_serialization(converted)
showardc0ac3a72009-07-08 21:14:45 +0000790
791
def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done using two pieces of information:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
    started_on field of queue_entries. this isn't guaranteed to work perfectly
    since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
    last computed job ID.

    Each task dictionary is annotated in place with a 'next_job_id' key.
    Both lists are assumed to be ordered by descending ID (see
    interleave_entries, the caller).

    @param queue_entries Host queue entries as a list of dictionaries.
    @param special_tasks Special tasks as a list of dictionaries.
    """
    next_job_id = None # most recently computed next job
    hqe_index = 0 # index for scanning by started_on times
    for task in special_tasks:
        if task['queue_entry']:
            # The task is tied to a queue entry; that entry's job is the one.
            next_job_id = task['queue_entry']['job']
        elif task['time_started'] is not None:
            # Scan forward through entries that started at or after this
            # task; the last such entry's job becomes the next job.
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['started_on'] is None:
                    continue
                t1 = time_utils.time_string_to_datetime(
                        queue_entry['started_on'])
                t2 = time_utils.time_string_to_datetime(task['time_started'])
                if t1 < t2:
                    break
                next_job_id = queue_entry['job']['id']

        task['next_job_id'] = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['job']['id'] < next_job_id:
                    break
                hqe_index += 1
830
831
def interleave_entries(queue_entries, special_tasks):
    """
    Merge host queue entries and special tasks into one timeline-ordered list.

    Both lists should be ordered by descending ID.

    @param queue_entries: Host queue entries as a list of dictionaries.
    @param special_tasks: Special tasks as a list of dictionaries.

    @returns: A single list with tasks interleaved between the queue entries
              they ran around, newest first.
    """
    _compute_next_job_for_tasks(queue_entries, special_tasks)

    # start with all special tasks that've run since the last job
    interleaved_entries = []
    for task in special_tasks:
        if task['next_job_id'] is not None:
            break
        interleaved_entries.append(task)

    # now interleave queue entries with the remaining special tasks
    special_task_index = len(interleaved_entries)
    for queue_entry in queue_entries:
        interleaved_entries.append(queue_entry)
        # add all tasks that ran between this job and the previous one
        for task in special_tasks[special_task_index:]:
            if task['next_job_id'] < queue_entry['job']['id']:
                break
            interleaved_entries.append(task)
            special_task_index += 1

    return interleaved_entries
jamesren4a41e012010-07-16 22:33:48 +0000857
858
def bucket_hosts_by_shard(host_objs, rpc_hostnames=False):
    """Figure out which hosts are on which shards.

    @param host_objs: A list of host objects.
    @param rpc_hostnames: If True, the rpc_hostnames of a shard are returned
        instead of the 'real' shard hostnames. This only matters for testing
        environments.

    @return: A map of shard hostname: list of hosts on the shard.
    """
    shard_host_map = collections.defaultdict(list)
    for host_obj in host_objs:
        shard = host_obj.shard
        # Hosts without a shard live on the master and are skipped here.
        if not shard:
            continue
        if rpc_hostnames:
            shard_key = shard.rpc_hostname()
        else:
            shard_key = shard.hostname
        shard_host_map[shard_key].append(host_obj.hostname)
    return shard_host_map
876
877
def create_job_common(
        name,
        priority,
        control_type,
        control_file=None,
        hosts=(),
        meta_hosts=(),
        one_time_hosts=(),
        atomic_group_name=None,
        synch_count=None,
        is_template=False,
        timeout=None,
        timeout_mins=None,
        max_runtime_mins=None,
        run_verify=True,
        email_list='',
        dependencies=(),
        reboot_before=None,
        reboot_after=None,
        parse_failed_repair=None,
        hostless=False,
        keyvals=None,
        drone_set=None,
        parent_job_id=None,
        test_retry=0,
        run_reset=True,
        require_ssp=None):
    #pylint: disable-msg=C0111
    """
    Common code between creating "standard" jobs and creating parameterized jobs

    Validates the host-related arguments, resolves hostnames, meta hosts and
    atomic groups to model objects, then creates and queues the job.

    @param name: Name of the job.
    @param priority: Priority of the job.
    @param control_type: Control file type; hostless jobs must use
            control_data.CONTROL_TYPE_NAMES.SERVER.
    @param control_file: Contents of the control file.
    @param hosts: Hostnames to run the job on.
    @param meta_hosts: Label names used as meta hosts.
    @param one_time_hosts: Hostnames to create one-time host records for;
            cannot be combined with an atomic group.
    @param atomic_group_name: Atomic group to schedule on. If omitted, it is
            inferred from an atomic-group label among meta_hosts, if any.
    @param synch_count: Passed through to the job options.
    @param is_template: Whether the job is a template.
    @param timeout: Job timeout (legacy units -- presumably hours; confirm
            against models.Job).
    @param timeout_mins: Job timeout in minutes.
    @param max_runtime_mins: Maximum job runtime in minutes.
    @param run_verify: Whether to run verify.
    @param email_list: Email addresses to notify.
    @param dependencies: Label dependencies for the job.
    @param reboot_before: Reboot-before option, passed through.
    @param reboot_after: Reboot-after option, passed through.
    @param parse_failed_repair: Passed through to the job options.
    @param hostless: If True, create a hostless job; mutually exclusive with
            hosts, meta_hosts, one_time_hosts and atomic_group_name.
    @param keyvals: Job keyvals.
    @param drone_set: Drone set for the job.
    @param parent_job_id: Id of the parent (e.g. suite) job, if any.
    @param test_retry: Number of test retries.
    @param run_reset: Whether to run reset.
    @param require_ssp: Whether server-side packaging is required.

    @returns: The created job's id (as returned by create_new_job).
    @raises model_logic.ValidationError: on conflicting or unresolvable
            host/label/atomic-group arguments.
    """
    # input validation
    host_args_passed = any((
            hosts, meta_hosts, one_time_hosts, atomic_group_name))
    if hostless:
        if host_args_passed:
            raise model_logic.ValidationError({
                    'hostless': 'Hostless jobs cannot include any hosts!'})
        if control_type != control_data.CONTROL_TYPE_NAMES.SERVER:
            raise model_logic.ValidationError({
                    'control_type': 'Hostless jobs cannot use client-side '
                                    'control files'})
    elif not host_args_passed:
        raise model_logic.ValidationError({
            'arguments' : "For host jobs, you must pass at least one of"
                          " 'hosts', 'meta_hosts', 'one_time_hosts',"
                          " 'atomic_group_name'."
            })

    atomic_groups_by_name = {
            group.name: group for group in models.AtomicGroup.objects.all()
    }
    label_objects = list(models.Label.objects.filter(name__in=meta_hosts))

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        atomic_group_name = _get_atomic_group_name_from_labels(label_objects)

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    _validate_host_job_sharding(host_objects)
    for host in one_time_hosts:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    metahost_objects = []
    meta_host_labels_by_name = {label.name: label for label in label_objects}
    # Each meta_hosts name must resolve to a label; a name that is instead
    # an atomic group is accepted as an implicit atomic_group_name.
    for label_name in meta_hosts:
        if label_name in meta_host_labels_by_name:
            metahost_objects.append(meta_host_labels_by_name[label_name])
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                                'Label "%s" not found. If assumed to be an '
                                'atomic group it would conflict with the '
                                'supplied atomic_group_name=%r.' % (
                                        label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
    else:
        atomic_group = None

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   timeout_mins=timeout_mins,
                   max_runtime_mins=max_runtime_mins,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parent_job_id=parent_job_id,
                   test_retry=test_retry,
                   run_reset=run_reset,
                   require_ssp=require_ssp)

    return create_new_job(owner=models.User.current_user().login,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects,
                          atomic_group=atomic_group)
Simran Basib6ec8ae2014-04-23 12:05:08 -07001002
1003
Allen Lie6203192016-12-14 13:05:53 -08001004def _get_atomic_group_name_from_labels(label_objects):
1005 """Get atomic group name from label objects.
1006
1007 @returns: atomic group name string or None
1008 """
1009 for label in label_objects:
1010 if label and label.atomic_group:
1011 return label.atomic_group.name
1012
1013
def _validate_host_job_sharding(host_objects):
    """Check that the hosts obey job sharding rules.

    @param host_objects: Host objects the job would run on.
    @raises ValueError: if the job would span shards illegally.
    """
    # Shards may schedule on any of their hosts.
    if server_utils.is_shard():
        return
    # Master jobs are fine as long as the hosts don't span shards.
    if _allowed_hosts_for_master_job(host_objects):
        return
    shard_host_map = bucket_hosts_by_shard(host_objects)
    raise ValueError(
            'The following hosts are on shard(s), please create '
            'seperate jobs for hosts on each shard: %s ' %
            shard_host_map)
1023
1024
def _allowed_hosts_for_master_job(host_objects):
    """Check that the hosts are allowed for a job on master.

    Disallowed on master:
      * jobs whose hosts span more than one shard;
      * jobs spanning one shard and the master (some hosts sharded,
        some not).
    """
    sharded = bucket_hosts_by_shard(host_objects)
    if not sharded:
        # No host lives on any shard; the job stays on master.
        return True
    if len(sharded) > 1:
        # Hosts span multiple shards.
        return False
    hosts_on_shard = sharded.values()[0]
    assert len(hosts_on_shard) <= len(host_objects)
    # Allowed only if every requested host is on that single shard.
    return len(hosts_on_shard) == len(host_objects)
Allen Lie6203192016-12-14 13:05:53 -08001042
1043
def encode_ascii(control_file):
    """Force a control file to only contain ascii characters.

    @param control_file: Control file to encode.

    @returns the control file in an ascii encoding.

    @raises error.ControlFileMalformed: if encoding fails.
    """
    try:
        return control_file.encode('ascii')
    except UnicodeError as e:
        # encode('ascii') raises UnicodeDecodeError for non-ascii byte
        # strings (implicitly decoded first) but UnicodeEncodeError for
        # non-ascii unicode strings; UnicodeError is the parent of both.
        raise error.ControlFileMalformed(str(e))
1057
1058
def get_wmatrix_url():
    """Get wmatrix url from config file.

    @returns the wmatrix url or an empty string.
    """
    config = global_config.global_config
    return config.get_config_value('AUTOTEST_WEB', 'wmatrix_url', default='')
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001067
1068
def inject_times_to_filter(start_time_key=None, end_time_key=None,
                           start_time_value=None, end_time_value=None,
                           **filter_data):
    """Inject the key value pairs of start and end time if provided.

    @param start_time_key: A string represents the filter key of start_time.
    @param end_time_key: A string represents the filter key of end_time.
    @param start_time_value: Start_time value.
    @param end_time_value: End_time value.

    @returns the injected filter_data.
    """
    # Only truthy time values are injected into the filter.
    for key, value in ((start_time_key, start_time_value),
                       (end_time_key, end_time_value)):
        if value:
            filter_data[key] = value
    return filter_data
1086
1087
def inject_times_to_hqe_special_tasks_filters(filter_data_common,
                                              start_time, end_time):
    """Inject start and end time to hqe and special tasks filters.

    @param filter_data_common: Common filter for hqe and special tasks.
    @param start_time: Start time value to inject, if provided.
    @param end_time: End time value to inject, if provided.

    @returns a pair of hqe and special tasks filters.
    """
    # HQEs filter on started_on; special tasks filter on time_started.
    hqe_filter = inject_times_to_filter(
            'started_on__gte', 'started_on__lte',
            start_time, end_time, **filter_data_common)
    special_task_filter = inject_times_to_filter(
            'time_started__gte', 'time_started__lte',
            start_time, end_time, **filter_data_common.copy())
    return hqe_filter, special_task_filter
1104
1105
def retrieve_shard(shard_hostname):
    """
    Retrieves the shard with the given hostname from the database.

    @param shard_hostname: Hostname of the shard to retrieve

    @raises models.Shard.DoesNotExist, if no shard with this hostname was found.

    @returns: Shard object
    """
    # Timed so shard heartbeat latency is visible in stats.
    with autotest_stats.Timer('shard_heartbeat.retrieve_shard'):
        return models.Shard.smart_get(shard_hostname)
Jakob Juelich59cfe542014-09-02 16:37:46 -07001119
1120
def find_records_for_shard(shard, known_job_ids, known_host_ids):
    """Find records that should be sent to a shard.

    @param shard: Shard to find records for.
    @param known_job_ids: List of ids of jobs the shard already has.
    @param known_host_ids: List of ids of hosts the shard already has.

    @returns: Tuple of three lists for hosts, jobs, and suite job keyvals:
              (hosts, jobs, suite_job_keyvals).
    """
    heartbeat_timer = autotest_stats.Timer('shard_heartbeat')
    with heartbeat_timer.get_client('find_hosts'):
        hosts = models.Host.assign_to_shard(shard, known_host_ids)
    with heartbeat_timer.get_client('find_jobs'):
        jobs = models.Job.assign_to_shard(shard, known_job_ids)
    with heartbeat_timer.get_client('find_suite_job_keyvals'):
        # Parent (suite) job keyvals are shipped along with the jobs.
        parent_ids = [job.parent_job_id for job in jobs]
        suite_job_keyvals = models.JobKeyval.objects.filter(
                job_id__in=parent_ids)
    return hosts, jobs, suite_job_keyvals
Jakob Juelicha94efe62014-09-18 16:02:49 -07001141
1142
def _persist_records_with_type_sent_from_shard(
    shard, records, record_type, *args, **kwargs):
    """
    Handle records of a specified type that were sent to the shard master.

    @param shard: The shard the records were sent from.
    @param records: The records sent in their serialized format.
    @param record_type: Type of the objects represented by records.
    @param args: Additional arguments that will be passed on to the sanity
                 checks.
    @param kwargs: Additional arguments that will be passed on to the sanity
                   checks.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.

    @returns: List of primary keys of the processed records.
    """
    def _persist_one(serialized_record):
        # The record must already exist on the master; shards may only
        # update, never create.
        pk = serialized_record['id']
        try:
            current_record = record_type.objects.get(pk=pk)
        except record_type.DoesNotExist:
            raise error.UnallowedRecordsSentToMaster(
                'Object with pk %s of type %s does not exist on master.' % (
                    pk, record_type))
        current_record.sanity_check_update_from_shard(
            shard, serialized_record, *args, **kwargs)
        current_record.update_from_serialized(serialized_record)
        return pk

    return [_persist_one(serialized_record) for serialized_record in records]
1176
1177
def persist_records_sent_from_shard(shard, jobs, hqes):
    """
    Sanity checking then saving serialized records sent to master from shard.

    During heartbeats shards upload jobs and hostqueuentries. This performs
    some sanity checks on these and then updates the existing records for those
    entries with the updated ones from the heartbeat.

    The sanity checks include:
    - Checking if the objects sent already exist on the master.
    - Checking if the objects sent were assigned to this shard.
    - hostqueueentries must be sent together with their jobs.

    @param shard: The shard the records were sent from.
    @param jobs: The jobs the shard sent.
    @param hqes: The hostqueuentries the shart sent.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.
    """
    heartbeat_timer = autotest_stats.Timer('shard_heartbeat')
    with heartbeat_timer.get_client('persist_jobs'):
        persisted_job_ids = _persist_records_with_type_sent_from_shard(
                shard, jobs, models.Job)
    with heartbeat_timer.get_client('persist_hqes'):
        # HQEs are validated against the job ids persisted just above.
        _persist_records_with_type_sent_from_shard(
                shard, hqes, models.HostQueueEntry,
                job_ids_sent=persisted_job_ids)
Jakob Juelich50e91f72014-10-01 12:43:23 -07001205
1206
def forward_single_host_rpc_to_shard(func):
    """This decorator forwards rpc calls that modify a host to a shard.

    If a host is assigned to a shard, rpcs that change its attributes should be
    forwarded to the shard.

    This assumes the first argument of the function represents a host id.

    @param func: The function to decorate

    @returns: The function to replace func with.
    """
    # @wraps preserves func's name/docstring on the wrapper, consistent with
    # route_rpc_to_master below (wraps is imported at the top of this file).
    @wraps(func)
    def replacement(**kwargs):
        # Only keyword arguments can be accepted here, as we need the argument
        # names to send the rpc. serviceHandler always provides arguments with
        # their keywords, so this is not a problem.

        # A host record (identified by kwargs['id']) can be deleted in
        # func(). Therefore, we should save the data that can be needed later
        # before func() is called.
        shard_hostname = None
        host = models.Host.smart_get(kwargs['id'])
        if host and host.shard:
            shard_hostname = host.shard.rpc_hostname()
        ret = func(**kwargs)
        if shard_hostname and not server_utils.is_shard():
            run_rpc_on_multiple_hostnames(func.func_name,
                                          [shard_hostname],
                                          **kwargs)
        return ret

    return replacement
1239
1240
def fanout_rpc(host_objs, rpc_name, include_hostnames=True, **kwargs):
    """Fanout the given rpc to shards of given hosts.

    @param host_objs: Host objects for the rpc.
    @param rpc_name: The name of the rpc.
    @param include_hostnames: If True, include the hostnames in the kwargs.
        Hostnames are not always necessary, this functions is designed to
        send rpcs to the shard a host is on, the rpcs themselves could be
        related to labels, acls etc.
    @param kwargs: The kwargs for the rpc.

    @raises error.RPCException: wrapping whatever the underlying rpc raised,
        with the shard name added for context.
    """
    # Figure out which hosts are on which shards.
    shard_host_map = bucket_hosts_by_shard(
            host_objs, rpc_hostnames=True)

    # Execute the rpc against the appropriate shards.
    for shard, hostnames in shard_host_map.iteritems():
        if include_hostnames:
            kwargs['hosts'] = hostnames
        try:
            run_rpc_on_multiple_hostnames(rpc_name, [shard], **kwargs)
        except:
            # Bare except: any failure is wrapped in an RPCException carrying
            # the shard name, then re-raised.
            ei = sys.exc_info()
            new_exc = error.RPCException('RPC %s failed on shard %s due to '
                    '%s: %s' % (rpc_name, shard, ei[0].__name__, ei[1]))
            # Python 2 three-argument raise: keeps the original traceback.
            raise new_exc.__class__, new_exc, ei[2]
1267
1268
def run_rpc_on_multiple_hostnames(rpc_call, shard_hostnames, **kwargs):
    """Runs an rpc to multiple AFEs

    This is i.e. used to propagate changes made to hosts after they are assigned
    to a shard.

    @param rpc_call: Name of the rpc endpoint to call.
    @param shard_hostnames: List of hostnames to run the rpcs on.
    @param **kwargs: Keyword arguments to pass in the rpcs.
    """
    # Fanning out is a master-only operation; never run this on a shard.
    assert not server_utils.is_shard()
    for hostname in shard_hostnames:
        shard_afe = frontend_wrappers.RetryingAFE(
                server=hostname, user=thread_local.get_user())
        shard_afe.run(rpc_call, **kwargs)
MK Ryu9c5fbbe2015-02-11 15:46:22 -08001285
1286
def get_label(name):
    """Gets a label object using a given name.

    @param name: Label name.
    @return: a label object matching the given name, or None if no such
            label exists (models.Label.DoesNotExist is caught here, not
            propagated).
    """
    try:
        label = models.Label.smart_get(name)
    except models.Label.DoesNotExist:
        return None
    return label
1300
1301
xixuanba232a32016-08-25 17:01:59 -07001302# TODO: hide the following rpcs under is_moblab
def moblab_only(func):
    """Ensure moblab specific functions only run on Moblab devices.

    @param func: The RPC function to guard.
    @returns: Wrapper that raises error.RPCException off Moblab systems.
    """
    @wraps(func)
    def verify(*args, **kwargs):
        if not server_utils.is_moblab():
            # The original passed func.__name__ as a second exception arg,
            # so the '%s' placeholder was never interpolated; format here.
            raise error.RPCException('RPC: %s can only run on Moblab Systems!'
                                     % func.__name__)
        return func(*args, **kwargs)
    return verify
1311
1312
def route_rpc_to_master(func):
    """Route RPC to master AFE.

    When a shard receives an RPC decorated by this, the RPC is just
    forwarded to the master.
    When the master gets the RPC, the RPC function is executed.

    @param func: An RPC function to decorate

    @returns: A function replacing the RPC func.
    """
    argspec = inspect.getargspec(func)
    # Positional args must be convertible to keywords for forwarding, so a
    # *args catch-all cannot be supported.
    if argspec.varargs is not None:
        raise Exception('RPC function must not have *args.')

    @wraps(func)
    def replacement(*args, **kwargs):
        """Forward to the master on shards; execute locally on the master.

        Callers such as rpc_interface.create_job_page_handler() may invoke
        the RPC with positional arguments, but frontend.RpcClient.run()
        accepts only keywords; convert everything to keywords first.
        """
        kwargs = _convert_to_kwargs_only(func, args, kwargs)
        if not server_utils.is_shard():
            return func(**kwargs)
        master_afe = frontend_wrappers.RetryingAFE(
                server=server_utils.get_global_afe_hostname(),
                user=thread_local.get_user())
        return master_afe.run(func.func_name, **kwargs)

    return replacement
Dan Shi5e8fa182016-04-15 11:04:36 -07001348
1349
def _convert_to_kwargs_only(func, args, kwargs):
    """Convert a function call's arguments to a kwargs dict.

    This is best illustrated with an example. Given:

        def foo(a, b, **kwargs):
            pass
        # corresponding to the call foo(1, 2, c=3):
        _convert_to_kwargs_only(foo, (1, 2), {'c': 3})
        # returns {'a': 1, 'b': 2, 'c': 3}, suitable for foo(**kwargs)

    @param func: function whose signature to use
    @param args: positional arguments of call
    @param kwargs: keyword arguments of call

    @returns: kwargs dict
    """
    argspec = inspect.getargspec(func)
    # callargs looks like {'a': 1, 'b': 2, 'kwargs': {'c': 3}}
    callargs = inspect.getcallargs(func, *args, **kwargs)
    if argspec.keywords is None:
        # No **catch-all parameter; every binding is a named parameter.
        kwargs = {}
    else:
        # Start from the caller's extra keyword args, then fold in the
        # named parameter bindings below.
        kwargs = callargs.pop(argspec.keywords)
    kwargs.update(callargs)
    return kwargs
1376
1377
def get_sample_dut(board, pool):
    """Get a dut with the given board and pool.

    This method is used to help to locate a dut with the given board and pool.
    The dut then can be used to identify a devserver in the same subnet.

    @param board: Name of the board.
    @param pool: Name of the pool.

    @return: Name of a dut with the given board and pool.
    """
    # Only meaningful when local devservers are preferred and both
    # criteria are supplied.
    if not (dev_server.PREFER_LOCAL_DEVSERVER and pool and board):
        return None
    labels = ('pool:%s' % pool, 'board:%s' % board)
    matching_hosts = list(get_host_query(
            multiple_labels=labels,
            exclude_only_if_needed_labels=False,
            exclude_atomic_group_hosts=False,
            valid_only=True,
            filter_data={},
    ))
    if matching_hosts:
        return matching_hosts[0].hostname
    return None