blob: 367c5cfe8fabe0ed49f8dfb39631f3fb37824547 [file] [log] [blame]
Aviv Keshet18308922013-02-19 17:49:49 -08001#pylint: disable-msg=C0111
xixuanba232a32016-08-25 17:01:59 -07002"""
mblighe8819cd2008-02-15 16:48:40 +00003Utility functions for rpc_interface.py. We keep them in a separate file so that
4only RPC interface functions go into that file.
5"""
6
7__author__ = 'showard@google.com (Steve Howard)'
8
MK Ryu84573e12015-02-18 15:54:09 -08009import datetime
MK Ryufbb002c2015-06-08 14:13:16 -070010from functools import wraps
MK Ryu84573e12015-02-18 15:54:09 -080011import inspect
12import os
13import sys
Fang Deng7051fe42015-10-20 14:57:28 -070014import django.db.utils
showard3d6ae112009-05-02 00:45:48 +000015import django.http
MK Ryu0a9c82e2015-09-17 17:54:01 -070016
17from autotest_lib.frontend import thread_local
Dan Shi07e09af2013-04-12 09:31:29 -070018from autotest_lib.frontend.afe import models, model_logic
Alex Miller4a193692013-08-21 13:59:01 -070019from autotest_lib.client.common_lib import control_data, error
Jiaxi Luo421608e2014-07-07 14:38:00 -070020from autotest_lib.client.common_lib import global_config, priorities
MK Ryu0c1a37d2015-04-30 12:00:55 -070021from autotest_lib.client.common_lib import time_utils
Allen Li3d43e602016-12-08 15:09:51 -080022from autotest_lib.client.common_lib.cros import dev_server
Aviv Keshet14cac442016-11-20 21:44:11 -080023# TODO(akeshet): Replace with monarch once we know how to instrument rpc server
24# with ts_mon.
MK Ryu509516b2015-05-18 12:00:47 -070025from autotest_lib.client.common_lib.cros.graphite import autotest_stats
Prashanth Balasubramanian8c98ac12014-12-23 11:26:44 -080026from autotest_lib.server import utils as server_utils
MK Ryu9651ca52015-06-08 17:48:22 -070027from autotest_lib.server.cros import provision
28from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
mblighe8819cd2008-02-15 16:48:40 +000029
showarda62866b2008-07-28 21:27:41 +000030NULL_DATETIME = datetime.datetime.max
31NULL_DATE = datetime.date.max
Fang Deng7051fe42015-10-20 14:57:28 -070032DUPLICATE_KEY_MSG = 'Duplicate entry'
showarda62866b2008-07-28 21:27:41 +000033
def prepare_for_serialization(objects):
    """
    Convert objects into a form safe to return over RPC.

    Lists of object dicts (dicts carrying an 'id' key) are first
    de-duplicated by id before the generic conversion is applied.

    @param objects: objects to be prepared.
    """
    looks_like_object_dicts = (isinstance(objects, list) and bool(objects) and
                               isinstance(objects[0], dict) and
                               'id' in objects[0])
    if looks_like_object_dicts:
        objects = gather_unique_dicts(objects)
    return _prepare_data(objects)
showardb8d34242008-04-25 18:11:16 +000043
44
def prepare_rows_as_nested_dicts(query, nested_dict_column_names):
    """
    Serialize the rows of a Django query for RPC, expanding selected
    related columns into nested dictionaries.

    @param query - A Django model query object with a select_related() method.
    @param nested_dict_column_names - Column/attribute names whose non-None
            values are expanded via their get_object_dict() method.

    @returns A list suitable to be returned in an RPC.
    """
    serialized_rows = []
    for record in query.select_related():
        record_dict = record.get_object_dict()
        for column_name in nested_dict_column_names:
            if record_dict[column_name] is not None:
                related = getattr(record, column_name)
                record_dict[column_name] = related.get_object_dict()
        serialized_rows.append(record_dict)
    return prepare_for_serialization(serialized_rows)
65
66
showardb8d34242008-04-25 18:11:16 +000067def _prepare_data(data):
jadmanski0afbb632008-06-06 21:10:57 +000068 """
69 Recursively process data structures, performing necessary type
70 conversions to values in data to allow for RPC serialization:
71 -convert datetimes to strings
showard2b9a88b2008-06-13 20:55:03 +000072 -convert tuples and sets to lists
jadmanski0afbb632008-06-06 21:10:57 +000073 """
74 if isinstance(data, dict):
75 new_data = {}
76 for key, value in data.iteritems():
77 new_data[key] = _prepare_data(value)
78 return new_data
showard2b9a88b2008-06-13 20:55:03 +000079 elif (isinstance(data, list) or isinstance(data, tuple) or
80 isinstance(data, set)):
jadmanski0afbb632008-06-06 21:10:57 +000081 return [_prepare_data(item) for item in data]
showard98659972008-07-17 17:00:07 +000082 elif isinstance(data, datetime.date):
showarda62866b2008-07-28 21:27:41 +000083 if data is NULL_DATETIME or data is NULL_DATE:
84 return None
jadmanski0afbb632008-06-06 21:10:57 +000085 return str(data)
86 else:
87 return data
mblighe8819cd2008-02-15 16:48:40 +000088
89
def fetchall_as_list_of_dicts(cursor):
    """
    Fetch every remaining row from a database cursor, as dictionaries
    keyed by column name so values can be read by name.

    @param cursor: The database cursor to read from.
    @returns: A list with one dict per row in the cursor.
    """
    column_names = [column[0] for column in cursor.description]
    return [dict(zip(column_names, row)) for row in cursor.fetchall()]
100
101
def raw_http_response(response_data, content_type=None):
    """Build an HttpResponse carrying raw data with an explicit length.

    @param response_data: The raw body of the response.
    @param content_type: Optional MIME type for the response.

    @returns A django.http.HttpResponse wrapping response_data.
    """
    # NOTE(review): 'mimetype' is the pre-Django-1.7 keyword for the
    # content type -- confirm against the Django version in use.
    response = django.http.HttpResponse(response_data, mimetype=content_type)
    response['Content-length'] = str(len(response.content))
    return response
106
107
def gather_unique_dicts(dict_iterable):
    """\
    Return the object dicts from dict_iterable, keeping only the first
    occurrence of each 'id' (input order is preserved).
    """
    seen_ids = set()
    unique = []
    for obj_dict in dict_iterable:
        obj_id = obj_dict['id']
        if obj_id in seen_ids:
            continue
        seen_ids.add(obj_id)
        unique.append(obj_dict)
    return unique
showardb0dfb9f2008-06-06 18:08:02 +0000119
120
def extra_job_status_filters(not_yet_run=False, running=False, finished=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra().
    * not_yet_run: all HQEs are Queued
    * finished: all HQEs are complete
    * running: everything else
    """
    if not any((not_yet_run, running, finished)):
        return {}
    not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
                  'WHERE status != "%s")'
                  % models.HostQueueEntry.Status.QUEUED)
    not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
                    'WHERE not complete)')

    clauses = []
    if not_yet_run:
        clauses.append('id NOT IN ' + not_queued)
    if running:
        clauses.append('(id IN %s) AND (id IN %s)'
                       % (not_queued, not_finished))
    if finished:
        clauses.append('id NOT IN ' + not_finished)
    combined = ' OR '.join('(%s)' % clause for clause in clauses)
    return {'where': [combined]}
mblighe8819cd2008-02-15 16:48:40 +0000145
146
def extra_job_type_filters(extra_args, suite=False,
                           sub=False, standalone=False):
    """\
    Generate a SQL WHERE clause for job type filtering, merged into the
    given dict of keyword args for query.extra().

    param extra_args: a dict of existing extra_args.

    No more than one of the parameters should be passed as True:
    * suite: job which is parent of other jobs
    * sub: job with a parent job
    * standalone: job with no child or parent jobs
    """
    assert not ((suite and sub) or
                (suite and standalone) or
                (sub and standalone)), ('Cannot specify more than one '
                                        'filter to this function')

    where = extra_args.get('where', [])
    parent_job_id = ('DISTINCT parent_job_id')
    child_job_id = ('id')
    filter_common = ('(SELECT %s FROM afe_jobs '
                     'WHERE parent_job_id IS NOT NULL)')

    if suite:
        clause = 'id IN ' + filter_common % parent_job_id
    elif sub:
        clause = 'id IN ' + filter_common % child_job_id
    elif standalone:
        clause = ('NOT EXISTS (SELECT 1 from afe_jobs AS sub_query '
                  'WHERE parent_job_id IS NOT NULL'
                  ' AND (sub_query.parent_job_id=afe_jobs.id'
                  ' OR sub_query.id=afe_jobs.id))')
    else:
        # No filter requested; hand back the args untouched.
        return extra_args

    where.append(clause)
    extra_args['where'] = where
    return extra_args
185
186
187
def extra_host_filters(multiple_labels=()):
    """\
    Generate SQL WHERE clauses for matching hosts that carry every label
    in multiple_labels (an intersection, one clause per label).
    """
    where_str = ('afe_hosts.id in (select host_id from afe_hosts_labels '
                 'where label_id=%s)')
    label_ids = [models.Label.smart_get(label).id
                 for label in multiple_labels]
    return {
        'where': [where_str] * len(multiple_labels),
        'params': label_ids,
    }
showard8e3aa5e2008-04-08 19:42:32 +0000200
201
def get_host_query(multiple_labels, exclude_only_if_needed_labels,
                   exclude_atomic_group_hosts, valid_only, filter_data):
    """Build a Host queryset constrained by labels and filter data.

    @param multiple_labels: Label names the hosts must all carry.
    @param exclude_only_if_needed_labels: If True, exclude hosts that have
            any label marked only_if_needed.
    @param exclude_atomic_group_hosts: If True, exclude hosts that have
            any label attached to an atomic group.
    @param valid_only: If True, start from valid hosts only.
    @param filter_data: Extra filter keyword args for Host.query_objects();
            must not already contain an 'extra_args' key.

    @returns A Django queryset of matching hosts; an empty queryset if any
            label in multiple_labels does not exist.
    """
    if valid_only:
        query = models.Host.valid_objects.all()
    else:
        query = models.Host.objects.all()

    if exclude_only_if_needed_labels:
        only_if_needed_labels = models.Label.valid_objects.filter(
            only_if_needed=True)
        if only_if_needed_labels.count() > 0:
            only_if_needed_ids = ','.join(
                    str(label['id'])
                    for label in only_if_needed_labels.values('id'))
            query = models.Host.objects.add_join(
                query, 'afe_hosts_labels', join_key='host_id',
                join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
                                % only_if_needed_ids),
                suffix='_exclude_OIN', exclude=True)

    if exclude_atomic_group_hosts:
        atomic_group_labels = models.Label.valid_objects.filter(
                atomic_group__isnull=False)
        if atomic_group_labels.count() > 0:
            atomic_group_label_ids = ','.join(
                    str(atomic_group['id'])
                    for atomic_group in atomic_group_labels.values('id'))
            query = models.Host.objects.add_join(
                    query, 'afe_hosts_labels', join_key='host_id',
                    join_condition=(
                            'afe_hosts_labels_exclude_AG.label_id IN (%s)'
                            % atomic_group_label_ids),
                    suffix='_exclude_AG', exclude=True)
    try:
        assert 'extra_args' not in filter_data
        filter_data['extra_args'] = extra_host_filters(multiple_labels)
        return models.Host.query_objects(filter_data, initial_query=query)
    except models.Label.DoesNotExist:
        # An unknown label in multiple_labels matches no hosts.
        # (Fixed: the exception was previously bound to an unused name.)
        return models.Host.objects.none()
showard43a3d262008-11-12 18:17:05 +0000241
242
class InconsistencyException(Exception):
    """Raised when a list of objects does not have a consistent value."""
showard8fd58242008-03-10 21:29:07 +0000245
246
def get_consistent_value(objects, field):
    """Return the value of `field` shared by every object in objects.

    Returns None for an empty list (trivially consistent).
    Raises InconsistencyException if two objects disagree.
    """
    if not objects:
        # A list of nothing is consistent.
        return None

    first = objects[0]
    expected = getattr(first, field)
    for other in objects[1:]:
        if getattr(other, field) != expected:
            raise InconsistencyException(first, other)
    return expected
showard8fd58242008-03-10 21:29:07 +0000258
259
def afe_test_dict_to_test_object(test_dict):
    """Convert a test dict into an anonymous TestObject instance.

    Values that look like integers are coerced to int; everything else is
    kept as-is. Non-dict inputs are returned unchanged.
    """
    if not isinstance(test_dict, dict):
        return test_dict

    coerced = {}
    for key, value in test_dict.iteritems():
        try:
            coerced[key] = int(value)
        except (ValueError, TypeError):
            coerced[key] = value

    return type('TestObject', (object,), coerced)
272
273
Michael Tang84a2ecf2016-06-07 15:10:53 -0700274def _check_is_server_test(test_type):
275 """Checks if the test type is a server test.
276
277 @param test_type The test type in enum integer or string.
278
279 @returns A boolean to identify if the test type is server test.
280 """
281 if test_type is not None:
282 if isinstance(test_type, basestring):
283 try:
284 test_type = control_data.CONTROL_TYPE.get_value(test_type)
285 except AttributeError:
286 return False
287 return (test_type == control_data.CONTROL_TYPE.SERVER)
288 return False
289
290
def prepare_generate_control_file(tests, profilers, db_tests=True):
    """Look up and validate objects needed to generate a control file.

    @param tests: List of test identifiers (db_tests=True) or of test
            dicts/objects (db_tests=False).
    @param profilers: List of profiler identifiers to look up.
    @param db_tests: If True, tests are resolved through the AFE database
            via models.Test.smart_get(); otherwise they are converted with
            afe_test_dict_to_test_object().

    @returns A (cf_info, test_objects, profiler_objects) tuple, where
            cf_info is a dict with is_server, synch_count and dependencies.
    @raises model_logic.ValidationError: If the tests mix server-side and
            client-side types.
    """
    if db_tests:
        test_objects = [models.Test.smart_get(test) for test in tests]
    else:
        test_objects = [afe_test_dict_to_test_object(test) for test in tests]

    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    # 'except E as e' for consistency with the rest of this file.
    except InconsistencyException as exc:
        test1, test2 = exc.args
        raise model_logic.ValidationError(
            {'tests' : 'You cannot run both test_suites and server-side '
             'tests together (tests %s and %s differ' % (
            test1.name, test2.name)})

    is_server = _check_is_server_test(test_type)
    if test_objects:
        synch_count = max(test.sync_count for test in test_objects)
    else:
        synch_count = 1

    if db_tests:
        dependencies = set(label.name for label
                           in models.Label.objects.filter(test__in=test_objects))
    else:
        # Initializer makes this safe for an empty test list (reduce with
        # no initial value raises TypeError on an empty sequence).
        dependencies = reduce(
                set.union, [set(test.dependencies) for test in test_objects],
                set())

    cf_info = dict(is_server=is_server, synch_count=synch_count,
                   dependencies=list(dependencies))
    return cf_info, test_objects, profiler_objects
showard989f25d2008-10-01 11:38:11 +0000325
326
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.
    host_objects: list of models.Host objects
    job_dependencies: list of names of labels
    """
    # Narrow the queryset down to hosts carrying every non-special label.
    host_ids = [host.id for host in host_objects]
    ok_hosts = models.Host.objects.filter(id__in=host_ids)
    for dependency in job_dependencies:
        if provision.is_for_special_action(dependency):
            continue
        ok_hosts = ok_hosts.filter(labels__name=dependency)
    ok_hostnames = set(host.hostname for host in ok_hosts)
    failing_hosts = (set(host.hostname for host in host_objects)
                     - ok_hostnames)
    if failing_hosts:
        raise model_logic.ValidationError(
            {'hosts' : 'Host(s) failed to meet job dependencies (' +
                       (', '.join(job_dependencies)) + '): ' +
                       (', '.join(failing_hosts))})
347
showard989f25d2008-10-01 11:38:11 +0000348
def check_job_metahost_dependencies(metahost_objects, job_dependencies):
    """
    Check that at least one machine within the metahost spec satisfies the job's
    dependencies.

    @param metahost_objects A list of label objects representing the metahosts.
    @param job_dependencies A list of strings of the required label names.
    @raises NoEligibleHostException If a metahost cannot run the job.
    """
    for metahost in metahost_objects:
        candidates = models.Host.objects.filter(labels=metahost)
        for label_name in job_dependencies:
            if provision.is_for_special_action(label_name):
                continue
            candidates = candidates.filter(labels__name=label_name)
        if not any(candidates):
            raise error.NoEligibleHostException("No hosts within %s satisfy %s."
                    % (metahost.name, ', '.join(job_dependencies)))
366
showard2bab8f42008-11-12 18:15:22 +0000367
368def _execution_key_for(host_queue_entry):
369 return (host_queue_entry.job.id, host_queue_entry.execution_subdir)
370
371
def check_abort_synchronous_jobs(host_queue_entries):
    """Refuse aborting only part of a synchronous autoserv execution."""
    # Tally how many of the given entries belong to each execution.
    count_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        count_per_execution[key] = count_per_execution.get(key, 0) + 1

    for queue_entry in host_queue_entries:
        if not queue_entry.execution_subdir:
            continue
        execution_count = count_per_execution[_execution_key_for(queue_entry)]
        if execution_count < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                    {'' : 'You cannot abort part of a synchronous job execution '
                          '(%d/%s), %d included, %d expected'
                          % (queue_entry.job.id, queue_entry.execution_subdir,
                             execution_count, queue_entry.job.synch_count)})
showard8fbae652009-01-20 23:23:10 +0000390
391
def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
                                  dependencies, atomic_group):
    """
    Attempt to reject create_job requests with an atomic group that
    will be impossible to schedule.  The checks are not perfect but
    should catch the most obvious issues.

    @param synch_count - The job's minimum synch count.
    @param host_objects - A list of models.Host instances.
    @param metahost_objects - A list of models.Label instances.
    @param dependencies - A list of job dependency label names.
    @param atomic_group - The atomic group instance the job targets; its
            label_set provides the candidate hosts.

    @raises model_logic.ValidationError - When an issue is found.
    """
    # If specific host objects were supplied with an atomic group, verify
    # that there are enough to satisfy the synch_count.
    minimum_required = synch_count or 1
    if (host_objects and not metahost_objects and
        len(host_objects) < minimum_required):
        raise model_logic.ValidationError(
                {'hosts':
                 'only %d hosts provided for job with synch_count = %d' %
                 (len(host_objects), synch_count)})

    # Check that the atomic group has a hope of running this job
    # given any supplied metahosts and dependancies that may limit.

    # Get a set of hostnames in the atomic group.
    possible_hosts = set()
    for label in atomic_group.label_set.all():
        possible_hosts.update(h.hostname for h in label.host_set.all())

    # Filter out hosts that don't match all of the job dependency labels.
    for label in models.Label.objects.filter(name__in=dependencies):
        hosts_in_label = (h.hostname for h in label.host_set.all())
        possible_hosts.intersection_update(hosts_in_label)

    if not host_objects and not metahost_objects:
        # No hosts or metahosts are required to queue an atomic group Job.
        # However, if they are given, we respect them below.
        host_set = possible_hosts
    else:
        host_set = set(host.hostname for host in host_objects)
        unusable_host_set = host_set.difference(possible_hosts)
        if unusable_host_set:
            raise model_logic.ValidationError(
                {'hosts': 'Hosts "%s" are not in Atomic Group "%s"' %
                 (', '.join(sorted(unusable_host_set)), atomic_group.name)})

    # Lookup hosts provided by each meta host and merge them into the
    # host_set for final counting.
    for meta_host in metahost_objects:
        meta_possible = possible_hosts.copy()
        hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
        meta_possible.intersection_update(hosts_in_meta_host)

        # Count all hosts that this meta_host will provide.
        host_set.update(meta_possible)

    if len(host_set) < minimum_required:
        raise model_logic.ValidationError(
                {'atomic_group_name':
                 'Insufficient hosts in Atomic Group "%s" with the'
                 ' supplied dependencies and meta_hosts.' %
                 (atomic_group.name,)})
459
460
def check_modify_host(update_data):
    """
    Sanity check modify_host* requests.

    @param update_data: A dictionary with the changes to make to a host
            or hosts.
    """
    # Only the scheduler (monitor_db) is allowed to modify Host status;
    # frontend writes would race with tasks running on the host.
    if 'status' not in update_data:
        return
    raise model_logic.ValidationError({
            'status': 'Host status can not be modified by the frontend.'})
474
475
def check_modify_host_locking(host, update_data):
    """
    Checks when locking/unlocking has been requested if the host is already
    locked/unlocked.

    @param host: models.Host object to be modified
    @param update_data: A dictionary with the changes to make to the host.
    """
    locked = update_data.get('locked', None)
    lock_reason = update_data.get('lock_reason', None)
    if locked is None:
        return
    if locked:
        if host.locked:
            raise model_logic.ValidationError({
                    'locked': 'Host %s already locked by %s on %s.' %
                    (host.hostname, host.locked_by, host.lock_time)})
        if not lock_reason:
            raise model_logic.ValidationError({
                    'locked': 'Please provide a reason for locking Host %s' %
                    host.hostname})
    elif not host.locked:
        raise model_logic.ValidationError({
                'locked': 'Host %s already unlocked.' % host.hostname})
showardce7c0922009-09-11 18:39:24 +0000498
499
def get_motd():
    """Return the message-of-the-day text.

    Reads motd.txt from two directories above this module. Returns an
    empty string if the file is missing or unreadable.
    """
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, "..", "..", "motd.txt")
    try:
        # with-statement guarantees the file is closed even on error.
        with open(filename, "r") as fp:
            return fp.read()
    except (IOError, OSError):
        # Narrowed from a bare except: a missing/unreadable motd is fine,
        # but other exceptions should no longer be silently swallowed.
        return ''
showard29f7cd22009-04-29 21:16:24 +0000514
515
516def _get_metahost_counts(metahost_objects):
517 metahost_counts = {}
518 for metahost in metahost_objects:
519 metahost_counts.setdefault(metahost, 0)
520 metahost_counts[metahost] += 1
521 return metahost_counts
522
523
def get_job_info(job, preserve_metahosts=False, queue_entry_filter_data=None):
    """Summarize a job's hosts, metahosts and atomic group.

    @param job: models.Job instance to inspect.
    @param preserve_metahosts: If True, report an entry's assigned host
            even when the entry was created from a metahost.
    @param queue_entry_filter_data: Optional filter kwargs applied to the
            job's queue entries before summarizing.

    @returns A dict with keys: dependencies, hosts, meta_hosts,
            meta_host_counts, one_time_hosts, atomic_group, hostless.
    """
    hosts = []
    one_time_hosts = []
    meta_hosts = []
    atomic_group = None
    hostless = False

    queue_entries = job.hostqueueentry_set.all()
    if queue_entry_filter_data:
        queue_entries = models.HostQueueEntry.query_objects(
            queue_entry_filter_data, initial_query=queue_entries)

    for queue_entry in queue_entries:
        if (queue_entry.host and (preserve_metahosts or
                                  not queue_entry.meta_host)):
            if queue_entry.deleted:
                continue
            if queue_entry.host.invalid:
                one_time_hosts.append(queue_entry.host)
            else:
                hosts.append(queue_entry.host)
        elif queue_entry.meta_host:
            meta_hosts.append(queue_entry.meta_host)
        else:
            hostless = True

        if atomic_group is None:
            if queue_entry.atomic_group is not None:
                atomic_group = queue_entry.atomic_group
        else:
            # Fixed: the message previously interpolated the builtin id()
            # function instead of the actual job id.
            assert atomic_group.name == queue_entry.atomic_group.name, (
                    'DB inconsistency. HostQueueEntries with multiple atomic'
                    ' groups on job %s: %s != %s' % (
                            job.id, atomic_group.name,
                            queue_entry.atomic_group.name))

    meta_host_counts = _get_metahost_counts(meta_hosts)

    info = dict(dependencies=[label.name for label
                              in job.dependency_labels.all()],
                hosts=hosts,
                meta_hosts=meta_hosts,
                meta_host_counts=meta_host_counts,
                one_time_hosts=one_time_hosts,
                atomic_group=atomic_group,
                hostless=hostless)
    return info
570
571
def check_for_duplicate_hosts(host_objects):
    """Raise ValidationError if any host id appears more than once."""
    seen_ids = set()
    duplicate_hostnames = set()
    for host in host_objects:
        if host.id in seen_ids:
            duplicate_hostnames.add(host.hostname)
        else:
            seen_ids.add(host.id)

    if duplicate_hostnames:
        raise model_logic.ValidationError(
                {'hosts' : 'Duplicate hosts: %s'
                 % ', '.join(duplicate_hostnames)})
584
585
def create_new_job(owner, options, host_objects, metahost_objects,
                   atomic_group=None):
    """Validate and create a new job with its host queue entries.

    @param owner: Username that will own the job.
    @param options: Dict of job options; 'dependencies', 'synch_count' and
            'is_template' are read here, the rest is passed through to
            models.Job.create(). 'dependencies' is replaced in place with
            the corresponding models.Label objects.
    @param host_objects: List of models.Host instances to run on.
    @param metahost_objects: List of models.Label instances used as
            metahosts.
    @param atomic_group: Optional atomic group for the job.

    @returns The id of the newly created job.
    @raises model_logic.ValidationError: If hosts, dependencies or atomic
            group constraints are not satisfiable.
    """
    all_host_objects = host_objects + metahost_objects
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')

    if atomic_group:
        check_atomic_group_create_job(
                synch_count, host_objects, metahost_objects,
                dependencies, atomic_group)
    else:
        if synch_count is not None and synch_count > len(all_host_objects):
            raise model_logic.ValidationError(
                    {'hosts':
                     'only %d hosts provided for job with synch_count = %d' %
                     (len(all_host_objects), synch_count)})
        # Hosts that belong to an atomic group may only be used by jobs
        # that specify that atomic group.
        atomic_hosts = models.Host.objects.filter(
                id__in=[host.id for host in host_objects],
                labels__atomic_group=True)
        unusable_host_names = [host.hostname for host in atomic_hosts]
        if unusable_host_names:
            raise model_logic.ValidationError(
                    {'hosts':
                     'Host(s) "%s" are atomic group hosts but no '
                     'atomic group was specified for this job.' %
                     (', '.join(unusable_host_names),)})

    check_for_duplicate_hosts(host_objects)

    for label_name in dependencies:
        if provision.is_for_special_action(label_name):
            # TODO: We could save a few queries
            # if we had a bulk ensure-label-exists function, which used
            # a bulk .get() call. The win is probably very small.
            _ensure_label_exists(label_name)

    # This only checks targeted hosts, not hosts eligible due to the metahost
    check_job_dependencies(host_objects, dependencies)
    check_job_metahost_dependencies(metahost_objects, dependencies)

    options['dependencies'] = list(
            models.Label.objects.filter(name__in=dependencies))

    # A dependency or metahost in an atomic group forces the job to declare
    # that (and only that) atomic group.
    for label in metahost_objects + options['dependencies']:
        if label.atomic_group and not atomic_group:
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'Dependency %r requires an atomic group but no '
                     'atomic_group_name or meta_host in an atomic group was '
                     'specified for this job.' % label.name})
        elif (label.atomic_group and
              label.atomic_group.name != atomic_group.name):
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'meta_hosts or dependency %r requires atomic group '
                     '%r instead of the supplied atomic_group_name=%r.' %
                     (label.name, label.atomic_group.name, atomic_group.name)})

    job = models.Job.create(owner=owner, options=options,
                            hosts=all_host_objects)
    job.queue(all_host_objects, atomic_group=atomic_group,
              is_template=options.get('is_template', False))
    return job.id
showard0957a842009-05-11 19:25:08 +0000649
650
def _ensure_label_exists(name):
    """
    Ensure that a label called |name| exists in the Django models.

    This function is to be called from within afe rpcs only, as an
    alternative to server.cros.provision.ensure_label_exists(...). It works
    by Django model manipulation, rather than by making another create_label
    rpc call.

    @param name: the label to check for/create.
    @raises ValidationError: There was an error in the response that was
                             not because the label already existed.
    @returns True is a label was created, False otherwise.
    """
    # Make sure this function is not called on shards but only on master.
    assert not server_utils.is_shard()
    try:
        models.Label.objects.get(name=name)
    except models.Label.DoesNotExist:
        try:
            # objects.create() already persists the row; the extra save()
            # call the original code made was redundant and is removed.
            models.Label.objects.create(name=name)
            return True
        except django.db.utils.IntegrityError as e:
            # It is possible that another suite/test already
            # created the label between the check and save.
            if DUPLICATE_KEY_MSG in str(e):
                return False
            else:
                raise
    return False
682
683
def find_platform_and_atomic_group(host):
    """
    Figure out the platform name and atomic group name for the given host
    object.  If none, the return value for either will be None.

    @returns (platform name, atomic group name) for the given host.
    """
    platform_names = [label.name for label in host.label_list
                      if label.platform]
    if len(platform_names) > 1:
        raise ValueError('Host %s has more than one platform: %s' %
                         (host.hostname, ', '.join(platform_names)))
    platform = platform_names[0] if platform_names else None

    atomic_group_name = None
    for label in host.label_list:
        if label.atomic_group:
            atomic_group_name = label.atomic_group.name
            break
    # Don't check for multiple atomic groups on a host here.  That is an
    # error but should not trip up the RPC interface.  monitor_db_cleanup
    # deals with it.  This just returns the first one found.
    return platform, atomic_group_name
showardc0ac3a72009-07-08 21:14:45 +0000709
710
711# support for get_host_queue_entries_and_special_tasks()
712
MK Ryu0c1a37d2015-04-30 12:00:55 -0700713def _common_entry_to_dict(entry, type, job_dict, exec_path, status, started_on):
showardc0ac3a72009-07-08 21:14:45 +0000714 return dict(type=type,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700715 host=entry['host'],
showardc0ac3a72009-07-08 21:14:45 +0000716 job=job_dict,
MK Ryu0c1a37d2015-04-30 12:00:55 -0700717 execution_path=exec_path,
718 status=status,
719 started_on=started_on,
720 id=str(entry['id']) + type,
721 oid=entry['id'])
showardc0ac3a72009-07-08 21:14:45 +0000722
723
MK Ryu0c1a37d2015-04-30 12:00:55 -0700724def _special_task_to_dict(task, queue_entries):
725 """Transforms a special task dictionary to another form of dictionary.
726
727 @param task Special task as a dictionary type
728 @param queue_entries Host queue entries as a list of dictionaries.
729
730 @return Transformed dictionary for a special task.
731 """
showardc0ac3a72009-07-08 21:14:45 +0000732 job_dict = None
MK Ryu0c1a37d2015-04-30 12:00:55 -0700733 if task['queue_entry']:
734 # Scan queue_entries to get the job detail info.
735 for qentry in queue_entries:
736 if task['queue_entry']['id'] == qentry['id']:
737 job_dict = qentry['job']
738 break
739 # If not found, get it from DB.
740 if job_dict is None:
741 job = models.Job.objects.get(id=task['queue_entry']['job'])
742 job_dict = job.get_object_dict()
743
744 exec_path = server_utils.get_special_task_exec_path(
745 task['host']['hostname'], task['id'], task['task'],
746 time_utils.time_string_to_datetime(task['time_requested']))
747 status = server_utils.get_special_task_status(
748 task['is_complete'], task['success'], task['is_active'])
749 return _common_entry_to_dict(task, task['task'], job_dict,
750 exec_path, status, task['time_started'])
showardc0ac3a72009-07-08 21:14:45 +0000751
752
def _queue_entry_to_dict(queue_entry):
    """Convert a host queue entry dict into its serializable form."""
    job_dict = queue_entry['job']
    job_tag = server_utils.get_job_tag(job_dict['id'], job_dict['owner'])
    exec_path = server_utils.get_hqe_exec_path(
            job_tag, queue_entry['execution_subdir'])
    return _common_entry_to_dict(queue_entry, 'Job', job_dict, exec_path,
                                 queue_entry['status'],
                                 queue_entry['started_on'])
760
761
def prepare_host_queue_entries_and_special_tasks(interleaved_entries,
                                                 queue_entries):
    """
    Prepare for serialization the interleaved entries of host queue entries
    and special tasks.
    Each element in the entries is a dictionary type.
    The special task dictionary has only a job id for a job and lacks
    the detail of the job while the host queue entry dictionary has.
    queue_entries is used to look up the job detail info.

    @param interleaved_entries Host queue entries and special tasks as a list
                               of dictionaries.
    @param queue_entries Host queue entries as a list of dictionaries.

    @return A post-processed list of dictionaries that is to be serialized.
    """
    # An entry carrying a 'task' key is a special task; any other entry is
    # a host queue entry.
    dict_list = [(_special_task_to_dict(entry, queue_entries)
                  if 'task' in entry
                  else _queue_entry_to_dict(entry))
                 for entry in interleaved_entries]
    return prepare_for_serialization(dict_list)
showardc0ac3a72009-07-08 21:14:45 +0000788
789
def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done using two pieces of information:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
      started_on field of queue_entries. this isn't guaranteed to work perfectly
      since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
      last computed job ID.

    The result is stored in place on each task dict as task['next_job_id'].
    Both lists are expected to be ordered by descending ID (see
    interleave_entries, which calls this helper).

    @param queue_entries Host queue entries as a list of dictionaries.
    @param special_tasks Special tasks as a list of dictionaries.
    """
    next_job_id = None # most recently computed next job
    hqe_index = 0 # index for scanning by started_on times
    for task in special_tasks:
        if task['queue_entry']:
            # The task is tied to a queue entry; that entry's job wins.
            next_job_id = task['queue_entry']['job']
        elif task['time_started'] is not None:
            # Walk forward through the (descending-ID) entries, remembering
            # the last one that started at or after this task; stop at the
            # first entry that started strictly before the task.
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['started_on'] is None:
                    continue
                t1 = time_utils.time_string_to_datetime(
                        queue_entry['started_on'])
                t2 = time_utils.time_string_to_datetime(task['time_started'])
                if t1 < t2:
                    break
                next_job_id = queue_entry['job']['id']

        task['next_job_id'] = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry['job']['id'] < next_job_id:
                    break
                hqe_index += 1
828
829
def interleave_entries(queue_entries, special_tasks):
    """
    Merge queue entries and the special tasks that ran around them into a
    single list.

    Both lists should be ordered by descending ID.
    """
    _compute_next_job_for_tasks(queue_entries, special_tasks)

    merged = []
    task_count = len(special_tasks)
    task_idx = 0

    # Special tasks that ran after the most recent job come first.
    while (task_idx < task_count and
           special_tasks[task_idx]['next_job_id'] is None):
        merged.append(special_tasks[task_idx])
        task_idx += 1

    # Interleave each queue entry with the tasks that ran between it and
    # the previous (lower-ID) job.
    for queue_entry in queue_entries:
        merged.append(queue_entry)
        entry_job_id = queue_entry['job']['id']
        while (task_idx < task_count and
               special_tasks[task_idx]['next_job_id'] >= entry_job_id):
            merged.append(special_tasks[task_idx])
            task_idx += 1

    return merged
854 return interleaved_entries
jamesren4a41e012010-07-16 22:33:48 +0000855
856
Prashanth Balasubramanian6edaaf92014-11-24 16:36:25 -0800857def bucket_hosts_by_shard(host_objs, rpc_hostnames=False):
858 """Figure out which hosts are on which shards.
859
860 @param host_objs: A list of host objects.
861 @param rpc_hostnames: If True, the rpc_hostnames of a shard are returned
862 instead of the 'real' shard hostnames. This only matters for testing
863 environments.
864
865 @return: A map of shard hostname: list of hosts on the shard.
866 """
867 shard_host_map = {}
868 for host in host_objs:
869 if host.shard:
870 shard_name = (host.shard.rpc_hostname() if rpc_hostnames
871 else host.shard.hostname)
872 shard_host_map.setdefault(shard_name, []).append(host.hostname)
873 return shard_host_map
874
875
jamesren4a41e012010-07-16 22:33:48 +0000876def get_create_job_common_args(local_args):
877 """
878 Returns a dict containing only the args that apply for create_job_common
879
880 Returns a subset of local_args, which contains only the arguments that can
881 be passed in to create_job_common().
882 """
Alex Miller7d658cf2013-09-04 16:00:35 -0700883 # This code is only here to not kill suites scheduling tests when priority
884 # becomes an int instead of a string.
885 if isinstance(local_args['priority'], str):
886 local_args['priority'] = priorities.Priority.DEFAULT
887 # </migration hack>
jamesren4a41e012010-07-16 22:33:48 +0000888 arg_names, _, _, _ = inspect.getargspec(create_job_common)
889 return dict(item for item in local_args.iteritems() if item[0] in arg_names)
890
891
def create_job_common(name, priority, control_type, control_file=None,
                      hosts=(), meta_hosts=(), one_time_hosts=(),
                      atomic_group_name=None, synch_count=None,
                      is_template=False, timeout=None, timeout_mins=None,
                      max_runtime_mins=None, run_verify=True, email_list='',
                      dependencies=(), reboot_before=None, reboot_after=None,
                      parse_failed_repair=None, hostless=False, keyvals=None,
                      drone_set=None, parameterized_job=None,
                      parent_job_id=None, test_retry=0, run_reset=True,
                      require_ssp=None):
    #pylint: disable-msg=C0111
    """
    Common code between creating "standard" jobs and creating parameterized jobs

    At least one of hosts, meta_hosts, one_time_hosts, atomic_group_name or
    hostless must be supplied.  A hostless job may not name any hosts and
    must use a server-side control file.  On the master, all requested hosts
    must belong to a single scheduling entity (all on the master, or all on
    one shard); mixed host sets are rejected.

    @returns whatever create_new_job() returns for the assembled job.
    """
    # The job is created on behalf of the currently logged-in user.
    user = models.User.current_user()
    owner = user.login

    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
            or hostless):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "'atomic_group_name', or 'hostless'"
            })

    if hostless:
        # Hostless jobs must not name hosts and must be server-side.
        if hosts or meta_hosts or one_time_hosts or atomic_group_name:
            raise model_logic.ValidationError({
                    'hostless': 'Hostless jobs cannot include any hosts!'})
        server_type = control_data.CONTROL_TYPE_NAMES.SERVER
        if control_type != server_type:
            raise model_logic.ValidationError({
                    'control_type': 'Hostless jobs cannot use client-side '
                                    'control files'})

    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())
    label_objects = list(models.Label.objects.filter(name__in=meta_hosts))

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label in label_objects:
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break
    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    if not server_utils.is_shard():
        shard_host_map = bucket_hosts_by_shard(host_objects)
        num_shards = len(shard_host_map)
        if (num_shards > 1 or (num_shards == 1 and
                len(shard_host_map.values()[0]) != len(host_objects))):
            # We disallow the following jobs on master:
            #   num_shards > 1: this is a job spanning across multiple shards.
            #   num_shards == 1 but number of hosts on shard is less
            #   than total number of hosts: this is a job that spans across
            #   one shard and the master.
            raise ValueError(
                    'The following hosts are on shard(s), please create '
                    'seperate jobs for hosts on each shard: %s ' %
                    shard_host_map)
    metahost_objects = []
    meta_host_labels_by_name = {label.name: label for label in label_objects}
    for label_name in meta_hosts or []:
        if label_name in meta_host_labels_by_name:
            metahost_objects.append(meta_host_labels_by_name[label_name])
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                            'Label "%s" not found. If assumed to be an '
                            'atomic group it would conflict with the '
                            'supplied atomic group "%s".' % (
                                label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                    {'atomic_group_name' :
                     'You have requested a synch_count (%d) greater than the '
                     'maximum machines in the requested Atomic Group (%d).' %
                     (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    # One-time hosts get host records created for them on the fly.
    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   timeout_mins=timeout_mins,
                   max_runtime_mins=max_runtime_mins,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parameterized_job=parameterized_job,
                   parent_job_id=parent_job_id,
                   test_retry=test_retry,
                   run_reset=run_reset,
                   require_ssp=require_ssp)
    return create_new_job(owner=owner,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects,
                          atomic_group=atomic_group)
Simran Basib6ec8ae2014-04-23 12:05:08 -07001023
1024
def encode_ascii(control_file):
    """Force a control file to only contain ascii characters.

    @param control_file: Control file to encode.

    @returns the control file in an ascii encoding.

    @raises error.ControlFileMalformed: if encoding fails.
    """
    try:
        return control_file.encode('ascii')
    except UnicodeError as e:
        # UnicodeError covers both UnicodeDecodeError (a byte string that
        # is not ASCII-decodable) and UnicodeEncodeError (a unicode string
        # with non-ASCII characters).  The latter previously escaped
        # uncaught because only UnicodeDecodeError was handled.
        raise error.ControlFileMalformed(str(e))
1038
1039
def get_wmatrix_url():
    """Get wmatrix url from config file.

    @returns the wmatrix url or an empty string.
    """
    config = global_config.global_config
    return config.get_config_value('AUTOTEST_WEB', 'wmatrix_url', default='')
Jiaxi Luo57bc1952014-07-22 15:27:30 -07001048
1049
def inject_times_to_filter(start_time_key=None, end_time_key=None,
                           start_time_value=None, end_time_value=None,
                           **filter_data):
    """Inject the key value pairs of start and end time if provided.

    @param start_time_key: A string represents the filter key of start_time.
    @param end_time_key: A string represents the filter key of end_time.
    @param start_time_value: Start_time value.
    @param end_time_value: End_time value.

    @returns the injected filter_data.
    """
    # Only truthy values are injected; a missing or empty time leaves the
    # filter untouched.
    for key, value in ((start_time_key, start_time_value),
                       (end_time_key, end_time_value)):
        if value:
            filter_data[key] = value
    return filter_data
1067
1068
def inject_times_to_hqe_special_tasks_filters(filter_data_common,
                                              start_time, end_time):
    """Inject start and end time to hqe and special tasks filters.

    @param filter_data_common: Common filter for hqe and special tasks.
    @param start_time: Start time value to inject.
    @param end_time: End time value to inject.

    @returns a pair of hqe and special tasks filters.
    """
    # The two models name their start-time field differently, so each
    # filter needs its own copy with the matching keys.
    special_tasks_data = filter_data_common.copy()
    hqe_filter = inject_times_to_filter(
            'started_on__gte', 'started_on__lte', start_time, end_time,
            **filter_data_common)
    special_tasks_filter = inject_times_to_filter(
            'time_started__gte', 'time_started__lte', start_time, end_time,
            **special_tasks_data)
    return hqe_filter, special_tasks_filter
1085
1086
def retrieve_shard(shard_hostname):
    """
    Retrieves the shard with the given hostname from the database.

    @param shard_hostname: Hostname of the shard to retrieve

    @raises models.Shard.DoesNotExist, if no shard with this hostname was found.

    @returns: Shard object
    """
    # Time the lookup so shard heartbeat latency can be monitored.
    with autotest_stats.Timer('shard_heartbeat.retrieve_shard'):
        return models.Shard.smart_get(shard_hostname)
Jakob Juelich59cfe542014-09-02 16:37:46 -07001100
1101
Jakob Juelich1b525742014-09-30 13:08:07 -07001102def find_records_for_shard(shard, known_job_ids, known_host_ids):
Jakob Juelich59cfe542014-09-02 16:37:46 -07001103 """Find records that should be sent to a shard.
1104
Jakob Juelicha94efe62014-09-18 16:02:49 -07001105 @param shard: Shard to find records for.
Jakob Juelich1b525742014-09-30 13:08:07 -07001106 @param known_job_ids: List of ids of jobs the shard already has.
1107 @param known_host_ids: List of ids of hosts the shard already has.
Jakob Juelicha94efe62014-09-18 16:02:49 -07001108
Fang Dengf3705992014-12-16 17:32:18 -08001109 @returns: Tuple of three lists for hosts, jobs, and suite job keyvals:
1110 (hosts, jobs, suite_job_keyvals).
Jakob Juelich59cfe542014-09-02 16:37:46 -07001111 """
MK Ryu509516b2015-05-18 12:00:47 -07001112 timer = autotest_stats.Timer('shard_heartbeat')
1113 with timer.get_client('find_hosts'):
1114 hosts = models.Host.assign_to_shard(shard, known_host_ids)
1115 with timer.get_client('find_jobs'):
1116 jobs = models.Job.assign_to_shard(shard, known_job_ids)
1117 with timer.get_client('find_suite_job_keyvals'):
1118 parent_job_ids = [job.parent_job_id for job in jobs]
1119 suite_job_keyvals = models.JobKeyval.objects.filter(
1120 job_id__in=parent_job_ids)
Fang Dengf3705992014-12-16 17:32:18 -08001121 return hosts, jobs, suite_job_keyvals
Jakob Juelicha94efe62014-09-18 16:02:49 -07001122
1123
1124def _persist_records_with_type_sent_from_shard(
1125 shard, records, record_type, *args, **kwargs):
1126 """
1127 Handle records of a specified type that were sent to the shard master.
1128
1129 @param shard: The shard the records were sent from.
1130 @param records: The records sent in their serialized format.
1131 @param record_type: Type of the objects represented by records.
1132 @param args: Additional arguments that will be passed on to the sanity
1133 checks.
1134 @param kwargs: Additional arguments that will be passed on to the sanity
1135 checks.
1136
1137 @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.
1138
1139 @returns: List of primary keys of the processed records.
1140 """
1141 pks = []
1142 for serialized_record in records:
1143 pk = serialized_record['id']
1144 try:
1145 current_record = record_type.objects.get(pk=pk)
1146 except record_type.DoesNotExist:
1147 raise error.UnallowedRecordsSentToMaster(
1148 'Object with pk %s of type %s does not exist on master.' % (
1149 pk, record_type))
1150
1151 current_record.sanity_check_update_from_shard(
1152 shard, serialized_record, *args, **kwargs)
1153
1154 current_record.update_from_serialized(serialized_record)
1155 pks.append(pk)
1156 return pks
1157
1158
def persist_records_sent_from_shard(shard, jobs, hqes):
    """
    Sanity checking then saving serialized records sent to master from shard.

    During heartbeats shards upload jobs and hostqueuentries. This performs
    some sanity checks on these and then updates the existing records for those
    entries with the updated ones from the heartbeat.

    The sanity checks include:
    - Checking if the objects sent already exist on the master.
    - Checking if the objects sent were assigned to this shard.
    - hostqueueentries must be sent together with their jobs.

    @param shard: The shard the records were sent from.
    @param jobs: The jobs the shard sent.
    @param hqes: The hostqueuentries the shart sent.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.
    """
    timer = autotest_stats.Timer('shard_heartbeat')
    with timer.get_client('persist_jobs'):
        persisted_job_ids = _persist_records_with_type_sent_from_shard(
                shard, jobs, models.Job)
    with timer.get_client('persist_hqes'):
        # HQEs are only accepted together with the jobs persisted above.
        _persist_records_with_type_sent_from_shard(
                shard, hqes, models.HostQueueEntry,
                job_ids_sent=persisted_job_ids)
Jakob Juelich50e91f72014-10-01 12:43:23 -07001186
1187
Jakob Juelich50e91f72014-10-01 12:43:23 -07001188def forward_single_host_rpc_to_shard(func):
1189 """This decorator forwards rpc calls that modify a host to a shard.
1190
1191 If a host is assigned to a shard, rpcs that change his attributes should be
1192 forwarded to the shard.
1193
1194 This assumes the first argument of the function represents a host id.
1195
1196 @param func: The function to decorate
1197
1198 @returns: The function to replace func with.
1199 """
1200 def replacement(**kwargs):
1201 # Only keyword arguments can be accepted here, as we need the argument
1202 # names to send the rpc. serviceHandler always provides arguments with
1203 # their keywords, so this is not a problem.
MK Ryu8e2c2d02016-01-06 15:24:38 -08001204
1205 # A host record (identified by kwargs['id']) can be deleted in
1206 # func(). Therefore, we should save the data that can be needed later
1207 # before func() is called.
1208 shard_hostname = None
Jakob Juelich50e91f72014-10-01 12:43:23 -07001209 host = models.Host.smart_get(kwargs['id'])
MK Ryu8e2c2d02016-01-06 15:24:38 -08001210 if host and host.shard:
1211 shard_hostname = host.shard.rpc_hostname()
1212 ret = func(**kwargs)
1213 if shard_hostname and not server_utils.is_shard():
MK Ryu26f0c932015-05-28 18:14:33 -07001214 run_rpc_on_multiple_hostnames(func.func_name,
MK Ryu8e2c2d02016-01-06 15:24:38 -08001215 [shard_hostname],
Jakob Juelich50e91f72014-10-01 12:43:23 -07001216 **kwargs)
MK Ryu8e2c2d02016-01-06 15:24:38 -08001217 return ret
Prashanth Balasubramanian5949b4a2014-11-23 12:58:30 -08001218
1219 return replacement
1220
1221
MK Ryufb5e3a82015-07-01 12:21:20 -07001222def fanout_rpc(host_objs, rpc_name, include_hostnames=True, **kwargs):
1223 """Fanout the given rpc to shards of given hosts.
1224
1225 @param host_objs: Host objects for the rpc.
1226 @param rpc_name: The name of the rpc.
1227 @param include_hostnames: If True, include the hostnames in the kwargs.
1228 Hostnames are not always necessary, this functions is designed to
1229 send rpcs to the shard a host is on, the rpcs themselves could be
1230 related to labels, acls etc.
1231 @param kwargs: The kwargs for the rpc.
1232 """
1233 # Figure out which hosts are on which shards.
1234 shard_host_map = bucket_hosts_by_shard(
1235 host_objs, rpc_hostnames=True)
1236
1237 # Execute the rpc against the appropriate shards.
1238 for shard, hostnames in shard_host_map.iteritems():
1239 if include_hostnames:
1240 kwargs['hosts'] = hostnames
1241 try:
1242 run_rpc_on_multiple_hostnames(rpc_name, [shard], **kwargs)
1243 except:
1244 ei = sys.exc_info()
1245 new_exc = error.RPCException('RPC %s failed on shard %s due to '
1246 '%s: %s' % (rpc_name, shard, ei[0].__name__, ei[1]))
1247 raise new_exc.__class__, new_exc, ei[2]
1248
1249
Jakob Juelich50e91f72014-10-01 12:43:23 -07001250def run_rpc_on_multiple_hostnames(rpc_call, shard_hostnames, **kwargs):
1251 """Runs an rpc to multiple AFEs
1252
1253 This is i.e. used to propagate changes made to hosts after they are assigned
1254 to a shard.
1255
1256 @param rpc_call: Name of the rpc endpoint to call.
1257 @param shard_hostnames: List of hostnames to run the rpcs on.
1258 @param **kwargs: Keyword arguments to pass in the rpcs.
1259 """
MK Ryufb5e3a82015-07-01 12:21:20 -07001260 # Make sure this function is not called on shards but only on master.
1261 assert not server_utils.is_shard()
Jakob Juelich50e91f72014-10-01 12:43:23 -07001262 for shard_hostname in shard_hostnames:
MK Ryu0a9c82e2015-09-17 17:54:01 -07001263 afe = frontend_wrappers.RetryingAFE(server=shard_hostname,
1264 user=thread_local.get_user())
Jakob Juelich50e91f72014-10-01 12:43:23 -07001265 afe.run(rpc_call, **kwargs)
MK Ryu9c5fbbe2015-02-11 15:46:22 -08001266
1267
def get_label(name):
    """Gets a label object using a given name.

    Note: unlike models.Label.smart_get(), this does not raise when the
    label is missing; it returns None instead.

    @param name: Label name.
    @return: a label object matching the given name, or None if no such
            label exists.
    """
    try:
        label = models.Label.smart_get(name)
    except models.Label.DoesNotExist:
        return None
    return label
1281
1282
xixuanba232a32016-08-25 17:01:59 -07001283# TODO: hide the following rpcs under is_moblab
1284def moblab_only(func):
1285 """Ensure moblab specific functions only run on Moblab devices."""
1286 def verify(*args, **kwargs):
1287 if not server_utils.is_moblab():
1288 raise error.RPCException('RPC: %s can only run on Moblab Systems!',
1289 func.__name__)
1290 return func(*args, **kwargs)
1291 return verify
1292
1293
MK Ryufbb002c2015-06-08 14:13:16 -07001294def route_rpc_to_master(func):
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001295 """Route RPC to master AFE.
MK Ryu2d107562015-02-24 17:45:02 -08001296
MK Ryu6f5eadb2015-09-04 10:50:47 -07001297 When a shard receives an RPC decorated by this, the RPC is just
1298 forwarded to the master.
1299 When the master gets the RPC, the RPC function is executed.
J. Richard Barnettefdfcd662015-04-13 17:20:29 -07001300
MK Ryu6f5eadb2015-09-04 10:50:47 -07001301 @param func: An RPC function to decorate
1302
1303 @returns: A function replacing the RPC func.
MK Ryu2d107562015-02-24 17:45:02 -08001304 """
Allen Li54121d02016-12-12 17:35:53 -08001305 argspec = inspect.getargspec(func)
1306 if argspec.varargs is not None:
1307 raise Exception('RPC function must not have *args.')
1308
MK Ryufbb002c2015-06-08 14:13:16 -07001309 @wraps(func)
MK Ryuf6ab8a72015-07-06 10:19:48 -07001310 def replacement(*args, **kwargs):
Allen Lice51f372016-12-12 17:48:51 -08001311 """We need special handling when decorating an RPC that can be called
1312 directly using positional arguments.
1313
1314 One example is rpc_interface.create_job().
1315 rpc_interface.create_job_page_handler() calls the function using both
1316 positional and keyword arguments. Since frontend.RpcClient.run()
1317 takes only keyword arguments for an RPC, positional arguments of the
1318 RPC function need to be transformed into keyword arguments.
MK Ryu6f5eadb2015-09-04 10:50:47 -07001319 """
Allen Li416c4052016-12-12 17:46:46 -08001320 kwargs = _convert_to_kwargs_only(func, args, kwargs)
MK Ryufbb002c2015-06-08 14:13:16 -07001321 if server_utils.is_shard():
MK Ryu9651ca52015-06-08 17:48:22 -07001322 afe = frontend_wrappers.RetryingAFE(
Fang Deng0cb2a3b2015-12-10 17:59:00 -08001323 server=server_utils.get_global_afe_hostname(),
MK Ryu0a9c82e2015-09-17 17:54:01 -07001324 user=thread_local.get_user())
MK Ryu9651ca52015-06-08 17:48:22 -07001325 return afe.run(func.func_name, **kwargs)
MK Ryufbb002c2015-06-08 14:13:16 -07001326 return func(**kwargs)
Allen Li54121d02016-12-12 17:35:53 -08001327
MK Ryufbb002c2015-06-08 14:13:16 -07001328 return replacement
Dan Shi5e8fa182016-04-15 11:04:36 -07001329
1330
Allen Li416c4052016-12-12 17:46:46 -08001331def _convert_to_kwargs_only(func, args, kwargs):
1332 """Convert a function call's arguments to a kwargs dict.
1333
1334 This is best illustrated with an example. Given:
1335
1336 def foo(a, b=None, **kwargs): pass
1337
1338 If foo is called like `foo(1, 2, c=3)`, this would correspond to:
1339
1340 args = (1, 2)
1341 kwargs = {'c': 3}
1342
1343 Calling `_convert_to_kwargs(foo, (1, 2), {'c': 3})` would return:
1344
1345 {'a': 1, 'b': 2, 'c': 3}
1346
1347 This could then be passed to foo as a keyword arguments dict:
1348
1349 foo(**kwargs)
1350
1351 @param func: function whose signature to use
1352 @param args: positional arguments of call
1353 @param kwargs: keyword arguments of call
1354
1355 @returns: kwargs dict
1356 """
1357 # inspect.getcallargs() is a useful utility to achieve the goal,
1358 # however, we need an additional effort when an RPC function has
1359 # **kwargs argument.
1360 # Let's say we have a following form of RPC function.
1361 #
1362 # def rpcfunc(a, b, **kwargs)
1363 #
1364 # When we call the function like "rpcfunc(1, 2, id=3, name='mk')",
1365 # inspect.getcallargs() returns a dictionary like below.
1366 #
1367 # {'a':1, 'b':2, 'kwargs': {'id':3, 'name':'mk'}}
1368 #
1369 # This is an incorrect form of arguments to pass to the rpc function.
1370 # Instead, the dictionary should be like this.
1371 #
1372 # {'a':1, 'b':2, 'id':3, 'name':'mk'}
1373
1374 argspec = inspect.getargspec(func)
1375 funcargs = inspect.getcallargs(func, *args, **kwargs)
1376 kwargs = dict()
1377 for k, v in funcargs.iteritems():
1378 if argspec.keywords and k == argspec.keywords:
1379 kwargs.update(v)
1380 else:
1381 kwargs[k] = v
1382 return kwargs
1383
1384
Dan Shi5e8fa182016-04-15 11:04:36 -07001385def get_sample_dut(board, pool):
1386 """Get a dut with the given board and pool.
1387
1388 This method is used to help to locate a dut with the given board and pool.
1389 The dut then can be used to identify a devserver in the same subnet.
1390
1391 @param board: Name of the board.
1392 @param pool: Name of the pool.
1393
1394 @return: Name of a dut with the given board and pool.
1395 """
Allen Li3d43e602016-12-08 15:09:51 -08001396 if not (dev_server.PREFER_LOCAL_DEVSERVER and pool and board):
Dan Shi5e8fa182016-04-15 11:04:36 -07001397 return None
Dan Shic3d702b2016-12-21 03:05:09 +00001398
1399 hosts = get_host_query(
1400 ('pool:%s' % pool, 'board:%s' % board), False, False, True, {})
1401 if not hosts:
Dan Shi5e8fa182016-04-15 11:04:36 -07001402 return None
Dan Shic3d702b2016-12-21 03:05:09 +00001403
1404 return list(hosts)[0].get_object_dict()['hostname']