blob: dd8c9639d3cac0f5f5d1325cb69aa440c43fba6b [file] [log] [blame]
Aviv Keshet18308922013-02-19 17:49:49 -08001#pylint: disable-msg=C0111
mblighe8819cd2008-02-15 16:48:40 +00002"""\
3Utility functions for rpc_interface.py. We keep them in a separate file so that
4only RPC interface functions go into that file.
5"""
6
7__author__ = 'showard@google.com (Steve Howard)'
8
Aviv Keshet18308922013-02-19 17:49:49 -08009import datetime, os, inspect
showard3d6ae112009-05-02 00:45:48 +000010import django.http
Dan Shi07e09af2013-04-12 09:31:29 -070011from autotest_lib.frontend.afe import models, model_logic
Alex Miller4a193692013-08-21 13:59:01 -070012from autotest_lib.client.common_lib import control_data, error
Jiaxi Luo421608e2014-07-07 14:38:00 -070013from autotest_lib.client.common_lib import global_config, priorities
Aviv Keshetc68807e2013-07-31 16:13:01 -070014from autotest_lib.server.cros import provision
mblighe8819cd2008-02-15 16:48:40 +000015
# Sentinel "no value" timestamps stored in the database; _prepare_data()
# serializes these specific objects to None instead of a string.
NULL_DATETIME = datetime.datetime.max
NULL_DATE = datetime.date.max
18
def prepare_for_serialization(objects):
    """
    Prepare Python objects to be returned via RPC.
    @param objects: objects to be prepared.
    """
    # A list of object dicts (anything carrying an 'id') is de-duplicated
    # before the recursive type conversion.
    looks_like_object_dicts = (isinstance(objects, list) and objects and
                               isinstance(objects[0], dict) and
                               'id' in objects[0])
    if looks_like_object_dicts:
        objects = gather_unique_dicts(objects)
    return _prepare_data(objects)
showardb8d34242008-04-25 18:11:16 +000028
29
def prepare_rows_as_nested_dicts(query, nested_dict_column_names):
    """
    Prepare a Django query to be returned via RPC as a sequence of nested
    dictionaries.

    @param query - A Django model query object with a select_related() method.
    @param nested_dict_column_names - A list of column/attribute names for the
            rows returned by query to expand into nested dictionaries using
            their get_object_dict() method when not None.

    @returns A list suitable to be returned in an RPC.
    """
    rows = []
    for model_row in query.select_related():
        as_dict = model_row.get_object_dict()
        for column_name in nested_dict_column_names:
            # Expand non-null foreign-key columns into nested dicts.
            if as_dict[column_name] is not None:
                related = getattr(model_row, column_name)
                as_dict[column_name] = related.get_object_dict()
        rows.append(as_dict)
    return prepare_for_serialization(rows)
50
51
showardb8d34242008-04-25 18:11:16 +000052def _prepare_data(data):
jadmanski0afbb632008-06-06 21:10:57 +000053 """
54 Recursively process data structures, performing necessary type
55 conversions to values in data to allow for RPC serialization:
56 -convert datetimes to strings
showard2b9a88b2008-06-13 20:55:03 +000057 -convert tuples and sets to lists
jadmanski0afbb632008-06-06 21:10:57 +000058 """
59 if isinstance(data, dict):
60 new_data = {}
61 for key, value in data.iteritems():
62 new_data[key] = _prepare_data(value)
63 return new_data
showard2b9a88b2008-06-13 20:55:03 +000064 elif (isinstance(data, list) or isinstance(data, tuple) or
65 isinstance(data, set)):
jadmanski0afbb632008-06-06 21:10:57 +000066 return [_prepare_data(item) for item in data]
showard98659972008-07-17 17:00:07 +000067 elif isinstance(data, datetime.date):
showarda62866b2008-07-28 21:27:41 +000068 if data is NULL_DATETIME or data is NULL_DATE:
69 return None
jadmanski0afbb632008-06-06 21:10:57 +000070 return str(data)
71 else:
72 return data
mblighe8819cd2008-02-15 16:48:40 +000073
74
def fetchall_as_list_of_dicts(cursor):
    """
    Converts each row in the cursor to a dictionary so that values can be read
    by using the column name.
    @param cursor: The database cursor to read from.
    @returns: A list of each row in the cursor as a dictionary.
    """
    column_names = [column[0] for column in cursor.description]
    return [dict(zip(column_names, row)) for row in cursor.fetchall()]
85
86
def raw_http_response(response_data, content_type=None):
    """Wrap pre-rendered data in an HttpResponse with an explicit length.

    @param response_data: raw body content to send back.
    @param content_type: optional MIME type for the response.
    @returns a django.http.HttpResponse carrying a Content-length header.
    """
    # NOTE(review): 'mimetype' is the pre-Django-1.7 keyword argument name;
    # modern Django only accepts 'content_type' — confirm the Django version
    # this deployment pins before upgrading.
    response = django.http.HttpResponse(response_data, mimetype=content_type)
    response['Content-length'] = str(len(response.content))
    return response
91
92
def gather_unique_dicts(dict_iterable):
    """\
    Pick out unique objects (by ID) from an iterable of object dicts.
    Order of first appearance is preserved.
    """
    seen_ids = set()
    unique_dicts = []
    for obj in dict_iterable:
        obj_id = obj['id']
        if obj_id in seen_ids:
            continue
        seen_ids.add(obj_id)
        unique_dicts.append(obj)
    return unique_dicts
showardb0dfb9f2008-06-06 18:08:02 +0000104
105
def extra_job_status_filters(not_yet_run=False, running=False, finished=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra(). No more than one of
    the parameters should be passed as True.
    * not_yet_run: all HQEs are Queued
    * finished: all HQEs are complete
    * running: everything else
    """
    assert sum([bool(not_yet_run), bool(running), bool(finished)]) <= 1, (
            'Cannot specify more than one filter to this function')

    # Sub-selects over the HQE table used to classify jobs.
    not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
                  'WHERE status != "%s")'
                  % models.HostQueueEntry.Status.QUEUED)
    not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
                    'WHERE not complete)')

    if not_yet_run:
        where = ['id NOT IN ' + not_queued]
    elif running:
        where = ['(id IN %s) AND (id IN %s)' % (not_queued, not_finished)]
    elif finished:
        where = ['id NOT IN ' + not_finished]
    else:
        # No filter requested: leave the query untouched.
        return {}
    return {'where': where}
mblighe8819cd2008-02-15 16:48:40 +0000135
136
def extra_job_type_filters(extra_args, suite=False,
                           sub=False, standalone=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra().

    param extra_args: a dict of existing extra_args.

    No more than one of the parameters should be passed as True:
    * suite: job which is parent of other jobs
    * sub: job with a parent job
    * standalone: job with no child or parent jobs
    """
    assert not ((suite and sub) or
                (suite and standalone) or
                (sub and standalone)), ('Cannot specify more than one '
                                        'filter to this function')

    where = extra_args.get('where', [])
    # Sub-select of every job id involved in a parent/child relationship;
    # %s picks which column to project out of it.
    filter_common = ('(SELECT %s FROM afe_jobs '
                     'WHERE parent_job_id IS NOT NULL)')

    if suite:
        where.append('id IN ' + filter_common % 'DISTINCT parent_job_id')
    elif sub:
        where.append('id IN ' + filter_common % 'id')
    elif standalone:
        where.append('NOT EXISTS (SELECT 1 from afe_jobs AS sub_query '
                     'WHERE parent_job_id IS NOT NULL'
                     ' AND (sub_query.parent_job_id=afe_jobs.id'
                     ' OR sub_query.id=afe_jobs.id))')
    else:
        return extra_args

    extra_args['where'] = where
    return extra_args
175
176
177
def extra_host_filters(multiple_labels=()):
    """\
    Generate SQL WHERE clauses for matching hosts in an intersection of
    labels.
    """
    # One membership sub-select per requested label; the label ids are
    # supplied as query parameters in the same order.
    label_subquery = ('afe_hosts.id in (select host_id from afe_hosts_labels '
                      'where label_id=%s)')
    return {
        'where': [label_subquery] * len(multiple_labels),
        'params': [models.Label.smart_get(label).id
                   for label in multiple_labels],
    }
showard8e3aa5e2008-04-08 19:42:32 +0000190
191
def get_host_query(multiple_labels, exclude_only_if_needed_labels,
                   exclude_atomic_group_hosts, valid_only, filter_data):
    """Build the Host queryset used by the host-listing RPCs.

    @param multiple_labels: label names the hosts must all carry.
    @param exclude_only_if_needed_labels: if True, drop hosts carrying any
            label flagged only_if_needed.
    @param exclude_atomic_group_hosts: if True, drop hosts carrying any
            label attached to an atomic group.
    @param valid_only: restrict to models.Host.valid_objects when True.
    @param filter_data: extra filter dict handed to Host.query_objects();
            must not already contain 'extra_args' (asserted below).
    @returns a Django queryset of matching hosts; empty queryset if any
            requested label name does not exist.
    """
    if valid_only:
        query = models.Host.valid_objects.all()
    else:
        query = models.Host.objects.all()

    if exclude_only_if_needed_labels:
        only_if_needed_labels = models.Label.valid_objects.filter(
            only_if_needed=True)
        if only_if_needed_labels.count() > 0:
            only_if_needed_ids = ','.join(
                    str(label['id'])
                    for label in only_if_needed_labels.values('id'))
            # Anti-join: exclude hosts holding any only_if_needed label.
            query = models.Host.objects.add_join(
                query, 'afe_hosts_labels', join_key='host_id',
                join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
                                % only_if_needed_ids),
                suffix='_exclude_OIN', exclude=True)

    if exclude_atomic_group_hosts:
        atomic_group_labels = models.Label.valid_objects.filter(
            atomic_group__isnull=False)
        if atomic_group_labels.count() > 0:
            atomic_group_label_ids = ','.join(
                    str(atomic_group['id'])
                    for atomic_group in atomic_group_labels.values('id'))
            # Anti-join: exclude hosts attached to any atomic-group label.
            query = models.Host.objects.add_join(
                    query, 'afe_hosts_labels', join_key='host_id',
                    join_condition=(
                            'afe_hosts_labels_exclude_AG.label_id IN (%s)'
                            % atomic_group_label_ids),
                    suffix='_exclude_AG', exclude=True)
    try:
        assert 'extra_args' not in filter_data
        filter_data['extra_args'] = extra_host_filters(multiple_labels)
        return models.Host.query_objects(filter_data, initial_query=query)
    except models.Label.DoesNotExist as e:
        # An unknown label name means no host can match: return empty.
        return models.Host.objects.none()
showard43a3d262008-11-12 18:17:05 +0000231
232
class InconsistencyException(Exception):
    """Raised when a list of objects does not have a consistent value."""
showard8fd58242008-03-10 21:29:07 +0000235
236
def get_consistent_value(objects, field):
    """Return the value of ``field`` shared by every object in ``objects``.

    @param objects: sequence of objects to inspect.
    @param field: attribute name to read from each object.
    @returns the common attribute value, or None for an empty sequence.
    @raises InconsistencyException: if any object disagrees with the first.
    """
    if not objects:
        # A list of nothing is trivially consistent.
        return None

    expected = getattr(objects[0], field)
    for obj in objects:
        if getattr(obj, field) != expected:
            raise InconsistencyException(objects[0], obj)
    return expected
showard8fd58242008-03-10 21:29:07 +0000248
249
def prepare_generate_control_file(tests, kernel, label, profilers):
    """Look up and validate everything needed to generate a control file.

    @param tests: list of test ids/names to include.
    @param kernel: kernel spec; unused here, kept for interface parity.
    @param label: optional label id/name the job will run against.
    @param profilers: list of profiler ids/names to enable.

    @returns (cf_info dict, test objects, profiler objects, label object)
            where cf_info holds is_server, synch_count and dependencies.
    @raises model_logic.ValidationError: if the tests mix server- and
            client-side test types.
    """
    test_objects = [models.Test.smart_get(test) for test in tests]
    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    # Fixed py2-only "except InconsistencyException, exc" syntax; the file
    # already uses "as" elsewhere (get_host_query).
    except InconsistencyException as exc:
        test1, test2 = exc.args
        # Fixed the unbalanced '(' in the message: "differ" now closes it.
        raise model_logic.ValidationError(
                {'tests' : 'You cannot run both server- and client-side '
                 'tests together (tests %s and %s differ)' % (
                test1.name, test2.name)})

    is_server = (test_type == control_data.CONTROL_TYPE.SERVER)
    # synch_count is the max required by any included test (1 if none).
    if test_objects:
        synch_count = max(test.sync_count for test in test_objects)
    else:
        synch_count = 1
    if label:
        label = models.Label.smart_get(label)

    dependencies = set(label.name for label
                       in models.Label.objects.filter(test__in=test_objects))

    cf_info = dict(is_server=is_server, synch_count=synch_count,
                   dependencies=list(dependencies))
    return cf_info, test_objects, profiler_objects, label
showard989f25d2008-10-01 11:38:11 +0000278
279
def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.
    host_objects: list of models.Host objects
    job_dependencies: list of names of labels
    @raises model_logic.ValidationError: if any host lacks a required label.
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    hosts_in_job = models.Host.objects.filter(id__in=host_ids)
    ok_hosts = hosts_in_job
    for index, dependency in enumerate(job_dependencies):
        # Special-action labels (e.g. provisioning) need not already be on
        # the host, so they don't narrow the eligible set.
        if not provision.is_for_special_action(dependency):
            ok_hosts = ok_hosts.filter(labels__name=dependency)
    # Hosts requested but filtered out above are the failures.
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
                {'hosts' : 'Host(s) failed to meet job dependencies (' +
                           (', '.join(job_dependencies)) + '): ' +
                           (', '.join(failing_hosts))})
300
showard989f25d2008-10-01 11:38:11 +0000301
def check_job_metahost_dependencies(metahost_objects, job_dependencies):
    """
    Check that at least one machine within the metahost spec satisfies the job's
    dependencies.

    @param metahost_objects A list of label objects representing the metahosts.
    @param job_dependencies A list of strings of the required label names.
    @raises NoEligibleHostException If a metahost cannot run the job.
    """
    for metahost in metahost_objects:
        hosts = models.Host.objects.filter(labels=metahost)
        for label_name in job_dependencies:
            # Special-action labels (e.g. provisioning) don't have to exist
            # on a host yet, so they don't constrain eligibility.
            if not provision.is_for_special_action(label_name):
                hosts = hosts.filter(labels__name=label_name)
        # any() only probes whether the queryset yields at least one row.
        if not any(hosts):
            raise error.NoEligibleHostException("No hosts within %s satisfy %s."
                    % (metahost.name, ', '.join(job_dependencies)))
319
showard2bab8f42008-11-12 18:15:22 +0000320
def _execution_key_for(host_queue_entry):
    """Key identifying one autoserv execution: (job id, execution subdir)."""
    return (host_queue_entry.job.id, host_queue_entry.execution_subdir)
323
324
def check_abort_synchronous_jobs(host_queue_entries):
    """Reject aborts that would cancel only part of a synchronous execution.

    @param host_queue_entries: HostQueueEntry objects the user wants aborted.
    @raises model_logic.ValidationError: if fewer entries of one execution
            are included than the job's synch_count requires.
    """
    # Count how many of the requested entries belong to each execution.
    entries_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        entries_per_execution[key] = entries_per_execution.get(key, 0) + 1

    for queue_entry in host_queue_entries:
        # Entries that never started have no subdir and can always abort.
        if not queue_entry.execution_subdir:
            continue
        included = entries_per_execution[_execution_key_for(queue_entry)]
        if included < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                    {'' : 'You cannot abort part of a synchronous job execution '
                          '(%d/%s), %d included, %d expected'
                          % (queue_entry.job.id, queue_entry.execution_subdir,
                             included, queue_entry.job.synch_count)})
showard8fbae652009-01-20 23:23:10 +0000343
344
def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
                                  dependencies, atomic_group):
    """
    Attempt to reject create_job requests with an atomic group that
    will be impossible to schedule. The checks are not perfect but
    should catch the most obvious issues.

    @param synch_count - The job's minimum synch count.
    @param host_objects - A list of models.Host instances.
    @param metahost_objects - A list of models.Label instances.
    @param dependencies - A list of job dependency label names.
    @param atomic_group - The models.AtomicGroup instance the job targets.

    @raises model_logic.ValidationError - When an issue is found.
    """
    # If specific host objects were supplied with an atomic group, verify
    # that there are enough to satisfy the synch_count.
    minimum_required = synch_count or 1
    if (host_objects and not metahost_objects and
        len(host_objects) < minimum_required):
        raise model_logic.ValidationError(
                {'hosts':
                 'only %d hosts provided for job with synch_count = %d' %
                 (len(host_objects), synch_count)})

    # Check that the atomic group has a hope of running this job
    # given any supplied metahosts and dependancies that may limit.

    # Get a set of hostnames in the atomic group.
    possible_hosts = set()
    for label in atomic_group.label_set.all():
        possible_hosts.update(h.hostname for h in label.host_set.all())

    # Filter out hosts that don't match all of the job dependency labels.
    for label in models.Label.objects.filter(name__in=dependencies):
        hosts_in_label = (h.hostname for h in label.host_set.all())
        possible_hosts.intersection_update(hosts_in_label)

    if not host_objects and not metahost_objects:
        # No hosts or metahosts are required to queue an atomic group Job.
        # However, if they are given, we respect them below.
        host_set = possible_hosts
    else:
        host_set = set(host.hostname for host in host_objects)
        unusable_host_set = host_set.difference(possible_hosts)
        if unusable_host_set:
            raise model_logic.ValidationError(
                    {'hosts': 'Hosts "%s" are not in Atomic Group "%s"' %
                     (', '.join(sorted(unusable_host_set)), atomic_group.name)})

    # Lookup hosts provided by each meta host and merge them into the
    # host_set for final counting.
    for meta_host in metahost_objects:
        meta_possible = possible_hosts.copy()
        hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
        meta_possible.intersection_update(hosts_in_meta_host)

        # Count all hosts that this meta_host will provide.
        host_set.update(meta_possible)

    if len(host_set) < minimum_required:
        raise model_logic.ValidationError(
                {'atomic_group_name':
                 'Insufficient hosts in Atomic Group "%s" with the'
                 ' supplied dependencies and meta_hosts.' %
                 (atomic_group.name,)})
412
413
def check_modify_host(update_data):
    """
    Sanity check modify_host* requests.

    @param update_data: A dictionary with the changes to make to a host
            or hosts.
    @raises model_logic.ValidationError: if a forbidden field is present.
    """
    # Only the scheduler (monitor_db) is allowed to modify Host status.
    # Otherwise race conditions happen as a hosts state is changed out from
    # beneath tasks being run on a host.
    if 'status' in update_data:
        raise model_logic.ValidationError({
                'status': 'Host status can not be modified by the frontend.'})
427
428
def check_modify_host_locking(host, update_data):
    """
    Checks when locking/unlocking has been requested if the host is already
    locked/unlocked.

    @param host: models.Host object to be modified
    @param update_data: A dictionary with the changes to make to the host.
    @raises model_logic.ValidationError: on a redundant lock or unlock.
    """
    locked = update_data.get('locked', None)
    if locked is None:
        # No lock state change requested: nothing to validate.
        return
    if locked and host.locked:
        raise model_logic.ValidationError({
                'locked': 'Host already locked by %s on %s.' %
                (host.locked_by, host.lock_time)})
    if not locked and not host.locked:
        raise model_logic.ValidationError({
                'locked': 'Host already unlocked.'})
446
447
def get_motd():
    """Return the contents of motd.txt two directories up, or '' on error."""
    motd_path = os.path.join(os.path.dirname(__file__),
                             "..", "..", "motd.txt")
    try:
        with open(motd_path, "r") as motd_file:
            return motd_file.read()
    except:
        # Deliberate best-effort: a missing/unreadable motd is not an error.
        return ''
showard29f7cd22009-04-29 21:16:24 +0000462
463
464def _get_metahost_counts(metahost_objects):
465 metahost_counts = {}
466 for metahost in metahost_objects:
467 metahost_counts.setdefault(metahost, 0)
468 metahost_counts[metahost] += 1
469 return metahost_counts
470
471
def get_job_info(job, preserve_metahosts=False, queue_entry_filter_data=None):
    """Summarize a job's hosts, metahosts and atomic group for RPC callers.

    @param job: models.Job instance.
    @param preserve_metahosts: if True, report an entry spawned from a
            metahost as the metahost rather than the host it was assigned.
    @param queue_entry_filter_data: optional filter dict applied to the
            job's HostQueueEntries before summarizing.
    @returns dict with dependencies, hosts, meta_hosts, meta_host_counts,
            one_time_hosts, atomic_group and hostless.
    """
    hosts = []
    one_time_hosts = []
    meta_hosts = []
    atomic_group = None
    hostless = False

    queue_entries = job.hostqueueentry_set.all()
    if queue_entry_filter_data:
        queue_entries = models.HostQueueEntry.query_objects(
                queue_entry_filter_data, initial_query=queue_entries)

    for queue_entry in queue_entries:
        if (queue_entry.host and (preserve_metahosts or
                                  not queue_entry.meta_host)):
            if queue_entry.deleted:
                continue
            # Invalid hosts were one-time hosts created for this job.
            if queue_entry.host.invalid:
                one_time_hosts.append(queue_entry.host)
            else:
                hosts.append(queue_entry.host)
        elif queue_entry.meta_host:
            meta_hosts.append(queue_entry.meta_host)
        else:
            hostless = True

        if atomic_group is None:
            if queue_entry.atomic_group is not None:
                atomic_group = queue_entry.atomic_group
        else:
            # Bug fix: the message used the bare name "id" — the builtin
            # id() function — instead of the job's id.
            assert atomic_group.name == queue_entry.atomic_group.name, (
                    'DB inconsistency. HostQueueEntries with multiple atomic'
                    ' groups on job %s: %s != %s' % (
                    job.id, atomic_group.name, queue_entry.atomic_group.name))

    meta_host_counts = _get_metahost_counts(meta_hosts)

    info = dict(dependencies=[label.name for label
                              in job.dependency_labels.all()],
                hosts=hosts,
                meta_hosts=meta_hosts,
                meta_host_counts=meta_host_counts,
                one_time_hosts=one_time_hosts,
                atomic_group=atomic_group,
                hostless=hostless)
    return info
518
519
def check_for_duplicate_hosts(host_objects):
    """Raise a ValidationError if any host appears more than once.

    @param host_objects: iterable of models.Host instances.
    @raises model_logic.ValidationError: listing the duplicated hostnames.
    """
    seen_ids = set()
    duplicate_hostnames = set()
    for host in host_objects:
        if host.id in seen_ids:
            duplicate_hostnames.add(host.hostname)
        seen_ids.add(host.id)

    if duplicate_hostnames:
        raise model_logic.ValidationError(
                {'hosts' : 'Duplicate hosts: %s'
                 % ', '.join(duplicate_hostnames)})
532
533
def create_new_job(owner, options, host_objects, metahost_objects,
                   atomic_group=None):
    """Validate job parameters, create the models.Job and queue its entries.

    @param owner: login name of the job owner.
    @param options: dict of job options (dependencies, synch_count,
            is_template, ...); note 'dependencies' is replaced in place
            with Label objects before Job.create() is called.
    @param host_objects: list of models.Host to run on.
    @param metahost_objects: list of models.Label used as metahosts.
    @param atomic_group: optional models.AtomicGroup the job targets.
    @returns the new job's id.
    @raises model_logic.ValidationError: on any invalid combination.
    """
    all_host_objects = host_objects + metahost_objects
    # NOTE(review): metahost_counts is computed but never used below.
    metahost_counts = _get_metahost_counts(metahost_objects)
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')

    if atomic_group:
        check_atomic_group_create_job(
                synch_count, host_objects, metahost_objects,
                dependencies, atomic_group)
    else:
        if synch_count is not None and synch_count > len(all_host_objects):
            raise model_logic.ValidationError(
                    {'hosts':
                     'only %d hosts provided for job with synch_count = %d' %
                     (len(all_host_objects), synch_count)})
        # Hosts belonging to an atomic group may only be used by jobs that
        # name that atomic group explicitly.
        atomic_hosts = models.Host.objects.filter(
                id__in=[host.id for host in host_objects],
                labels__atomic_group=True)
        unusable_host_names = [host.hostname for host in atomic_hosts]
        if unusable_host_names:
            raise model_logic.ValidationError(
                    {'hosts':
                     'Host(s) "%s" are atomic group hosts but no '
                     'atomic group was specified for this job.' %
                     (', '.join(unusable_host_names),)})

    check_for_duplicate_hosts(host_objects)

    for label_name in dependencies:
        # Special-action labels (e.g. provision targets) may not exist yet;
        # create them on demand.
        if provision.is_for_special_action(label_name):
            # TODO: We could save a few queries
            # if we had a bulk ensure-label-exists function, which used
            # a bulk .get() call. The win is probably very small.
            _ensure_label_exists(label_name)

    # This only checks targeted hosts, not hosts eligible due to the metahost
    check_job_dependencies(host_objects, dependencies)
    check_job_metahost_dependencies(metahost_objects, dependencies)

    options['dependencies'] = list(
            models.Label.objects.filter(name__in=dependencies))

    # Any atomic-group-bound label (metahost or dependency) must agree with
    # the atomic group supplied for the job.
    for label in metahost_objects + options['dependencies']:
        if label.atomic_group and not atomic_group:
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'Dependency %r requires an atomic group but no '
                     'atomic_group_name or meta_host in an atomic group was '
                     'specified for this job.' % label.name})
        elif (label.atomic_group and
              label.atomic_group.name != atomic_group.name):
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'meta_hosts or dependency %r requires atomic group '
                     '%r instead of the supplied atomic_group_name=%r.' %
                     (label.name, label.atomic_group.name, atomic_group.name)})

    job = models.Job.create(owner=owner, options=options,
                            hosts=all_host_objects)
    job.queue(all_host_objects, atomic_group=atomic_group,
              is_template=options.get('is_template', False))
    return job.id
showard0957a842009-05-11 19:25:08 +0000598
599
def _ensure_label_exists(name):
    """
    Ensure that a label called |name| exists in the Django models.

    This function is to be called from within afe rpcs only, as an
    alternative to server.cros.provision.ensure_label_exists(...). It works
    by Django model manipulation, rather than by making another create_label
    rpc call.

    @param name: the label to check for/create.
    @raises ValidationError: There was an error in the response that was
                             not because the label already existed.
    @returns True is a label was created, False otherwise.
    """
    try:
        models.Label.objects.get(name=name)
    except models.Label.DoesNotExist:
        # objects.create() both instantiates and saves the row, so the
        # explicit save() the original code issued here was a redundant
        # second UPDATE.
        # NOTE(review): get-then-create is racy under concurrent RPCs; a
        # concurrent creator surfaces as an IntegrityError, not
        # DoesNotExist — confirm callers tolerate that.
        models.Label.objects.create(name=name)
        return True
    return False
621
622
def find_platform_and_atomic_group(host):
    """
    Figure out the platform name and atomic group name for the given host
    object. If none, the return value for either will be None.

    @returns (platform name, atomic group name) for the given host.
    @raises ValueError: if the host carries more than one platform label.
    """
    platform_names = [label.name for label in host.label_list
                      if label.platform]
    platform = platform_names[0] if platform_names else None
    if len(platform_names) > 1:
        raise ValueError('Host %s has more than one platform: %s' %
                         (host.hostname, ', '.join(platform_names)))

    # Don't check for multiple atomic groups on a host here. That is an
    # error but should not trip up the RPC interface. monitor_db_cleanup
    # deals with it. This just returns the first one found.
    atomic_group_name = None
    for label in host.label_list:
        if label.atomic_group:
            atomic_group_name = label.atomic_group.name
            break

    return platform, atomic_group_name
showardc0ac3a72009-07-08 21:14:45 +0000648
649
650# support for get_host_queue_entries_and_special_tasks()
651
652def _common_entry_to_dict(entry, type, job_dict):
653 return dict(type=type,
654 host=entry.host.get_object_dict(),
655 job=job_dict,
656 execution_path=entry.execution_path(),
657 status=entry.status,
658 started_on=entry.started_on,
Jiaxi Luocb91d2e2014-06-30 10:37:22 -0700659 id=str(entry.id) + type,
660 oid=entry.id)
showardc0ac3a72009-07-08 21:14:45 +0000661
662
def _special_task_to_dict(special_task):
    """Serialize a SpecialTask, attaching its job's dict when one exists."""
    queue_entry = special_task.queue_entry
    job_dict = queue_entry.job.get_object_dict() if queue_entry else None
    return _common_entry_to_dict(special_task, special_task.task, job_dict)
668
669
def _queue_entry_to_dict(queue_entry):
    """Serialize a HostQueueEntry (type 'Job') for the entries+tasks RPC."""
    return _common_entry_to_dict(queue_entry, 'Job',
                                 queue_entry.job.get_object_dict())
673
674
def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done using two pieces of information:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
    started_on field of queue_entries. this isn't guaranteed to work perfectly
    since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
    last computed job ID.

    Both lists are expected newest-first; each task gains a .next_job_id
    attribute as a side effect (None for tasks newer than every job).
    """
    next_job_id = None  # most recently computed next job
    hqe_index = 0  # index for scanning by started_on times
    for task in special_tasks:
        if task.queue_entry:
            next_job_id = task.queue_entry.job.id
        elif task.time_started is not None:
            # Scan (newest-first) for the first entry started before the
            # task; the entry just before that is the task's next job.
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry.started_on is None:
                    continue
                if queue_entry.started_on < task.time_started:
                    break
                next_job_id = queue_entry.job.id

        task.next_job_id = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry.job.id < next_job_id:
                    break
                hqe_index += 1
708
def interleave_entries(queue_entries, special_tasks):
    """
    Merge queue entries and special tasks into one newest-first timeline.

    Both lists should be ordered by descending ID.

    @param queue_entries: HostQueueEntries, newest first.
    @param special_tasks: SpecialTasks, newest first.
    @returns list of serialized dicts with each job's special tasks placed
            immediately after the job entry they ran before.
    """
    _compute_next_job_for_tasks(queue_entries, special_tasks)

    # start with all special tasks that've run since the last job
    interleaved_entries = []
    for task in special_tasks:
        if task.next_job_id is not None:
            break
        interleaved_entries.append(_special_task_to_dict(task))

    # now interleave queue entries with the remaining special tasks
    special_task_index = len(interleaved_entries)
    for queue_entry in queue_entries:
        interleaved_entries.append(_queue_entry_to_dict(queue_entry))
        # add all tasks that ran between this job and the previous one
        for task in special_tasks[special_task_index:]:
            if task.next_job_id < queue_entry.job.id:
                break
            interleaved_entries.append(_special_task_to_dict(task))
            special_task_index += 1

    return interleaved_entries
jamesren4a41e012010-07-16 22:33:48 +0000734
735
def get_create_job_common_args(local_args):
    """Filter a dict down to the arguments create_job_common() accepts.

    @param local_args: Dict of candidate keyword arguments (typically the
            caller's locals()).  May be mutated: see the priority shim below.

    @returns a dict containing only the keys of local_args that match
            parameter names of create_job_common().
    """
    # Temporary migration shim: keep suite-scheduling tests alive while
    # priority transitions from a string to an int.  Any string priority is
    # coerced to the default int priority.
    if isinstance(local_args['priority'], str):
        local_args['priority'] = priorities.Priority.DEFAULT
    # </migration hack>
    valid_names, _, _, _ = inspect.getargspec(create_job_common)
    return dict((name, value) for name, value in local_args.iteritems()
                if name in valid_names)
750
751
def create_job_common(name, priority, control_type, control_file=None,
                      hosts=(), meta_hosts=(), one_time_hosts=(),
                      atomic_group_name=None, synch_count=None,
                      is_template=False, timeout=None, timeout_mins=None,
                      max_runtime_mins=None, run_verify=True, email_list='',
                      dependencies=(), reboot_before=None, reboot_after=None,
                      parse_failed_repair=None, hostless=False, keyvals=None,
                      drone_set=None, parameterized_job=None,
                      parent_job_id=None, test_retry=0, run_reset=True):
    #pylint: disable-msg=C0111
    """
    Common code between creating "standard" jobs and creating parameterized jobs

    @param name: Name of the job.
    @param priority: Priority of the job.
    @param control_type: Control file type; hostless jobs must use the
            server-side type.
    @param control_file: Contents of the control file.
    @param hosts: Hostnames to run the job on.
    @param meta_hosts: Label names to schedule on; an atomic group name is
            also accepted here (see below).
    @param one_time_hosts: Hostnames to create as one-time hosts.  Cannot be
            combined with an atomic group.
    @param atomic_group_name: Atomic group to schedule the job on, if any.
    @param synch_count: Number of machines required per execution; validated
            against the atomic group's maximum when one is used.
    @param hostless: If True, the job runs with no hosts at all; mutually
            exclusive with every host/label/atomic-group argument.
    @param parent_job_id: ID of the parent job, if any.

    The remaining keyword arguments are passed through unchanged into the
    job options dict (see the options=dict(...) call below).

    @raises model_logic.ValidationError: on any input-validation failure.

    @returns the result of create_new_job() -- presumably the new job's id;
            TODO(review): confirm against create_new_job's definition.
    """
    user = models.User.current_user()
    owner = user.login

    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
            or hostless):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "'atomic_group_name', or 'hostless'"
            })

    # Hostless jobs exclude all host arguments and require a server-side
    # control file.
    if hostless:
        if hosts or meta_hosts or one_time_hosts or atomic_group_name:
            raise model_logic.ValidationError({
                    'hostless': 'Hostless jobs cannot include any hosts!'})
        server_type = control_data.CONTROL_TYPE_NAMES.SERVER
        if control_type != server_type:
            raise model_logic.ValidationError({
                    'control_type': 'Hostless jobs cannot use client-side '
                                    'control files'})

    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())
    label_objects = list(models.Label.objects.filter(name__in=meta_hosts))

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label in label_objects:
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    meta_host_labels_by_name = {label.name: label for label in label_objects}
    for label_name in meta_hosts or []:
        if label_name in meta_host_labels_by_name:
            metahost_objects.append(meta_host_labels_by_name[label_name])
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                            'Label "%s" not found. If assumed to be an '
                            'atomic group it would conflict with the '
                            'supplied atomic group "%s".' % (
                                label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                {'one_time_hosts':
                 'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                {'atomic_group_name' :
                 'You have requested a synch_count (%d) greater than the '
                 'maximum machines in the requested Atomic Group (%d).' %
                 (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    # One-time hosts are created on the fly and appended to the explicit
    # host list.
    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    # Bundle all job options and hand off to the generic job constructor.
    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   timeout_mins=timeout_mins,
                   max_runtime_mins=max_runtime_mins,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parameterized_job=parameterized_job,
                   parent_job_id=parent_job_id,
                   test_retry=test_retry,
                   run_reset=run_reset)
    return create_new_job(owner=owner,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects,
                          atomic_group=atomic_group)
Simran Basib6ec8ae2014-04-23 12:05:08 -0700868
869
def encode_ascii(control_file):
    """Force a control file to only contain ascii characters.

    @param control_file: Control file to encode.

    @returns the control file in an ascii encoding.

    @raises error.ControlFileMalformed: if encoding fails.
    """
    try:
        return control_file.encode('ascii')
    except UnicodeError as e:
        # Catch UnicodeError, the common base class: encoding a unicode
        # string with non-ascii characters raises UnicodeEncodeError, while
        # encoding a byte string with non-ascii bytes raises
        # UnicodeDecodeError (from the implicit decode).  The previous code
        # only caught UnicodeDecodeError, so unicode input escaped as a raw
        # UnicodeEncodeError instead of ControlFileMalformed.
        raise error.ControlFileMalformed(str(e))
883
884
def get_wmatrix_url():
    """Fetch the wmatrix url from the global config file.

    @returns the configured wmatrix url, or an empty string when unset.
    """
    config = global_config.global_config
    return config.get_config_value('AUTOTEST_WEB', 'wmatrix_url', default='')
Jiaxi Luo57bc1952014-07-22 15:27:30 -0700893
894
def inject_times_to_filter(start_time_key=None, end_time_key=None,
                           start_time_value=None, end_time_value=None,
                           **filter_data):
    """Add start/end time constraints to a filter dict when values are given.

    @param start_time_key: A string represents the filter key of start_time.
    @param end_time_key: A string represents the filter key of end_time.
    @param start_time_value: Start_time value; skipped when falsy.
    @param end_time_value: End_time value; skipped when falsy.
    @param filter_data: Existing filter keyword arguments to extend.

    @returns the injected filter_data.
    """
    time_pairs = ((start_time_key, start_time_value),
                  (end_time_key, end_time_value))
    for key, value in time_pairs:
        if value:
            filter_data[key] = value
    return filter_data
912
913
def inject_times_to_hqe_special_tasks_filters(filter_data_common,
                                              start_time, end_time):
    """Build time-constrained filters for both hqes and special tasks.

    @param filter_data_common: Common filter for hqe and special tasks.
    @param start_time: Start time value to filter on (may be None).
    @param end_time: End time value to filter on (may be None).

    @returns a pair of (hqe filter, special tasks filter).
    """
    filter_data_special_tasks = filter_data_common.copy()
    hqe_filter = inject_times_to_filter(
            'started_on__gte', 'started_on__lte', start_time, end_time,
            **filter_data_common)
    special_tasks_filter = inject_times_to_filter(
            'time_started__gte', 'time_started__lte', start_time, end_time,
            **filter_data_special_tasks)
    return hqe_filter, special_tasks_filter
930
931
def retrieve_shard(shard_hostname):
    """
    Look up a shard by its hostname in the database.

    @param shard_hostname: Hostname of the shard to retrieve

    @raises models.Shard.DoesNotExist, if no shard with this hostname was found.

    @returns: Shard object
    """
    shard = models.Shard.smart_get(shard_hostname)
    return shard
Jakob Juelich59cfe542014-09-02 16:37:46 -0700943
944
def find_records_for_shard(shard):
    """Find records that should be sent to a shard.

    @param shard: Shard to find records for.

    @returns: Tuple of two lists for hosts and jobs: (hosts, jobs).
    """
    # Hosts are assigned before jobs, matching the original call order.
    return (models.Host.assign_to_shard(shard),
            models.Job.assign_to_shard(shard))
Jakob Juelicha94efe62014-09-18 16:02:49 -0700956
957
def _persist_records_with_type_sent_from_shard(
    shard, records, record_type, *args, **kwargs):
    """
    Handle records of a specified type that were sent to the shard master.

    Each serialized record must correspond to an existing object on the
    master; it is sanity-checked and then used to update that object.

    @param shard: The shard the records were sent from.
    @param records: The records sent in their serialized format.
    @param record_type: Type of the objects represented by records.
    @param args: Additional arguments passed on to the sanity checks.
    @param kwargs: Additional arguments passed on to the sanity checks.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks
            fail.

    @returns: List of primary keys of the processed records.
    """
    processed_pks = []
    for serialized in records:
        primary_key = serialized['id']
        try:
            existing = record_type.objects.get(pk=primary_key)
        except record_type.DoesNotExist:
            raise error.UnallowedRecordsSentToMaster(
                'Object with pk %s of type %s does not exist on master.' % (
                    primary_key, record_type))

        existing.sanity_check_update_from_shard(
            shard, serialized, *args, **kwargs)

        existing.update_from_serialized(serialized)
        processed_pks.append(primary_key)
    return processed_pks
991
992
def persist_records_sent_from_shard(shard, jobs, hqes):
    """
    Sanity check then save serialized records a shard sent to the master.

    During heartbeats shards upload jobs and hostqueuentries. This performs
    some sanity checks on these and then updates the existing records for those
    entries with the updated ones from the heartbeat.

    The sanity checks include:
    - Checking if the objects sent already exist on the master.
    - Checking if the objects sent were assigned to this shard.
    - hostqueueentries must be sent together with their jobs.

    @param shard: The shard the records were sent from.
    @param jobs: The jobs the shard sent.
    @param hqes: The hostqueuentries the shart sent.

    @raises error.UnallowedRecordsSentToMaster if any of the sanity checks fail.
    """
    # Jobs must be persisted first so hqes can be validated against them.
    persisted_job_ids = _persist_records_with_type_sent_from_shard(
            shard, jobs, models.Job)
    _persist_records_with_type_sent_from_shard(
            shard, hqes, models.HostQueueEntry, job_ids_sent=persisted_job_ids)