"""\
Utility functions for rpc_interface.py. We keep them in a separate file so that
only RPC interface functions go into that file.
"""

__author__ = 'showard@google.com (Steve Howard)'

import datetime, xmlrpclib, threading
from frontend.afe import models, model_logic

NULL_DATETIME = datetime.datetime.max
NULL_DATE = datetime.date.max

def prepare_for_serialization(objects):
    """
    Prepare Python objects to be returned via RPC.
    """
    if (isinstance(objects, list) and len(objects) and
        isinstance(objects[0], dict) and 'id' in objects[0]):
        objects = gather_unique_dicts(objects)
    return _prepare_data(objects)


def _prepare_data(data):
    """
    Recursively process data structures, performing necessary type
    conversions to values in data to allow for RPC serialization:
    -convert datetimes to strings
    -convert tuples and sets to lists
    """
    if isinstance(data, dict):
        new_data = {}
        for key, value in data.iteritems():
            new_data[key] = _prepare_data(value)
        return new_data
    elif (isinstance(data, list) or isinstance(data, tuple) or
          isinstance(data, set)):
        return [_prepare_data(item) for item in data]
    elif isinstance(data, datetime.date):
        if data is NULL_DATETIME or data is NULL_DATE:
            return None
        return str(data)
    else:
        return data
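# Illustrative example (values assumed): datetimes become strings, tuples and
# sets become lists, and the NULL sentinels map to None, e.g.
#   _prepare_data({'started': datetime.datetime(2008, 1, 1),
#                  'hosts': ('host1', 'host2'), 'finished': NULL_DATETIME})
# would return {'started': '2008-01-01 00:00:00',
#               'hosts': ['host1', 'host2'], 'finished': None}.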


def gather_unique_dicts(dict_iterable):
    """\
    Pick out unique objects (by ID) from an iterable of object dicts.
    """
    id_set = set()
    result = []
    for obj in dict_iterable:
        if obj['id'] not in id_set:
            id_set.add(obj['id'])
            result.append(obj)
    return result
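# Illustrative example (values assumed): the first occurrence of each id is
# kept, so gather_unique_dicts([{'id': 1}, {'id': 2}, {'id': 1}]) returns
# [{'id': 1}, {'id': 2}].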


def extra_job_filters(not_yet_run=False, running=False, finished=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra(). No more than one of
    the parameters should be passed as True.
    """
    assert not ((not_yet_run and running) or
                (not_yet_run and finished) or
                (running and finished)), ('Cannot specify more than one '
                                          'filter to this function')
    if not_yet_run:
        where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
                 'WHERE active OR complete)']
    elif running:
        where = ['(id IN (SELECT job_id FROM host_queue_entries '
                  'WHERE active OR complete)) AND '
                 '(id IN (SELECT job_id FROM host_queue_entries '
                  'WHERE not complete OR active))']
    elif finished:
        where = ['id NOT IN (SELECT job_id FROM host_queue_entries '
                 'WHERE not complete OR active)']
    else:
        return None
    return {'where': where}
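# Illustrative usage (call site assumed, not from the original source): the
# returned dict is meant to be expanded into a Django queryset's extra(), e.g.
#   filters = extra_job_filters(not_yet_run=True)
#   if filters:
#       jobs = models.Job.objects.extra(**filters)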


def extra_host_filters(multiple_labels=()):
    """\
    Generate SQL WHERE clauses for matching hosts in an intersection of
    labels.
    """
    extra_args = {}
    where_str = ('hosts.id in (select host_id from hosts_labels '
                 'where label_id=%s)')
    extra_args['where'] = [where_str] * len(multiple_labels)
    extra_args['params'] = [models.Label.smart_get(label).id
                            for label in multiple_labels]
    return extra_args
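# Illustrative usage (label names assumed): each requested label contributes
# one WHERE clause, so only hosts carrying every listed label match, e.g.
#   extra_args = extra_host_filters(['regression', 'x86_64'])
#   hosts = models.Host.objects.extra(**extra_args)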


class InconsistencyException(Exception):
    'Raised when a list of objects does not have a consistent value'


def get_consistent_value(objects, field):
    if not objects:
        # well a list of nothing is consistent
        return None

    value = getattr(objects[0], field)
    for obj in objects:
        this_value = getattr(obj, field)
        if this_value != value:
            raise InconsistencyException(objects[0], obj)
    return value
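# Illustrative behaviour (objects assumed): if every object has the same value
# for the field, that value is returned; the first object whose value differs
# from objects[0] triggers InconsistencyException(objects[0], obj).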


def prepare_generate_control_file(tests, kernel, label, profilers):
    test_objects = [models.Test.smart_get(test) for test in tests]
    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    except InconsistencyException, exc:
        test1, test2 = exc.args
        raise model_logic.ValidationError(
            {'tests' : 'You cannot run both server- and client-side '
                       'tests together (tests %s and %s differ)' % (
                           test1.name, test2.name)})

    is_server = (test_type == models.Test.Types.SERVER)
    synch_count = max(test.sync_count for test in test_objects)
    if label:
        label = models.Label.smart_get(label)

    dependencies = set(label.name for label
                       in models.Label.objects.filter(test__in=test_objects))

    cf_info = dict(is_server=is_server, synch_count=synch_count,
                   dependencies=list(dependencies))
    return cf_info, test_objects, profiler_objects, label
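# Note (summary, not in the original source): the caller (presumably
# rpc_interface.generate_control_file) receives
# (cf_info, test_objects, profiler_objects, label), where cf_info carries
# is_server, synch_count and the dependency label names needed to build the
# control file.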


def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.
    host_objects: list of models.Host objects
    job_dependencies: list of names of labels
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    hosts_in_job = models.Host.objects.filter(id__in=host_ids)
    ok_hosts = hosts_in_job
    for index, dependency in enumerate(job_dependencies):
        ok_hosts &= models.Host.objects.filter_custom_join(
            '_label%d' % index, labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
            {'hosts' : 'Host(s) failed to meet job dependencies: ' +
                       ', '.join(failing_hosts)})

    # check for hosts that have only_if_needed labels that aren't requested
    labels_not_requested = models.Label.objects.filter(only_if_needed=True,
                                                       host__id__in=host_ids)
    labels_not_requested = labels_not_requested.exclude(
        name__in=job_dependencies)
    errors = []
    for label in labels_not_requested:
        hosts_in_label = hosts_in_job.filter(labels=label)
        errors.append('Cannot use hosts with label "%s" unless requested: %s' %
                      (label.name,
                       ', '.join(host.hostname for host in hosts_in_label)))
    if errors:
        raise model_logic.ValidationError({'hosts' : '\n'.join(errors)})
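# Note (explanatory, not in the original source): the first half intersects one
# filter_custom_join() queryset per dependency, so ok_hosts ends up as the
# hosts carrying *all* requested labels; the second half rejects hosts whose
# only_if_needed labels were not requested.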


def _execution_key_for(host_queue_entry):
    return (host_queue_entry.job.id, host_queue_entry.execution_subdir)


def check_abort_synchronous_jobs(host_queue_entries):
    # ensure user isn't aborting part of a synchronous autoserv execution
    count_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        count_per_execution.setdefault(key, 0)
        count_per_execution[key] += 1

    for queue_entry in host_queue_entries:
        if not queue_entry.execution_subdir:
            continue
        execution_count = count_per_execution[_execution_key_for(queue_entry)]
        if execution_count < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                {'' : 'You cannot abort part of a synchronous job execution '
                      '(%d/%s), %d included, %d expected'
                      % (queue_entry.job.id, queue_entry.execution_subdir,
                         execution_count, queue_entry.job.synch_count)})
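# Illustrative example (values assumed): for a job with synch_count=2 whose two
# queue entries share execution_subdir 'group0', aborting just one of them
# yields execution_count=1 < 2 and raises ValidationError; aborting both at
# once passes.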