#pylint: disable-msg=C0111
"""\
Utility functions for rpc_interface.py. We keep them in a separate file so that
only RPC interface functions go into that file.
"""

__author__ = 'showard@google.com (Steve Howard)'

import datetime
import inspect
import os
import django.http
from autotest_lib.frontend.afe import models, model_logic
from autotest_lib.client.common_lib import control_data

NULL_DATETIME = datetime.datetime.max
NULL_DATE = datetime.date.max

def prepare_for_serialization(objects):
    """
    Prepare Python objects to be returned via RPC.
    @param objects: objects to be prepared.
    """
    if (isinstance(objects, list) and len(objects) and
        isinstance(objects[0], dict) and 'id' in objects[0]):
        objects = gather_unique_dicts(objects)
    return _prepare_data(objects)


def prepare_rows_as_nested_dicts(query, nested_dict_column_names):
    """
    Prepare a Django query to be returned via RPC as a sequence of nested
    dictionaries.

    @param query - A Django model query object with a select_related() method.
    @param nested_dict_column_names - A list of column/attribute names for the
            rows returned by query to expand into nested dictionaries using
            their get_object_dict() method when not None.

    @returns A list suitable to be returned in an RPC.
    """
    all_dicts = []
    for row in query.select_related():
        row_dict = row.get_object_dict()
        for column in nested_dict_column_names:
            if row_dict[column] is not None:
                row_dict[column] = getattr(row, column).get_object_dict()
        all_dicts.append(row_dict)
    return prepare_for_serialization(all_dicts)

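# Illustrative sketch (the caller and column names here are hypothetical): an
# RPC that returns host queue entries could expand foreign-key columns into
# nested dictionaries like so:
#
#     entries = models.HostQueueEntry.query_objects(filter_data)
#     return prepare_rows_as_nested_dicts(entries, ('host', 'job'))
#
# Each resulting dict then carries row_dict['host'] and row_dict['job'] as
# dictionaries (or None) instead of bare foreign-key values.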

def _prepare_data(data):
    """
    Recursively process data structures, performing necessary type
    conversions to values in data to allow for RPC serialization:
    -convert datetimes to strings
    -convert tuples and sets to lists
    """
    if isinstance(data, dict):
        new_data = {}
        for key, value in data.iteritems():
            new_data[key] = _prepare_data(value)
        return new_data
    elif (isinstance(data, list) or isinstance(data, tuple) or
          isinstance(data, set)):
        return [_prepare_data(item) for item in data]
    elif isinstance(data, datetime.date):
        if data is NULL_DATETIME or data is NULL_DATE:
            return None
        return str(data)
    else:
        return data

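# Minimal sketch of the conversions above (values are illustrative):
#
#     _prepare_data({'finished': datetime.datetime(2013, 1, 1),
#                    'labels': set(['board:x86']),
#                    'started': NULL_DATETIME})
#     # -> {'finished': '2013-01-01 00:00:00',
#     #     'labels': ['board:x86'],
#     #     'started': None}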

def raw_http_response(response_data, content_type=None):
    """Return response_data wrapped in a raw django.http.HttpResponse."""
    response = django.http.HttpResponse(response_data, mimetype=content_type)
    response['Content-length'] = str(len(response.content))
    return response


def gather_unique_dicts(dict_iterable):
    """\
    Pick out unique objects (by ID) from an iterable of object dicts.
    """
    id_set = set()
    result = []
    for obj in dict_iterable:
        if obj['id'] not in id_set:
            id_set.add(obj['id'])
            result.append(obj)
    return result

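# Sketch of the de-duplication above (dicts are illustrative); the first
# occurrence of each id wins and input order is preserved:
#
#     gather_unique_dicts([{'id': 1, 'hostname': 'a'},
#                          {'id': 1, 'hostname': 'a'},
#                          {'id': 2, 'hostname': 'b'}])
#     # -> [{'id': 1, 'hostname': 'a'}, {'id': 2, 'hostname': 'b'}]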

def extra_job_filters(not_yet_run=False, running=False, finished=False):
    """\
    Generate a SQL WHERE clause for job status filtering, and return it in
    a dict of keyword args to pass to query.extra(). No more than one of
    the parameters should be passed as True.
    * not_yet_run: all HQEs are Queued
    * finished: all HQEs are complete
    * running: everything else
    """
    assert not ((not_yet_run and running) or
                (not_yet_run and finished) or
                (running and finished)), ('Cannot specify more than one '
                                          'filter to this function')

    not_queued = ('(SELECT job_id FROM afe_host_queue_entries '
                  'WHERE status != "%s")'
                  % models.HostQueueEntry.Status.QUEUED)
    not_finished = ('(SELECT job_id FROM afe_host_queue_entries '
                    'WHERE not complete)')

    if not_yet_run:
        where = ['id NOT IN ' + not_queued]
    elif running:
        where = ['(id IN %s) AND (id IN %s)' % (not_queued, not_finished)]
    elif finished:
        where = ['id NOT IN ' + not_finished]
    else:
        return {}
    return {'where': where}

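# Minimal sketch of how the returned dict is meant to be used, assuming
# models.HostQueueEntry.Status.QUEUED is the string 'Queued' (the surrounding
# query is hypothetical):
#
#     extra = extra_job_filters(not_yet_run=True)
#     # extra == {'where': ['id NOT IN (SELECT job_id FROM '
#     #                     'afe_host_queue_entries WHERE status != "Queued")']}
#     jobs = models.Job.objects.extra(**extra)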

def extra_host_filters(multiple_labels=()):
    """\
    Generate SQL WHERE clauses for matching hosts in an intersection of
    labels.
    """
    extra_args = {}
    where_str = ('afe_hosts.id in (select host_id from afe_hosts_labels '
                 'where label_id=%s)')
    extra_args['where'] = [where_str] * len(multiple_labels)
    extra_args['params'] = [models.Label.smart_get(label).id
                            for label in multiple_labels]
    return extra_args

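# Sketch (label names are hypothetical): each label contributes one copy of
# the WHERE clause, so applying the result with query.extra() keeps only hosts
# carrying every listed label:
#
#     extra_host_filters(['board:lumpy', 'pool:bvt'])
#     # -> {'where': [where_str, where_str],
#     #     'params': [<id of board:lumpy>, <id of pool:bvt>]}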

def get_host_query(multiple_labels, exclude_only_if_needed_labels,
                   exclude_atomic_group_hosts, valid_only, filter_data):
    """
    Build the Host query used by host RPCs: optionally exclude hosts that
    carry only_if_needed labels or belong to an atomic group, then apply
    filter_data plus the label-intersection filters from multiple_labels.
    """
    if valid_only:
        query = models.Host.valid_objects.all()
    else:
        query = models.Host.objects.all()

    if exclude_only_if_needed_labels:
        only_if_needed_labels = models.Label.valid_objects.filter(
            only_if_needed=True)
        if only_if_needed_labels.count() > 0:
            only_if_needed_ids = ','.join(
                    str(label['id'])
                    for label in only_if_needed_labels.values('id'))
            query = models.Host.objects.add_join(
                query, 'afe_hosts_labels', join_key='host_id',
                join_condition=('afe_hosts_labels_exclude_OIN.label_id IN (%s)'
                                % only_if_needed_ids),
                suffix='_exclude_OIN', exclude=True)

    if exclude_atomic_group_hosts:
        atomic_group_labels = models.Label.valid_objects.filter(
                atomic_group__isnull=False)
        if atomic_group_labels.count() > 0:
            atomic_group_label_ids = ','.join(
                    str(atomic_group['id'])
                    for atomic_group in atomic_group_labels.values('id'))
            query = models.Host.objects.add_join(
                    query, 'afe_hosts_labels', join_key='host_id',
                    join_condition=(
                            'afe_hosts_labels_exclude_AG.label_id IN (%s)'
                            % atomic_group_label_ids),
                    suffix='_exclude_AG', exclude=True)
    try:
        assert 'extra_args' not in filter_data
        filter_data['extra_args'] = extra_host_filters(multiple_labels)
        return models.Host.query_objects(filter_data, initial_query=query)
    except models.Label.DoesNotExist:
        return models.Host.objects.none()


class InconsistencyException(Exception):
    'Raised when a list of objects does not have a consistent value'


def get_consistent_value(objects, field):
    if not objects:
        # a list of nothing is trivially consistent
        return None

    value = getattr(objects[0], field)
    for obj in objects:
        this_value = getattr(obj, field)
        if this_value != value:
            raise InconsistencyException(objects[0], obj)
    return value

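# Sketch of the consistency check above (the objects are hypothetical test
# rows):
#
#     get_consistent_value([test_a, test_b], 'test_type')
#     # returns the shared test_type, or raises InconsistencyException with
#     # the first mismatching pair of objects as its args.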

def prepare_generate_control_file(tests, kernel, label, profilers):
    """
    Resolve the given test/profiler/label names to model objects and compute
    the control file metadata (is_server, synch_count, dependencies).
    """
    test_objects = [models.Test.smart_get(test) for test in tests]
    profiler_objects = [models.Profiler.smart_get(profiler)
                        for profiler in profilers]
    # ensure tests are all the same type
    try:
        test_type = get_consistent_value(test_objects, 'test_type')
    except InconsistencyException as exc:
        test1, test2 = exc.args
        raise model_logic.ValidationError(
                {'tests' : 'You cannot run both server- and client-side '
                           'tests together (tests %s and %s differ)' % (
                           test1.name, test2.name)})

    is_server = (test_type == control_data.CONTROL_TYPE.SERVER)
    if test_objects:
        synch_count = max(test.sync_count for test in test_objects)
    else:
        synch_count = 1
    if label:
        label = models.Label.smart_get(label)

    dependencies = set(label.name for label
                       in models.Label.objects.filter(test__in=test_objects))

    cf_info = dict(is_server=is_server, synch_count=synch_count,
                   dependencies=list(dependencies))
    return cf_info, test_objects, profiler_objects, label


def check_job_dependencies(host_objects, job_dependencies):
    """
    Check that a set of machines satisfies a job's dependencies.
    host_objects: list of models.Host objects
    job_dependencies: list of names of labels
    """
    # check that hosts satisfy dependencies
    host_ids = [host.id for host in host_objects]
    hosts_in_job = models.Host.objects.filter(id__in=host_ids)
    ok_hosts = hosts_in_job
    for dependency in job_dependencies:
        ok_hosts = ok_hosts.filter(labels__name=dependency)
    failing_hosts = (set(host.hostname for host in host_objects) -
                     set(host.hostname for host in ok_hosts))
    if failing_hosts:
        raise model_logic.ValidationError(
                {'hosts' : 'Host(s) failed to meet job dependencies (' +
                           (', '.join(job_dependencies)) + '): ' +
                           (', '.join(failing_hosts))})


def _execution_key_for(host_queue_entry):
    return (host_queue_entry.job.id, host_queue_entry.execution_subdir)


def check_abort_synchronous_jobs(host_queue_entries):
    # ensure user isn't aborting part of a synchronous autoserv execution
    count_per_execution = {}
    for queue_entry in host_queue_entries:
        key = _execution_key_for(queue_entry)
        count_per_execution.setdefault(key, 0)
        count_per_execution[key] += 1

    for queue_entry in host_queue_entries:
        if not queue_entry.execution_subdir:
            continue
        execution_count = count_per_execution[_execution_key_for(queue_entry)]
        if execution_count < queue_entry.job.synch_count:
            raise model_logic.ValidationError(
                    {'' : 'You cannot abort part of a synchronous job '
                          'execution (%d/%s), %d included, %d expected'
                          % (queue_entry.job.id, queue_entry.execution_subdir,
                             execution_count, queue_entry.job.synch_count)})

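# Illustrative scenario for the abort check above: a job with synch_count = 2
# has two queue entries sharing execution_subdir 'group0'.  Aborting only one
# of them gives execution_count 1 < synch_count 2 and raises ValidationError;
# aborting both together passes.  (Numbers and subdir name are illustrative.)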

def check_atomic_group_create_job(synch_count, host_objects, metahost_objects,
                                  dependencies, atomic_group, labels_by_name):
    """
    Attempt to reject create_job requests with an atomic group that
    will be impossible to schedule. The checks are not perfect but
    should catch the most obvious issues.

    @param synch_count - The job's minimum synch count.
    @param host_objects - A list of models.Host instances.
    @param metahost_objects - A list of models.Label instances.
    @param dependencies - A list of job dependency label names.
    @param atomic_group - The models.AtomicGroup instance.
    @param labels_by_name - A dictionary mapping label names to models.Label
            instances. Used to look up instances for dependencies.

    @raises model_logic.ValidationError - When an issue is found.
    """
    # If specific host objects were supplied with an atomic group, verify
    # that there are enough to satisfy the synch_count.
    minimum_required = synch_count or 1
    if (host_objects and not metahost_objects and
        len(host_objects) < minimum_required):
        raise model_logic.ValidationError(
                {'hosts':
                 'only %d hosts provided for job with synch_count = %d' %
                 (len(host_objects), synch_count)})

    # Check that the atomic group has a hope of running this job
    # given any supplied metahosts and dependencies that may limit it.

    # Get a set of hostnames in the atomic group.
    possible_hosts = set()
    for label in atomic_group.label_set.all():
        possible_hosts.update(h.hostname for h in label.host_set.all())

    # Filter out hosts that don't match all of the job dependency labels.
    for label_name in set(dependencies):
        label = labels_by_name[label_name]
        hosts_in_label = (h.hostname for h in label.host_set.all())
        possible_hosts.intersection_update(hosts_in_label)

    if not host_objects and not metahost_objects:
        # No hosts or metahosts are required to queue an atomic group Job.
        # However, if they are given, we respect them below.
        host_set = possible_hosts
    else:
        host_set = set(host.hostname for host in host_objects)
        unusable_host_set = host_set.difference(possible_hosts)
        if unusable_host_set:
            raise model_logic.ValidationError(
                {'hosts': 'Hosts "%s" are not in Atomic Group "%s"' %
                 (', '.join(sorted(unusable_host_set)), atomic_group.name)})

    # Lookup hosts provided by each meta host and merge them into the
    # host_set for final counting.
    for meta_host in metahost_objects:
        meta_possible = possible_hosts.copy()
        hosts_in_meta_host = (h.hostname for h in meta_host.host_set.all())
        meta_possible.intersection_update(hosts_in_meta_host)

        # Count all hosts that this meta_host will provide.
        host_set.update(meta_possible)

    if len(host_set) < minimum_required:
        raise model_logic.ValidationError(
                {'atomic_group_name':
                 'Insufficient hosts in Atomic Group "%s" with the'
                 ' supplied dependencies and meta_hosts.' %
                 (atomic_group.name,)})

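# Illustrative scenario for the check above (names are hypothetical): for an
# atomic group "rack1" whose labels cover hosts {h1, h2, h3}, a request with
# synch_count=2, dependencies=['bluetooth'], and no explicit hosts or
# metahosts passes only if at least two of h1-h3 also carry the 'bluetooth'
# label; otherwise the 'Insufficient hosts' ValidationError above is raised.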

def check_modify_host(update_data):
    """
    Sanity check modify_host* requests.

    @param update_data: A dictionary with the changes to make to a host
            or hosts.
    """
    # Only the scheduler (monitor_db) is allowed to modify Host status.
    # Otherwise race conditions happen as a host's state is changed out from
    # beneath tasks being run on a host.
    if 'status' in update_data:
        raise model_logic.ValidationError({
                'status': 'Host status can not be modified by the frontend.'})


def check_modify_host_locking(host, update_data):
    """
    Checks when locking/unlocking has been requested if the host is already
    locked/unlocked.

    @param host: models.Host object to be modified
    @param update_data: A dictionary with the changes to make to the host.
    """
    locked = update_data.get('locked', None)
    if locked is not None:
        if locked and host.locked:
            raise model_logic.ValidationError({
                    'locked': 'Host already locked by %s on %s.' %
                    (host.locked_by, host.lock_time)})
        if not locked and not host.locked:
            raise model_logic.ValidationError({
                    'locked': 'Host already unlocked.'})


def get_motd():
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, "..", "..", "motd.txt")
    text = ''
    try:
        fp = open(filename, "r")
        try:
            text = fp.read()
        finally:
            fp.close()
    except IOError:
        pass

    return text


def _get_metahost_counts(metahost_objects):
    metahost_counts = {}
    for metahost in metahost_objects:
        metahost_counts.setdefault(metahost, 0)
        metahost_counts[metahost] += 1
    return metahost_counts

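# Sketch (labels are hypothetical): repeated metahost labels collapse into
# per-label counts:
#
#     _get_metahost_counts([lumpy, lumpy, stumpy])
#     # -> {lumpy: 2, stumpy: 1}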

def get_job_info(job, preserve_metahosts=False, queue_entry_filter_data=None):
    """
    Return a dict describing the given job: its hosts, meta_hosts (with
    per-label counts), one-time hosts, dependency label names, atomic group,
    and whether it is hostless.
    """
    hosts = []
    one_time_hosts = []
    meta_hosts = []
    atomic_group = None
    hostless = False

    queue_entries = job.hostqueueentry_set.all()
    if queue_entry_filter_data:
        queue_entries = models.HostQueueEntry.query_objects(
            queue_entry_filter_data, initial_query=queue_entries)

    for queue_entry in queue_entries:
        if (queue_entry.host and (preserve_metahosts or
                                  not queue_entry.meta_host)):
            if queue_entry.deleted:
                continue
            if queue_entry.host.invalid:
                one_time_hosts.append(queue_entry.host)
            else:
                hosts.append(queue_entry.host)
        elif queue_entry.meta_host:
            meta_hosts.append(queue_entry.meta_host)
        else:
            hostless = True

        if atomic_group is None:
            if queue_entry.atomic_group is not None:
                atomic_group = queue_entry.atomic_group
        else:
            assert atomic_group.name == queue_entry.atomic_group.name, (
                    'DB inconsistency. HostQueueEntries with multiple atomic'
                    ' groups on job %s: %s != %s' % (
                        job.id, atomic_group.name,
                        queue_entry.atomic_group.name))

    meta_host_counts = _get_metahost_counts(meta_hosts)

    info = dict(dependencies=[label.name for label
                              in job.dependency_labels.all()],
                hosts=hosts,
                meta_hosts=meta_hosts,
                meta_host_counts=meta_host_counts,
                one_time_hosts=one_time_hosts,
                atomic_group=atomic_group,
                hostless=hostless)
    return info


def check_for_duplicate_hosts(host_objects):
    """Raise a ValidationError if any host appears more than once."""
    host_ids = set()
    duplicate_hostnames = set()
    for host in host_objects:
        if host.id in host_ids:
            duplicate_hostnames.add(host.hostname)
        host_ids.add(host.id)

    if duplicate_hostnames:
        raise model_logic.ValidationError(
                {'hosts' : 'Duplicate hosts: %s'
                 % ', '.join(duplicate_hostnames)})


def create_new_job(owner, options, host_objects, metahost_objects,
                   atomic_group=None):
    labels_by_name = dict((label.name, label)
                          for label in models.Label.objects.all())
    all_host_objects = host_objects + metahost_objects
    metahost_counts = _get_metahost_counts(metahost_objects)
    dependencies = options.get('dependencies', [])
    synch_count = options.get('synch_count')

    if atomic_group:
        check_atomic_group_create_job(
                synch_count, host_objects, metahost_objects,
                dependencies, atomic_group, labels_by_name)
    else:
        if synch_count is not None and synch_count > len(all_host_objects):
            raise model_logic.ValidationError(
                    {'hosts':
                     'only %d hosts provided for job with synch_count = %d' %
                     (len(all_host_objects), synch_count)})
        atomic_hosts = models.Host.objects.filter(
                id__in=[host.id for host in host_objects],
                labels__atomic_group=True)
        unusable_host_names = [host.hostname for host in atomic_hosts]
        if unusable_host_names:
            raise model_logic.ValidationError(
                    {'hosts':
                     'Host(s) "%s" are atomic group hosts but no '
                     'atomic group was specified for this job.' %
                     (', '.join(unusable_host_names),)})

    check_for_duplicate_hosts(host_objects)

    check_job_dependencies(host_objects, dependencies)
    options['dependencies'] = [labels_by_name[label_name]
                               for label_name in dependencies]

    for label in metahost_objects + options['dependencies']:
        if label.atomic_group and not atomic_group:
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'Dependency %r requires an atomic group but no '
                     'atomic_group_name or meta_host in an atomic group was '
                     'specified for this job.' % label.name})
        elif (label.atomic_group and
              label.atomic_group.name != atomic_group.name):
            raise model_logic.ValidationError(
                    {'atomic_group_name':
                     'meta_hosts or dependency %r requires atomic group '
                     '%r instead of the supplied atomic_group_name=%r.' %
                     (label.name, label.atomic_group.name, atomic_group.name)})

    job = models.Job.create(owner=owner, options=options,
                            hosts=all_host_objects)
    job.queue(all_host_objects, atomic_group=atomic_group,
              is_template=options.get('is_template', False))
    return job.id


def find_platform_and_atomic_group(host):
    """
    Figure out the platform name and atomic group name for the given host
    object. If none, the return value for either will be None.

    @returns (platform name, atomic group name) for the given host.
    """
    platforms = [label.name for label in host.label_list if label.platform]
    if not platforms:
        platform = None
    else:
        platform = platforms[0]
        if len(platforms) > 1:
            raise ValueError('Host %s has more than one platform: %s' %
                             (host.hostname, ', '.join(platforms)))
    for label in host.label_list:
        if label.atomic_group:
            atomic_group_name = label.atomic_group.name
            break
    else:
        atomic_group_name = None
    # Don't check for multiple atomic groups on a host here. That is an
    # error but should not trip up the RPC interface. monitor_db_cleanup
    # deals with it. This just returns the first one found.
    return platform, atomic_group_name

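# Sketch (labels are hypothetical): for a host whose labels include the
# platform 'netbook_MARIO' and one label belonging to an atomic group, this
# returns ('netbook_MARIO', <that group's name>); with neither kind of label
# it returns (None, None).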

# support for get_host_queue_entries_and_special_tasks()

def _common_entry_to_dict(entry, type, job_dict):
    return dict(type=type,
                host=entry.host.get_object_dict(),
                job=job_dict,
                execution_path=entry.execution_path(),
                status=entry.status,
                started_on=entry.started_on,
                id=str(entry.id) + type)


def _special_task_to_dict(special_task):
    job_dict = None
    if special_task.queue_entry:
        job_dict = special_task.queue_entry.job.get_object_dict()
    return _common_entry_to_dict(special_task, special_task.task, job_dict)


def _queue_entry_to_dict(queue_entry):
    return _common_entry_to_dict(queue_entry, 'Job',
                                 queue_entry.job.get_object_dict())


def _compute_next_job_for_tasks(queue_entries, special_tasks):
    """
    For each task, try to figure out the next job that ran after that task.
    This is done using the following pieces of information:
    * if the task has a queue entry, we can use that entry's job ID.
    * if the task has a time_started, we can try to compare that against the
      started_on field of queue_entries. This isn't guaranteed to work
      perfectly since queue_entries may also have null started_on values.
    * if the task has neither, or if use of time_started fails, just use the
      last computed job ID.
    """
    next_job_id = None  # most recently computed next job
    hqe_index = 0  # index for scanning by started_on times
    for task in special_tasks:
        if task.queue_entry:
            next_job_id = task.queue_entry.job.id
        elif task.time_started is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry.started_on is None:
                    continue
                if queue_entry.started_on < task.time_started:
                    break
                next_job_id = queue_entry.job.id

        task.next_job_id = next_job_id

        # advance hqe_index to just after next_job_id
        if next_job_id is not None:
            for queue_entry in queue_entries[hqe_index:]:
                if queue_entry.job.id < next_job_id:
                    break
                hqe_index += 1

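# Worked example of the scan above (IDs and times are illustrative): with
# queue_entries for jobs [12, 11, 10] (descending) and a special task that
# started after job 11's entry began but before job 12's, the inner loop
# breaks at job 11, so task.next_job_id becomes 12 (the first job started
# after the task) and hqe_index advances just past job 12's entry.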

def interleave_entries(queue_entries, special_tasks):
    """
    Both lists should be ordered by descending ID.
    """
    _compute_next_job_for_tasks(queue_entries, special_tasks)

    # start with all special tasks that've run since the last job
    interleaved_entries = []
    for task in special_tasks:
        if task.next_job_id is not None:
            break
        interleaved_entries.append(_special_task_to_dict(task))

    # now interleave queue entries with the remaining special tasks
    special_task_index = len(interleaved_entries)
    for queue_entry in queue_entries:
        interleaved_entries.append(_queue_entry_to_dict(queue_entry))
        # add all tasks that ran between this job and the previous one
        for task in special_tasks[special_task_index:]:
            if task.next_job_id < queue_entry.job.id:
                break
            interleaved_entries.append(_special_task_to_dict(task))
            special_task_index += 1

    return interleaved_entries

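# Illustrative ordering (IDs are hypothetical): given queue entries for jobs
# [12, 11] and special tasks whose computed next_job_id values are
# [None, 12, 11], the result is: the None task (ran after the newest job),
# job 12, the task with next_job_id 12, job 11, then the task with
# next_job_id 11.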

def get_create_job_common_args(local_args):
    """
    Returns a dict containing only the args that apply for create_job_common.

    Returns a subset of local_args, which contains only the arguments that can
    be passed in to create_job_common().
    """
    arg_names, _, _, _ = inspect.getargspec(create_job_common)
    return dict(item for item in local_args.iteritems() if item[0] in arg_names)

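# Sketch of the intended call pattern (the wrapper below is hypothetical): an
# RPC entry point whose signature is a superset of create_job_common()'s can
# forward just the relevant arguments via locals():
#
#     def create_job(name, priority, control_file, control_type, **extra):
#         return create_job_common(**get_create_job_common_args(locals()))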

def create_job_common(name, priority, control_type, control_file=None,
                      hosts=(), meta_hosts=(), one_time_hosts=(),
                      atomic_group_name=None, synch_count=None,
                      is_template=False, timeout=None, max_runtime_mins=None,
                      run_verify=True, email_list='', dependencies=(),
                      reboot_before=None, reboot_after=None,
                      parse_failed_repair=None, hostless=False, keyvals=None,
                      drone_set=None, parameterized_job=None,
                      parent_job_id=None, test_retry=0, run_reset=True):
    #pylint: disable-msg=C0111
    """
    Common code between creating "standard" jobs and creating parameterized jobs
    """
    user = models.User.current_user()
    owner = user.login

    # Convert metahost names to lower case, to avoid case sensitivity issues
    meta_hosts = [meta_host.lower() for meta_host in meta_hosts]

    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
            or hostless):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "'atomic_group_name', or 'hostless'"
            })

    if hostless:
        if hosts or meta_hosts or one_time_hosts or atomic_group_name:
            raise model_logic.ValidationError({
                    'hostless': 'Hostless jobs cannot include any hosts!'})
        server_type = control_data.CONTROL_TYPE_NAMES.SERVER
        if control_type != server_type:
            raise model_logic.ValidationError({
                    'control_type': 'Hostless jobs cannot use client-side '
                                    'control files'})

    labels_by_name = dict((label.name.lower(), label)
                          for label in models.Label.objects.all())
    atomic_groups_by_name = dict((ag.name.lower(), ag)
                                 for ag in models.AtomicGroup.objects.all())

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label_name in meta_hosts or []:
            label = labels_by_name.get(label_name)
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label_name in meta_hosts or []:
        if label_name in labels_by_name:
            label = labels_by_name[label_name]
            metahost_objects.append(label)
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                                'Label "%s" not found. If assumed to be an '
                                'atomic group it would conflict with the '
                                'supplied atomic group "%s".' % (
                                        label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                    {'atomic_group_name' :
                     'You have requested a synch_count (%d) greater than the '
                     'maximum machines in the requested Atomic Group (%d).' %
                     (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_mins=max_runtime_mins,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair,
                   keyvals=keyvals,
                   drone_set=drone_set,
                   parameterized_job=parameterized_job,
                   parent_job_id=parent_job_id,
                   test_retry=test_retry,
                   run_reset=run_reset)
    return create_new_job(owner=owner,
                          options=options,
                          host_objects=host_objects,
                          metahost_objects=metahost_objects,
                          atomic_group=atomic_group)