blob: 8d8d61cf326a1f144094de5e890a0715c53adf8e [file] [log] [blame]
mblighbe630eb2008-08-01 16:41:48 +00001#
2# Copyright 2008 Google Inc. All Rights Reserved.
3
4"""
5The job module contains the objects and methods used to
6manage jobs in Autotest.
7
The valid actions are:
list: lists job(s)
create: create a job
clone: clone an existing job
abort: abort job(s)
stat: detailed listing of job(s)
13
The common options are described in topic_common.py.
16See topic_common.py for a High Level Design and Algorithm.
17"""
18
19import getpass, os, pwd, re, socket, sys
20from autotest_lib.cli import topic_common, action_common
21
22
class job(topic_common.atest):
    """Job class
    atest job [create|clone|list|stat|abort] <options>"""
    usage_action = '[create|clone|list|stat|abort]'
    topic = msg_topic = 'job'
    msg_items = '<job_ids>'


    def _convert_status(self, results):
        """Flatten each result's 'status_counts' dict into a sorted,
        human-readable 'Status=count(percent%)' string, in place.

        @param results: list of job dicts, each carrying a
                'status_counts' dict mapping status name -> count.
        """
        for result in results:
            # Hoist the dict lookup; use items() instead of the
            # deprecated iteritems() (identical result, py3-safe).
            status_counts = result['status_counts']
            total = sum(status_counts.values())
            status = ['%s=%s(%.1f%%)' % (key, val, 100.0*float(val)/total)
                      for key, val in status_counts.items()]
            status.sort()
            result['status_counts'] = ', '.join(status)


    def backward_compatibility(self, action, argv):
        """ 'job create --clone' became 'job clone --id'

        Rewrites the old-style options in argv in place and returns the
        (possibly updated) action name.
        """
        if action == 'create':
            for option in ['-l', '--clone']:
                if option in argv:
                    argv[argv.index(option)] = '--id'
                    action = 'clone'
        return action
49
mblighbe630eb2008-08-01 16:41:48 +000050class job_help(job):
51 """Just here to get the atest logic working.
52 Usage is set by its parent"""
53 pass
54
55
class job_list_stat(action_common.atest_list, job):
    """Shared plumbing for the 'job list' and 'job stat' actions."""

    def __init__(self):
        super(job_list_stat, self).__init__()

        # Any leftover command-line arguments are job IDs or job names.
        self.topic_parse_info = topic_common.item_parse_info(
            attribute_name='jobs',
            use_leftover=True)


    def __split_jobs_between_ids_names(self):
        """Partition self.jobs into numeric IDs and names.

        @returns a (job_ids, job_names) tuple of string lists.
        """
        job_ids = []
        job_names = []

        # Sort between job IDs and names
        for job_id in self.jobs:
            if job_id.isdigit():
                job_ids.append(job_id)
            else:
                job_names.append(job_id)
        return (job_ids, job_names)


    def execute_on_ids_and_names(self, op, filters=None,
                                 check_results=None,
                                 tag_id='id__in', tag_name='name__in'):
        """Run the RPC op over self.jobs, querying IDs and names
        separately, and return the combined result list.

        @param op: RPC operation name.
        @param filters: extra RPC filters (default: none).
        @param check_results: mapping used to validate RPC results
                (default: {'id__in': 'id', 'name__in': 'id'}).
        @param tag_id: filter key used for the numeric-ID query.
        @param tag_name: filter key used for the name query.
        """
        # None sentinels instead of mutable default arguments, which
        # would be shared across calls.
        if filters is None:
            filters = {}
        if check_results is None:
            check_results = {'id__in': 'id',
                             'name__in': 'id'}

        if not self.jobs:
            # Want everything
            return super(job_list_stat, self).execute(op=op, filters=filters)

        all_jobs = []
        (job_ids, job_names) = self.__split_jobs_between_ids_names()

        for items, tag in [(job_ids, tag_id),
                           (job_names, tag_name)]:
            if items:
                new_filters = filters.copy()
                new_filters[tag] = items
                jobs = super(job_list_stat,
                             self).execute(op=op,
                                           filters=new_filters,
                                           check_results=check_results)
                all_jobs.extend(jobs)

        return all_jobs
102
class job_list(job_list_stat):
    """atest job list [<jobs>] [--all] [--running] [--user <username>]"""

    def __init__(self):
        super(job_list, self).__init__()
        self.parser.add_option('-a', '--all', help='List jobs for all '
                               'users.', action='store_true', default=False)
        self.parser.add_option('-r', '--running', help='List only running '
                               'jobs', action='store_true')
        self.parser.add_option('-u', '--user', help='List jobs for given '
                               'user', type='string')


    def parse(self):
        """Parse the list options and fill in the RPC filter data."""
        options, leftover = super(job_list, self).parse()
        self.all = options.all
        self.data['running'] = options.running
        if options.user:
            if options.all:
                self.invalid_syntax('Only specify --all or --user, not both.')
            else:
                self.data['owner'] = options.user
        elif not (options.all or self.jobs):
            # No explicit selection: default to the invoking user's jobs.
            self.data['owner'] = getpass.getuser()

        return options, leftover


    def execute(self):
        """Fetch the job summaries matching the parsed filters."""
        return self.execute_on_ids_and_names(op='get_jobs_summary',
                                             filters=self.data)


    def output(self, results):
        """Print the job summaries, with extra columns when verbose."""
        keys = ['id', 'owner', 'name', 'status_counts']
        if self.verbose:
            keys += ['priority', 'control_type', 'created_on']
        self._convert_status(results)
        super(job_list, self).output(results, keys)
143
class job_stat(job_list_stat):
    """atest job stat <job>"""
    usage_action = 'stat'

    def __init__(self):
        super(job_stat, self).__init__()
        self.parser.add_option('-f', '--control-file',
                               help='Display the control file',
                               action='store_true', default=False)


    def parse(self):
        """Parse the stat options; at least one job is required."""
        options, leftover = super(job_stat, self).parse(req_items='jobs')
        if not self.jobs:
            self.invalid_syntax('Must specify at least one job.')

        self.show_control_file = options.control_file

        return options, leftover


    def _merge_results(self, summary, qes):
        """Annotate each summary entry with a 'hosts_status' string
        built from the host queue entries.

        @param summary: list of job dicts (must carry 'id').
        @param qes: host queue entry dicts with 'job', 'host', 'status'.
        @returns the summary list, modified in place.
        """
        # job id -> {status -> [hostname, ...]}
        hosts_status = {}
        for qe in qes:
            if qe['host']:
                job_id = qe['job']['id']
                hostname = qe['host']['hostname']
                hosts_status.setdefault(job_id,
                                        {}).setdefault(qe['status'],
                                                       []).append(hostname)

        # 'job_entry' rather than 'job', which would shadow the
        # module-level job class; 'in' replaces deprecated has_key().
        for job_entry in summary:
            job_id = job_entry['id']
            if job_id in hosts_status:
                this_job = hosts_status[job_id]
                host_per_status = ['%s=%s' % (status, ','.join(host))
                                   for status, host in this_job.items()]
                job_entry['hosts_status'] = ', '.join(host_per_status)
            else:
                job_entry['hosts_status'] = ''
        return summary


    def execute(self):
        """Fetch the job summaries and their per-host statuses."""
        summary = self.execute_on_ids_and_names(op='get_jobs_summary')

        # Get the real hostnames
        qes = self.execute_on_ids_and_names(op='get_host_queue_entries',
                                            check_results={},
                                            tag_id='job__in',
                                            tag_name='job__name__in')

        self._convert_status(summary)

        return self._merge_results(summary, qes)


    def output(self, results):
        """Print the detailed job listing."""
        if not self.verbose:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status']
        else:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
                    'owner', 'control_type', 'synch_count', 'created_on',
                    'run_verify', 'reboot_before', 'reboot_after',
                    'parse_failed_repair']

        if self.show_control_file:
            keys.append('control_file')

        super(job_stat, self).output(results, keys)
215
mbligh5a496082009-08-03 16:44:54 +0000216class job_create_or_clone(action_common.atest_create, job):
217 """Class containing the code common to the job create and clone actions"""
218 msg_items = 'job_name'
219
220 def __init__(self):
221 super(job_create_or_clone, self).__init__()
222 self.hosts = []
223 self.data_item_key = 'name'
224 self.parser.add_option('-p', '--priority', help='Job priority (low, '
225 'medium, high, urgent), default=medium',
226 type='choice', choices=('low', 'medium', 'high',
227 'urgent'), default='medium')
228 self.parser.add_option('-b', '--labels',
229 help='Comma separated list of labels '
230 'to get machine list from.', default='')
231 self.parser.add_option('-m', '--machine', help='List of machines to '
232 'run on')
233 self.parser.add_option('-M', '--mlist',
234 help='File listing machines to use',
235 type='string', metavar='MACHINE_FLIST')
236 self.parser.add_option('--one-time-hosts',
237 help='List of one time hosts')
238 self.parser.add_option('-e', '--email',
239 help='A comma seperated list of '
240 'email addresses to notify of job completion',
241 default='')
242
243
mbligh56f1f4a2009-08-03 16:45:12 +0000244 def _parse_hosts(self, args):
245 """ Parses the arguments to generate a list of hosts and meta_hosts
246 A host is a regular name, a meta_host is n*label or *label.
247 These can be mixed on the CLI, and separated by either commas or
248 spaces, e.g.: 5*Machine_Label host0 5*Machine_Label2,host2 """
249
250 hosts = []
251 meta_hosts = []
252
253 for arg in args:
254 for host in arg.split(','):
255 if re.match('^[0-9]+[*]', host):
256 num, host = host.split('*', 1)
257 meta_hosts += int(num) * [host]
258 elif re.match('^[*](\w*)', host):
259 meta_hosts += [re.match('^[*](\w*)', host).group(1)]
260 elif host != '' and host not in hosts:
261 # Real hostname and not a duplicate
262 hosts.append(host)
263
264 return (hosts, meta_hosts)
265
266
mbligh5a496082009-08-03 16:44:54 +0000267 def parse(self):
268 host_info = topic_common.item_parse_info(attribute_name='hosts',
269 inline_option='machine',
270 filename_option='mlist')
271 job_info = topic_common.item_parse_info(attribute_name='jobname',
272 use_leftover=True)
273 oth_info = topic_common.item_parse_info(attribute_name='one_time_hosts',
274 inline_option='one_time_hosts')
275
276 options, leftover = super(job_create_or_clone,
277 self).parse([host_info, job_info, oth_info],
278 req_items='jobname')
279 self.data = {}
280 if len(self.jobname) > 1:
281 self.invalid_syntax('Too many arguments specified, only expected '
282 'to receive job name: %s' % self.jobname)
283 self.jobname = self.jobname[0]
284
285 if options.priority:
286 self.data['priority'] = options.priority.capitalize()
287
288 if self.one_time_hosts:
289 self.data['one_time_hosts'] = self.one_time_hosts
290
291 if options.labels:
292 labels = options.labels.split(',')
293 labels = [label.strip() for label in labels if label.strip()]
294 label_hosts = self.execute_rpc(op='get_hosts',
295 multiple_labels=labels)
296 for host in label_hosts:
297 self.hosts.append(host['hostname'])
298
299 self.data['name'] = self.jobname
300
301 (self.data['hosts'],
mbligh56f1f4a2009-08-03 16:45:12 +0000302 self.data['meta_hosts']) = self._parse_hosts(self.hosts)
mbligh5a496082009-08-03 16:44:54 +0000303
304 self.data['email_list'] = options.email
305
306 return options, leftover
307
308
309 def create_job(self):
310 job_id = self.execute_rpc(op='create_job', **self.data)
311 return ['%s (id %s)' % (self.jobname, job_id)]
312
313
314 def get_items(self):
315 return [self.jobname]
316
317
318
class job_create(job_create_or_clone):
    """atest job create [--priority <Low|Medium|High|Urgent>]
    [--synch_count] [--control-file </path/to/cfile>]
    [--on-server] [--test <test1,test2>] [--kernel <http://kernel>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    [--labels <list of labels of machines to run on>]
    [--reboot_before <option>] [--reboot_after <option>]
    [--noverify] [--timeout <timeout>] [--max_runtime <max runtime>]
    [--one-time-hosts <hosts>] [--email <email>]
    [--dependencies <labels this job is dependent on>]
    [--atomic_group <atomic group name>] [--parse-failed-repair <option>]
    job_name

    Creating a job is rather different from the other create operations,
    so it only uses the __init__() and output() from its superclass.
    """
    op_action = 'create'

    def __init__(self):
        super(job_create, self).__init__()
        # Kwargs forwarded to the 'generate_control_file' RPC when the
        # control file must be generated server-side (--test / --kernel,
        # or a client control file combined with --kernel).
        self.ctrl_file_data = {}
        self.parser.add_option('-y', '--synch_count', type=int,
                               help='Number of machines to use per autoserv '
                               'execution')
        self.parser.add_option('-f', '--control-file',
                               help='use this control file', metavar='FILE')
        self.parser.add_option('-s', '--server',
                               help='This is server-side job',
                               action='store_true', default=False)
        self.parser.add_option('-t', '--test',
                               help='List of tests to run')

        self.parser.add_option('-k', '--kernel', help='A comma separated list'
                               ' of kernel versions/URLs/filenames to run the'
                               ' job on')
        self.parser.add_option('--kernel-cmdline', help='A string that will be'
                               ' given as cmdline to the booted kernel(s)'
                               ' specified by the -k option')

        self.parser.add_option('-d', '--dependencies', help='Comma separated '
                               'list of labels this job is dependent on.',
                               default='')
        self.parser.add_option('-G', '--atomic_group', help='Name of an Atomic '
                               'Group to schedule this job on.',
                               default='')

        self.parser.add_option('-B', '--reboot_before',
                               help='Whether or not to reboot the machine '
                               'before the job (never/if dirty/always)',
                               type='choice',
                               choices=('never', 'if dirty', 'always'))
        self.parser.add_option('-a', '--reboot_after',
                               help='Whether or not to reboot the machine '
                               'after the job (never/if all tests passed/'
                               'always)',
                               type='choice',
                               choices=('never', 'if all tests passed',
                                        'always'))

        self.parser.add_option('--parse-failed-repair',
                               help='Whether or not to parse failed repair '
                               'results as part of the job',
                               type='choice',
                               choices=('true', 'false'))
        self.parser.add_option('-n', '--noverify',
                               help='Do not run verify for job',
                               default=False, action='store_true')
        self.parser.add_option('-o', '--timeout', help='Job timeout in hours.',
                               metavar='TIMEOUT')
        self.parser.add_option('--max_runtime',
                               help='Job maximum runtime in hours')


    @staticmethod
    def _get_kernel_data(kernel_list, cmdline):
        """Turn the comma/space-separated -k value into the list of
        per-kernel dicts the RPC expects; the same cmdline (if any) is
        attached to every kernel version."""
        # the RPC supports cmdline per kernel version in a dictionary
        kernels = []
        for version in re.split(r'[, ]+', kernel_list):
            if not version:
                continue
            kernel_info = {'version': version}
            if cmdline:
                kernel_info['cmdline'] = cmdline
            kernels.append(kernel_info)

        return kernels


    def parse(self):
        """Validate the create options and populate self.data /
        self.ctrl_file_data for execute()."""
        options, leftover = super(job_create, self).parse()

        # Some way of selecting machines is mandatory.
        if (len(self.hosts) == 0 and not self.one_time_hosts
            and not options.labels and not options.atomic_group):
            self.invalid_syntax('Must specify at least one machine '
                                'or an atomic group '
                                '(-m, -M, -b, -G or --one-time-hosts).')
        # --test and --control-file are mutually exclusive, and exactly
        # one of them is required.
        if not options.control_file and not options.test:
            self.invalid_syntax('Must specify either --test or --control-file'
                                ' to create a job.')
        if options.control_file and options.test:
            self.invalid_syntax('Can only specify one of --control-file or '
                                '--test, not both.')
        if options.kernel:
            self.ctrl_file_data['kernel'] = self._get_kernel_data(
                options.kernel, options.kernel_cmdline)
            self.ctrl_file_data['do_push_packages'] = True
        if options.control_file:
            try:
                control_file_f = open(options.control_file)
                try:
                    control_file_data = control_file_f.read()
                finally:
                    control_file_f.close()
            except IOError:
                self.generic_error('Unable to read from specified '
                                   'control-file: %s' % options.control_file)
            if options.kernel:
                if options.server:
                    self.invalid_syntax(
                        'A control file and a kernel may only be specified'
                        ' together on client side jobs.')
                # execute() will pass this to the AFE server to wrap this
                # control file up to include the kernel installation steps.
                self.ctrl_file_data['client_control_file'] = control_file_data
            else:
                self.data['control_file'] = control_file_data
        if options.test:
            # --test implies the control type; --server may not override it.
            if options.server:
                self.invalid_syntax('If you specify tests, then the '
                                    'client/server setting is implicit and '
                                    'cannot be overriden.')
            tests = [t.strip() for t in options.test.split(',') if t.strip()]
            self.ctrl_file_data['tests'] = tests


        if options.reboot_before:
            self.data['reboot_before'] = options.reboot_before.capitalize()
        if options.reboot_after:
            self.data['reboot_after'] = options.reboot_after.capitalize()
        if options.parse_failed_repair:
            # 'true'/'false' choice string -> bool for the RPC.
            self.data['parse_failed_repair'] = (
                options.parse_failed_repair == 'true')
        if options.noverify:
            self.data['run_verify'] = False
        if options.timeout:
            self.data['timeout'] = options.timeout
        if options.max_runtime:
            self.data['max_runtime_hrs'] = options.max_runtime

        if options.atomic_group:
            self.data['atomic_group_name'] = options.atomic_group

        deps = options.dependencies.split(',')
        deps = [dep.strip() for dep in deps if dep.strip()]
        self.data['dependencies'] = deps

        if options.synch_count:
            self.data['synch_count'] = options.synch_count
        if options.server:
            self.data['control_type'] = 'Server'
        else:
            self.data['control_type'] = 'Client'

        return options, leftover


    def execute(self):
        """Generate the control file server-side if needed, then create
        the job and return the result message list."""
        if self.ctrl_file_data:
            uploading_kernel = 'kernel' in self.ctrl_file_data
            if uploading_kernel:
                # Kernel upload can be slow: widen the socket timeout and
                # restore it afterwards.
                default_timeout = socket.getdefaulttimeout()
                socket.setdefaulttimeout(topic_common.UPLOAD_SOCKET_TIMEOUT)
                print 'Uploading Kernel: this may take a while...',
                sys.stdout.flush()
            try:
                cf_info = self.execute_rpc(op='generate_control_file',
                                           item=self.jobname,
                                           **self.ctrl_file_data)
            finally:
                if uploading_kernel:
                    socket.setdefaulttimeout(default_timeout)

            if uploading_kernel:
                print 'Done'
            self.data['control_file'] = cf_info['control_file']
            # An explicit --synch_count wins over the generated value.
            if 'synch_count' not in self.data:
                self.data['synch_count'] = cf_info['synch_count']
            if cf_info['is_server']:
                self.data['control_type'] = 'Server'
            else:
                self.data['control_type'] = 'Client'

            # Get the union of the 2 sets of dependencies
            deps = set(self.data['dependencies'])
            deps = sorted(deps.union(cf_info['dependencies']))
            self.data['dependencies'] = list(deps)

        if 'synch_count' not in self.data:
            self.data['synch_count'] = 1

        return self.create_job()
521
mbligh5a496082009-08-03 16:44:54 +0000522class job_clone(job_create_or_clone):
523 """atest job clone [--priority <Low|Medium|High|Urgent>]
524 [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
525 [--labels <list of labels of machines to run on>]
526 [--one-time-hosts <hosts>] [--email <email>]
527 job_name
528
529 Cloning a job is rather different from the other create operations,
530 so it only uses the __init__() and output() from its superclass.
531 """
532 op_action = 'clone'
533 usage_action = 'clone'
534
535 def __init__(self):
536 super(job_clone, self).__init__()
537 self.parser.add_option('-i', '--id', help='Job id to clone',
538 default=False,
539 metavar='JOB_ID')
540 self.parser.add_option('-r', '--reuse-hosts',
541 help='Use the exact same hosts as the '
542 'cloned job.',
543 action='store_true', default=False)
544
545
546 def parse(self):
547 options, leftover = super(job_clone, self).parse()
548
549 self.clone_id = options.id
550 self.reuse_hosts = options.reuse_hosts
551
mbligh56f1f4a2009-08-03 16:45:12 +0000552 host_specified = self.hosts or self.one_time_hosts or options.labels
553 if self.reuse_hosts and host_specified:
554 self.invalid_syntax('Cannot specify hosts and reuse the same '
555 'ones as the cloned job.')
556
557 if not (self.reuse_hosts or host_specified):
558 self.invalid_syntax('Must reuse or specify at least one '
559 'machine (-r, -m, -M, -b or '
560 '--one-time-hosts).')
mbligh5a496082009-08-03 16:44:54 +0000561
562 return options, leftover
563
564
565 def execute(self):
566 clone_info = self.execute_rpc(op='get_info_for_clone',
567 id=self.clone_id,
568 preserve_metahosts=self.reuse_hosts)
mbligh5a496082009-08-03 16:44:54 +0000569
570 # Remove fields from clone data that cannot be reused
mbligh56f1f4a2009-08-03 16:45:12 +0000571 for field in ('name', 'created_on', 'id', 'owner'):
572 del clone_info['job'][field]
mbligh5a496082009-08-03 16:44:54 +0000573
574 # Keyword args cannot be unicode strings
mbligh56f1f4a2009-08-03 16:45:12 +0000575 self.data.update((str(key), val)
576 for key, val in clone_info['job'].iteritems())
mbligh5a496082009-08-03 16:44:54 +0000577
mbligh56f1f4a2009-08-03 16:45:12 +0000578 if self.reuse_hosts:
579 # Convert host list from clone info that can be used for job_create
580 for label, qty in clone_info['meta_host_counts'].iteritems():
581 self.data['meta_hosts'].extend([label]*qty)
mbligh5a496082009-08-03 16:44:54 +0000582
mbligh56f1f4a2009-08-03 16:45:12 +0000583 self.data['hosts'].extend(host['hostname']
584 for host in clone_info['hosts'])
mbligh5a496082009-08-03 16:44:54 +0000585
586 return self.create_job()
mblighbe630eb2008-08-01 16:41:48 +0000587
588
589class job_abort(job, action_common.atest_delete):
590 """atest job abort <job(s)>"""
591 usage_action = op_action = 'abort'
592 msg_done = 'Aborted'
593
594 def parse(self):
mbligh9deeefa2009-05-01 23:11:08 +0000595 job_info = topic_common.item_parse_info(attribute_name='jobids',
596 use_leftover=True)
597 options, leftover = super(job_abort, self).parse([job_info],
598 req_items='jobids')
mblighbe630eb2008-08-01 16:41:48 +0000599
600
mbligh206d50a2008-11-13 01:19:25 +0000601 def execute(self):
602 data = {'job__id__in': self.jobids}
603 self.execute_rpc(op='abort_host_queue_entries', **data)
604 print 'Aborting jobs: %s' % ', '.join(self.jobids)
605
606
mblighbe630eb2008-08-01 16:41:48 +0000607 def get_items(self):
608 return self.jobids