blob: ddf4e68fe15ac6e831f32803b8f461dd21b3b721 [file] [log] [blame]
#
# Copyright 2008 Google Inc. All Rights Reserved.

"""
The job module contains the objects and methods used to
manage jobs in Autotest.

The valid actions are:
list: lists job(s)
create: create a job
abort: abort job(s)
stat: detailed listing of job(s)

The common options are:

See topic_common.py for a High Level Design and Algorithm.
"""
18
import getpass
import os
import pwd
import re
import socket
import sys

from autotest_lib.cli import action_common, topic_common
from autotest_lib.client.common_lib import control_data
mblighbe630eb2008-08-01 16:41:48 +000022
23
24class job(topic_common.atest):
25 """Job class
mbligh5a496082009-08-03 16:44:54 +000026 atest job [create|clone|list|stat|abort] <options>"""
27 usage_action = '[create|clone|list|stat|abort]'
mblighbe630eb2008-08-01 16:41:48 +000028 topic = msg_topic = 'job'
29 msg_items = '<job_ids>'
30
31
32 def _convert_status(self, results):
33 for result in results:
mbligh10a47332008-08-11 19:37:46 +000034 total = sum(result['status_counts'].values())
mbligh47dc4d22009-02-12 21:48:34 +000035 status = ['%s=%s(%.1f%%)' % (key, val, 100.0*float(val)/total)
mbligh10a47332008-08-11 19:37:46 +000036 for key, val in result['status_counts'].iteritems()]
mblighbe630eb2008-08-01 16:41:48 +000037 status.sort()
38 result['status_counts'] = ', '.join(status)
39
40
mbligh5a496082009-08-03 16:44:54 +000041 def backward_compatibility(self, action, argv):
42 """ 'job create --clone' became 'job clone --id' """
43 if action == 'create':
44 for option in ['-l', '--clone']:
45 if option in argv:
46 argv[argv.index(option)] = '--id'
47 action = 'clone'
48 return action
49
50
class job_help(job):
    """Placeholder action class so the atest dispatch machinery can
    resolve 'job help'; the usage text is inherited from the parent."""
55
56
class job_list_stat(action_common.atest_list, job):
    """Common machinery shared by 'atest job list' and 'atest job stat'."""

    def __init__(self):
        super(job_list_stat, self).__init__()

        self.topic_parse_info = topic_common.item_parse_info(
            attribute_name='jobs',
            use_leftover=True)


    def __split_jobs_between_ids_names(self):
        """Partition self.jobs into numeric job IDs and job names.

        @return tuple (job_ids, job_names), both lists of strings.
        """
        job_ids = []
        job_names = []

        # Sort between job IDs and names
        for job_id in self.jobs:
            if job_id.isdigit():
                job_ids.append(job_id)
            else:
                job_names.append(job_id)
        return (job_ids, job_names)


    def execute_on_ids_and_names(self, op, filters=None,
                                 check_results=None,
                                 tag_id='id__in', tag_name='name__in'):
        """Run RPC op, batching the requested jobs by ID and by name.

        @param op: RPC operation name.
        @param filters: extra RPC filters (defaults to no filters).
        @param check_results: result-validation mapping (defaults to
                {'id__in': 'id', 'name__in': 'id'}).
        @param tag_id: filter key used for the job-ID batch.
        @param tag_name: filter key used for the job-name batch.
        @return combined list of results from both RPC calls.
        """
        # None sentinels instead of mutable default arguments, so no
        # state can leak between calls through a shared default dict.
        if filters is None:
            filters = {}
        if check_results is None:
            check_results = {'id__in': 'id',
                             'name__in': 'id'}

        if not self.jobs:
            # Want everything
            return super(job_list_stat, self).execute(op=op, filters=filters)

        all_jobs = []
        (job_ids, job_names) = self.__split_jobs_between_ids_names()

        for items, tag in [(job_ids, tag_id),
                           (job_names, tag_name)]:
            if items:
                new_filters = filters.copy()
                new_filters[tag] = items
                jobs = super(job_list_stat,
                             self).execute(op=op,
                                           filters=new_filters,
                                           check_results=check_results)
                all_jobs.extend(jobs)

        return all_jobs
102
103
class job_list(job_list_stat):
    """atest job list [<jobs>] [--all] [--running] [--user <username>]"""

    def __init__(self):
        super(job_list, self).__init__()
        parser = self.parser
        parser.add_option('-a', '--all', help='List jobs for all '
                          'users.', action='store_true', default=False)
        parser.add_option('-r', '--running', help='List only running '
                          'jobs', action='store_true')
        parser.add_option('-u', '--user', help='List jobs for given '
                          'user', type='string')


    def parse(self):
        """Parse the list options and fill in the RPC filter data."""
        options, leftover = super(job_list, self).parse()
        self.all = options.all
        self.data['running'] = options.running
        if options.user:
            if options.all:
                self.invalid_syntax('Only specify --all or --user, not both.')
            else:
                self.data['owner'] = options.user
        elif not (options.all or self.jobs):
            # No explicit scope given: default to the invoking user.
            self.data['owner'] = getpass.getuser()

        return options, leftover


    def execute(self):
        """Fetch the job summaries matching the parsed filters."""
        return self.execute_on_ids_and_names(op='get_jobs_summary',
                                             filters=self.data)


    def output(self, results):
        """Print the job list, with extra columns when --verbose."""
        keys = ['id', 'owner', 'name', 'status_counts']
        if self.verbose:
            keys = keys + ['priority', 'control_type', 'created_on']
        self._convert_status(results)
        super(job_list, self).output(results, keys)
142
143
144
class job_stat(job_list_stat):
    """atest job stat <job>"""
    usage_action = 'stat'

    def __init__(self):
        super(job_stat, self).__init__()
        self.parser.add_option('-f', '--control-file',
                               help='Display the control file',
                               action='store_true', default=False)
        self.parser.add_option('-N', '--list-hosts',
                               help='Display only a list of hosts',
                               action='store_true')
        self.parser.add_option('-s', '--list-hosts-status',
                               help='Display only the hosts in these statuses '
                               'for a job.', action='store')


    def parse(self):
        """Parse the stat options and validate option combinations.

        @return (options, leftover) from the parent parser.
        """
        status_list = topic_common.item_parse_info(
                attribute_name='status_list',
                inline_option='list_hosts_status')
        options, leftover = super(job_stat, self).parse([status_list],
                                                        req_items='jobs')

        if not self.jobs:
            self.invalid_syntax('Must specify at least one job.')

        self.show_control_file = options.control_file
        self.list_hosts = options.list_hosts

        if self.list_hosts and self.status_list:
            self.invalid_syntax('--list-hosts is implicit when using '
                                '--list-hosts-status.')
        if len(self.jobs) > 1 and (self.list_hosts or self.status_list):
            self.invalid_syntax('--list-hosts and --list-hosts-status should '
                                'only be used on a single job.')

        return options, leftover


    def _merge_results(self, summary, qes):
        """Fold per-host queue-entry data into the job summary dicts.

        Adds 'hosts' and 'hosts_status' (and, with --list-hosts-status,
        'hosts_selected_status') keys to each job dict in summary.

        @param summary: list of job dicts from get_jobs_summary.
        @param qes: list of host queue entry dicts for those jobs.
        @return the summary list, mutated in place.
        """
        hosts_status = {}
        for qe in qes:
            if qe['host']:
                job_id = qe['job']['id']
                hostname = qe['host']['hostname']
                hosts_status.setdefault(job_id,
                                        {}).setdefault(qe['status'],
                                                       []).append(hostname)

        for job in summary:
            job_id = job['id']
            # dict.has_key() is deprecated (removed in Python 3); use 'in'.
            if job_id in hosts_status:
                this_job = hosts_status[job_id]
                job['hosts'] = ' '.join(' '.join(host) for host in
                                        this_job.values())
                host_per_status = ['%s="%s"' % (status, ' '.join(host))
                                   for status, host in this_job.items()]
                job['hosts_status'] = ', '.join(host_per_status)
                if self.status_list:
                    statuses = set(s.lower() for s in self.status_list)
                    all_hosts = [s for s in host_per_status if s.split('=',
                                 1)[0].lower() in statuses]
                    job['hosts_selected_status'] = '\n'.join(all_hosts)
            else:
                job['hosts_status'] = ''

            if not job.get('hosts'):
                self.generic_error('Job has unassigned meta-hosts, '
                                   'try again shortly.')

        return summary


    def execute(self):
        """Fetch job summaries plus queue entries and merge them."""
        summary = self.execute_on_ids_and_names(op='get_jobs_summary')

        # Get the real hostnames
        qes = self.execute_on_ids_and_names(op='get_host_queue_entries',
                                            check_results={},
                                            tag_id='job__in',
                                            tag_name='job__name__in')

        self._convert_status(summary)

        return self._merge_results(summary, qes)


    def output(self, results):
        """Print the stat results; column set depends on the options."""
        if self.list_hosts:
            keys = ['hosts']
        elif self.status_list:
            keys = ['hosts_selected_status']
        elif not self.verbose:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status']
        else:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
                    'owner', 'control_type', 'synch_count', 'created_on',
                    'run_verify', 'reboot_before', 'reboot_after',
                    'parse_failed_repair']

        if self.show_control_file:
            keys.append('control_file')

        super(job_stat, self).output(results, keys)
250
251
class job_create_or_clone(action_common.atest_create, job):
    """Class containing the code common to the job create and clone actions"""
    msg_items = 'job_name'

    def __init__(self):
        super(job_create_or_clone, self).__init__()
        self.hosts = []
        self.data_item_key = 'name'
        self.parser.add_option('-p', '--priority', help='Job priority (low, '
                               'medium, high, urgent), default=medium',
                               type='choice', choices=('low', 'medium', 'high',
                               'urgent'), default='medium')
        self.parser.add_option('-b', '--labels',
                               help='Comma separated list of labels '
                               'to get machine list from.', default='')
        self.parser.add_option('-m', '--machine', help='List of machines to '
                               'run on')
        self.parser.add_option('-M', '--mlist',
                               help='File listing machines to use',
                               type='string', metavar='MACHINE_FLIST')
        self.parser.add_option('--one-time-hosts',
                               help='List of one time hosts')
        # Fixed typo in the help text: "seperated" -> "separated".
        self.parser.add_option('-e', '--email',
                               help='A comma separated list of '
                               'email addresses to notify of job completion',
                               default='')


    def _parse_hosts(self, args):
        """ Parses the arguments to generate a list of hosts and meta_hosts
        A host is a regular name, a meta_host is n*label or *label.
        These can be mixed on the CLI, and separated by either commas or
        spaces, e.g.: 5*Machine_Label host0 5*Machine_Label2,host2 """

        hosts = []
        meta_hosts = []

        for arg in args:
            for host in arg.split(','):
                # Hoist the '*label' match so the regex runs only once.
                star_match = re.match(r'^[*](\w*)', host)
                if re.match(r'^[0-9]+[*]', host):
                    # 'n*label': n copies of the meta-host label.
                    num, host = host.split('*', 1)
                    meta_hosts += int(num) * [host]
                elif star_match:
                    # '*label': a single meta-host entry.
                    meta_hosts.append(star_match.group(1))
                elif host != '' and host not in hosts:
                    # Real hostname and not a duplicate
                    hosts.append(host)

        return (hosts, meta_hosts)


    def parse(self, parse_info=None):
        """Parse the options common to job create and job clone.

        @param parse_info: extra item_parse_info objects from subclasses
                (defaults to none; avoids a mutable default argument).
        @return (options, leftover) from the parent parser.
        """
        if parse_info is None:
            parse_info = []
        host_info = topic_common.item_parse_info(attribute_name='hosts',
                                                 inline_option='machine',
                                                 filename_option='mlist')
        job_info = topic_common.item_parse_info(attribute_name='jobname',
                                                use_leftover=True)
        oth_info = topic_common.item_parse_info(attribute_name='one_time_hosts',
                                                inline_option='one_time_hosts')
        label_info = topic_common.item_parse_info(attribute_name='labels',
                                                  inline_option='labels')

        options, leftover = super(job_create_or_clone, self).parse(
                [host_info, job_info, oth_info, label_info] + parse_info,
                req_items='jobname')
        self.data = {}
        jobname = self.jobname
        if len(jobname) > 1:
            self.invalid_syntax('Too many arguments specified, only expected '
                                'to receive job name: %s' % jobname)
        self.jobname = jobname[0]

        if options.priority:
            self.data['priority'] = options.priority.capitalize()

        if self.one_time_hosts:
            self.data['one_time_hosts'] = self.one_time_hosts

        if self.labels:
            label_hosts = self.execute_rpc(op='get_hosts',
                                           multiple_labels=self.labels)
            for host in label_hosts:
                self.hosts.append(host['hostname'])

        self.data['name'] = self.jobname

        (self.data['hosts'],
         self.data['meta_hosts']) = self._parse_hosts(self.hosts)

        self.data['email_list'] = options.email

        return options, leftover


    def create_job(self):
        """Issue the create_job RPC and report the new job's id."""
        job_id = self.execute_rpc(op='create_job', **self.data)
        return ['%s (id %s)' % (self.jobname, job_id)]


    def get_items(self):
        return [self.jobname]
353
354
355
356class job_create(job_create_or_clone):
mblighbe630eb2008-08-01 16:41:48 +0000357 """atest job create [--priority <Low|Medium|High|Urgent>]
mbligha212d712009-02-11 01:22:36 +0000358 [--synch_count] [--control-file </path/to/cfile>]
mblighbe630eb2008-08-01 16:41:48 +0000359 [--on-server] [--test <test1,test2>] [--kernel <http://kernel>]
360 [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
showardb27f4ad2009-05-01 00:08:26 +0000361 [--labels <list of labels of machines to run on>]
showard21baa452008-10-21 00:08:39 +0000362 [--reboot_before <option>] [--reboot_after <option>]
showard12f3e322009-05-13 21:27:42 +0000363 [--noverify] [--timeout <timeout>] [--max_runtime <max runtime>]
364 [--one-time-hosts <hosts>] [--email <email>]
365 [--dependencies <labels this job is dependent on>]
showarda1e74b32009-05-12 17:32:04 +0000366 [--atomic_group <atomic group name>] [--parse-failed-repair <option>]
Paul Pendlebury5a8c6ad2011-02-01 07:20:17 -0800367 [--image <http://path/to/image>]
mblighae64d3a2008-10-15 04:13:52 +0000368 job_name
369
370 Creating a job is rather different from the other create operations,
371 so it only uses the __init__() and output() from its superclass.
372 """
mblighbe630eb2008-08-01 16:41:48 +0000373 op_action = 'create'
mblighbe630eb2008-08-01 16:41:48 +0000374
375 def __init__(self):
376 super(job_create, self).__init__()
mblighbe630eb2008-08-01 16:41:48 +0000377 self.ctrl_file_data = {}
showard7bce1022008-11-14 22:51:05 +0000378 self.parser.add_option('-y', '--synch_count', type=int,
showard2bab8f42008-11-12 18:15:22 +0000379 help='Number of machines to use per autoserv '
mbligh7ffdb8b2009-01-21 19:01:51 +0000380 'execution')
mblighbe630eb2008-08-01 16:41:48 +0000381 self.parser.add_option('-f', '--control-file',
382 help='use this control file', metavar='FILE')
383 self.parser.add_option('-s', '--server',
384 help='This is server-side job',
385 action='store_true', default=False)
386 self.parser.add_option('-t', '--test',
mbligh51148c72008-08-11 20:23:58 +0000387 help='List of tests to run')
mbligha3c58d22009-08-24 22:01:51 +0000388
389 self.parser.add_option('-k', '--kernel', help='A comma separated list'
390 ' of kernel versions/URLs/filenames to run the'
391 ' job on')
392 self.parser.add_option('--kernel-cmdline', help='A string that will be'
393 ' given as cmdline to the booted kernel(s)'
394 ' specified by the -k option')
mbligh5a496082009-08-03 16:44:54 +0000395
showardb27f4ad2009-05-01 00:08:26 +0000396 self.parser.add_option('-d', '--dependencies', help='Comma separated '
397 'list of labels this job is dependent on.',
398 default='')
showard648a35c2009-05-01 00:08:42 +0000399 self.parser.add_option('-G', '--atomic_group', help='Name of an Atomic '
400 'Group to schedule this job on.',
401 default='')
mbligh5a496082009-08-03 16:44:54 +0000402
mblighb9a8b162008-10-29 16:47:29 +0000403 self.parser.add_option('-B', '--reboot_before',
showard21baa452008-10-21 00:08:39 +0000404 help='Whether or not to reboot the machine '
405 'before the job (never/if dirty/always)',
406 type='choice',
407 choices=('never', 'if dirty', 'always'))
408 self.parser.add_option('-a', '--reboot_after',
409 help='Whether or not to reboot the machine '
410 'after the job (never/if all tests passed/'
411 'always)',
412 type='choice',
413 choices=('never', 'if all tests passed',
414 'always'))
mbligh5a496082009-08-03 16:44:54 +0000415
showarda1e74b32009-05-12 17:32:04 +0000416 self.parser.add_option('--parse-failed-repair',
417 help='Whether or not to parse failed repair '
418 'results as part of the job',
419 type='choice',
420 choices=('true', 'false'))
mbligh5d0b4b32008-12-22 14:43:01 +0000421 self.parser.add_option('-n', '--noverify',
422 help='Do not run verify for job',
423 default=False, action='store_true')
Simran Basi7e605742013-11-12 13:43:36 -0800424 self.parser.add_option('-o', '--timeout_mins',
425 help='Job timeout in minutes.',
mbligh5d0b4b32008-12-22 14:43:01 +0000426 metavar='TIMEOUT')
showard12f3e322009-05-13 21:27:42 +0000427 self.parser.add_option('--max_runtime',
Simran Basi34217022012-11-06 13:43:15 -0800428 help='Job maximum runtime in minutes')
mblighbe630eb2008-08-01 16:41:48 +0000429
Paul Pendlebury5a8c6ad2011-02-01 07:20:17 -0800430 self.parser.add_option('-i', '--image',
431 help='OS image to install before running the '
432 'test.')
433
mblighbe630eb2008-08-01 16:41:48 +0000434
mbligha3c58d22009-08-24 22:01:51 +0000435 @staticmethod
436 def _get_kernel_data(kernel_list, cmdline):
437 # the RPC supports cmdline per kernel version in a dictionary
438 kernels = []
mbligh6aaab2e2009-09-03 20:25:19 +0000439 for version in re.split(r'[, ]+', kernel_list):
440 if not version:
441 continue
mbligha3c58d22009-08-24 22:01:51 +0000442 kernel_info = {'version': version}
443 if cmdline:
444 kernel_info['cmdline'] = cmdline
445 kernels.append(kernel_info)
446
447 return kernels
448
449
mblighbe630eb2008-08-01 16:41:48 +0000450 def parse(self):
Eric Li8a12e802011-02-17 14:24:13 -0800451 deps_info = topic_common.item_parse_info(attribute_name='dependencies',
452 inline_option='dependencies')
453 options, leftover = super(job_create, self).parse(
454 parse_info=[deps_info])
mblighbe630eb2008-08-01 16:41:48 +0000455
mbligh9deeefa2009-05-01 23:11:08 +0000456 if (len(self.hosts) == 0 and not self.one_time_hosts
showard648a35c2009-05-01 00:08:42 +0000457 and not options.labels and not options.atomic_group):
mblighce348642009-02-12 21:50:39 +0000458 self.invalid_syntax('Must specify at least one machine '
showard648a35c2009-05-01 00:08:42 +0000459 'or an atomic group '
460 '(-m, -M, -b, -G or --one-time-hosts).')
mblighbe630eb2008-08-01 16:41:48 +0000461 if not options.control_file and not options.test:
462 self.invalid_syntax('Must specify either --test or --control-file'
463 ' to create a job.')
464 if options.control_file and options.test:
465 self.invalid_syntax('Can only specify one of --control-file or '
466 '--test, not both.')
mbligh120351e2009-01-24 01:40:45 +0000467 if options.kernel:
mbligha3c58d22009-08-24 22:01:51 +0000468 self.ctrl_file_data['kernel'] = self._get_kernel_data(
469 options.kernel, options.kernel_cmdline)
mblighbe630eb2008-08-01 16:41:48 +0000470 if options.control_file:
mblighbe630eb2008-08-01 16:41:48 +0000471 try:
mbligh120351e2009-01-24 01:40:45 +0000472 control_file_f = open(options.control_file)
473 try:
474 control_file_data = control_file_f.read()
475 finally:
476 control_file_f.close()
mblighbe630eb2008-08-01 16:41:48 +0000477 except IOError:
478 self.generic_error('Unable to read from specified '
479 'control-file: %s' % options.control_file)
mbligh120351e2009-01-24 01:40:45 +0000480 if options.kernel:
mbligh120351e2009-01-24 01:40:45 +0000481 # execute() will pass this to the AFE server to wrap this
482 # control file up to include the kernel installation steps.
483 self.ctrl_file_data['client_control_file'] = control_file_data
484 else:
485 self.data['control_file'] = control_file_data
mbligh4eae22a2008-10-10 16:09:46 +0000486 if options.test:
showard2bab8f42008-11-12 18:15:22 +0000487 if options.server:
mblighb9a8b162008-10-29 16:47:29 +0000488 self.invalid_syntax('If you specify tests, then the '
showard2bab8f42008-11-12 18:15:22 +0000489 'client/server setting is implicit and '
490 'cannot be overriden.')
mbligh4eae22a2008-10-10 16:09:46 +0000491 tests = [t.strip() for t in options.test.split(',') if t.strip()]
mbligh120351e2009-01-24 01:40:45 +0000492 self.ctrl_file_data['tests'] = tests
mbligh4eae22a2008-10-10 16:09:46 +0000493
Paul Pendlebury5a8c6ad2011-02-01 07:20:17 -0800494 if options.image:
495 self.data['image'] = options.image
mblighbe630eb2008-08-01 16:41:48 +0000496
showard21baa452008-10-21 00:08:39 +0000497 if options.reboot_before:
498 self.data['reboot_before'] = options.reboot_before.capitalize()
499 if options.reboot_after:
500 self.data['reboot_after'] = options.reboot_after.capitalize()
showarda1e74b32009-05-12 17:32:04 +0000501 if options.parse_failed_repair:
502 self.data['parse_failed_repair'] = (
503 options.parse_failed_repair == 'true')
mbligh5d0b4b32008-12-22 14:43:01 +0000504 if options.noverify:
505 self.data['run_verify'] = False
Simran Basi7e605742013-11-12 13:43:36 -0800506 if options.timeout_mins:
507 self.data['timeout_mins'] = options.timeout_mins
showard12f3e322009-05-13 21:27:42 +0000508 if options.max_runtime:
Simran Basi34217022012-11-06 13:43:15 -0800509 self.data['max_runtime_mins'] = options.max_runtime
mblighbe630eb2008-08-01 16:41:48 +0000510
showard648a35c2009-05-01 00:08:42 +0000511 if options.atomic_group:
512 self.data['atomic_group_name'] = options.atomic_group
513
Eric Li8a12e802011-02-17 14:24:13 -0800514 self.data['dependencies'] = self.dependencies
mblighbe630eb2008-08-01 16:41:48 +0000515
mbligh7ffdb8b2009-01-21 19:01:51 +0000516 if options.synch_count:
517 self.data['synch_count'] = options.synch_count
mblighbe630eb2008-08-01 16:41:48 +0000518 if options.server:
Aviv Keshet3dd8beb2013-05-13 17:36:04 -0700519 self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.SERVER
mblighbe630eb2008-08-01 16:41:48 +0000520 else:
Aviv Keshet3dd8beb2013-05-13 17:36:04 -0700521 self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.CLIENT
mblighbe630eb2008-08-01 16:41:48 +0000522
mbligh9deeefa2009-05-01 23:11:08 +0000523 return options, leftover
mblighbe630eb2008-08-01 16:41:48 +0000524
525
526 def execute(self):
527 if self.ctrl_file_data:
mbligh120351e2009-01-24 01:40:45 +0000528 uploading_kernel = 'kernel' in self.ctrl_file_data
529 if uploading_kernel:
mbligh8c7b04c2009-03-25 18:01:56 +0000530 default_timeout = socket.getdefaulttimeout()
mblighbe630eb2008-08-01 16:41:48 +0000531 socket.setdefaulttimeout(topic_common.UPLOAD_SOCKET_TIMEOUT)
532 print 'Uploading Kernel: this may take a while...',
mbligh120351e2009-01-24 01:40:45 +0000533 sys.stdout.flush()
534 try:
535 cf_info = self.execute_rpc(op='generate_control_file',
536 item=self.jobname,
537 **self.ctrl_file_data)
538 finally:
539 if uploading_kernel:
mbligh8c7b04c2009-03-25 18:01:56 +0000540 socket.setdefaulttimeout(default_timeout)
541
mbligh120351e2009-01-24 01:40:45 +0000542 if uploading_kernel:
mblighbe630eb2008-08-01 16:41:48 +0000543 print 'Done'
showard989f25d2008-10-01 11:38:11 +0000544 self.data['control_file'] = cf_info['control_file']
mbligh7ffdb8b2009-01-21 19:01:51 +0000545 if 'synch_count' not in self.data:
546 self.data['synch_count'] = cf_info['synch_count']
showard989f25d2008-10-01 11:38:11 +0000547 if cf_info['is_server']:
Aviv Keshet3dd8beb2013-05-13 17:36:04 -0700548 self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.SERVER
mblighbe630eb2008-08-01 16:41:48 +0000549 else:
Aviv Keshet3dd8beb2013-05-13 17:36:04 -0700550 self.data['control_type'] = control_data.CONTROL_TYPE_NAMES.CLIENT
mblighae64d3a2008-10-15 04:13:52 +0000551
mblighb9a8b162008-10-29 16:47:29 +0000552 # Get the union of the 2 sets of dependencies
553 deps = set(self.data['dependencies'])
showarda6fe9c62008-11-03 19:04:25 +0000554 deps = sorted(deps.union(cf_info['dependencies']))
mblighb9a8b162008-10-29 16:47:29 +0000555 self.data['dependencies'] = list(deps)
mblighae64d3a2008-10-15 04:13:52 +0000556
mbligh7ffdb8b2009-01-21 19:01:51 +0000557 if 'synch_count' not in self.data:
558 self.data['synch_count'] = 1
559
mbligh5a496082009-08-03 16:44:54 +0000560 return self.create_job()
mblighbe630eb2008-08-01 16:41:48 +0000561
562
class job_clone(job_create_or_clone):
    """atest job clone [--priority <Low|Medium|High|Urgent>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    [--labels <list of labels of machines to run on>]
    [--one-time-hosts <hosts>] [--email <email>]
    job_name

    Cloning a job is rather different from the other create operations,
    so it only uses the __init__() and output() from its superclass.
    """
    op_action = 'clone'
    usage_action = 'clone'

    def __init__(self):
        super(job_clone, self).__init__()
        self.parser.add_option('-i', '--id', help='Job id to clone',
                               default=False,
                               metavar='JOB_ID')
        self.parser.add_option('-r', '--reuse-hosts',
                               help='Use the exact same hosts as the '
                                    'cloned job.',
                               action='store_true', default=False)


    def parse(self):
        """Parse clone options; -r is mutually exclusive with any
        explicit host specification.

        @return (options, leftover) from the parent parser.
        """
        options, leftover = super(job_clone, self).parse()

        self.clone_id = options.id
        self.reuse_hosts = options.reuse_hosts

        host_specified = self.hosts or self.one_time_hosts or options.labels
        if self.reuse_hosts and host_specified:
            self.invalid_syntax('Cannot specify hosts and reuse the same '
                                'ones as the cloned job.')

        if not (self.reuse_hosts or host_specified):
            self.invalid_syntax('Must reuse or specify at least one '
                                'machine (-r, -m, -M, -b or '
                                '--one-time-hosts).')

        return options, leftover


    def execute(self):
        """Fetch the cloned job's definition, scrub it, and create the
        new job.

        @return the list produced by create_job() for display.
        """
        clone_info = self.execute_rpc(op='get_info_for_clone',
                                      id=self.clone_id,
                                      preserve_metahosts=self.reuse_hosts)

        # Remove fields from clone data that cannot be reused
        for field in ('name', 'created_on', 'id', 'owner'):
            del clone_info['job'][field]

        # Also remove parameterized_job field, as the feature still is
        # incomplete, this tool does not attempt to support it for now,
        # it uses a different API function and it breaks create_job().
        # pop() replaces the deprecated has_key()/del combination.
        clone_info['job'].pop('parameterized_job', None)

        # Keyword args cannot be unicode strings
        self.data.update((str(key), val)
                         for key, val in clone_info['job'].items())

        if self.reuse_hosts:
            # Convert host list from clone info that can be used for job_create
            for label, qty in clone_info['meta_host_counts'].items():
                self.data['meta_hosts'].extend([label]*qty)

            self.data['hosts'].extend(host['hostname']
                                      for host in clone_info['hosts'])

        return self.create_job()
mblighbe630eb2008-08-01 16:41:48 +0000634
635
636class job_abort(job, action_common.atest_delete):
637 """atest job abort <job(s)>"""
638 usage_action = op_action = 'abort'
639 msg_done = 'Aborted'
640
641 def parse(self):
mbligh9deeefa2009-05-01 23:11:08 +0000642 job_info = topic_common.item_parse_info(attribute_name='jobids',
643 use_leftover=True)
644 options, leftover = super(job_abort, self).parse([job_info],
645 req_items='jobids')
mblighbe630eb2008-08-01 16:41:48 +0000646
647
mbligh206d50a2008-11-13 01:19:25 +0000648 def execute(self):
649 data = {'job__id__in': self.jobids}
650 self.execute_rpc(op='abort_host_queue_entries', **data)
651 print 'Aborting jobs: %s' % ', '.join(self.jobids)
652
653
mblighbe630eb2008-08-01 16:41:48 +0000654 def get_items(self):
655 return self.jobids