blob: 84d2e492f90a70d690df55aea3a9e1fdba133cea [file] [log] [blame]
#
# Copyright 2008 Google Inc. All Rights Reserved.

"""
The job module contains the objects and methods used to
manage jobs in Autotest.

The valid actions are:
list: lists job(s)
create: create a job
abort: abort job(s)
stat: detailed listing of job(s)

The common options are:

See topic_common.py for a High Level Design and Algorithm.
"""

import getpass, os, pwd, re, socket, sys
from autotest_lib.cli import topic_common, action_common


class job(topic_common.atest):
    """Job class
    atest job [create|list|stat|abort] <options>"""
    usage_action = '[create|list|stat|abort]'
    topic = msg_topic = 'job'
    msg_items = '<job_ids>'


    def _convert_status(self, results):
        # Flatten each job's status_counts dict into a single
        # alphabetically sorted, comma-separated "<status>:<count>"
        # summary string (e.g. "Completed:3, Running:2").
        for result in results:
            counts = result['status_counts']
            summary = sorted('%s:%s' % (name, count)
                             for name, count in counts.iteritems())
            result['status_counts'] = ', '.join(summary)
37
38
class job_help(job):
    """Just here to get the atest logic working.
    Usage is set by its parent"""
43
44
class job_list_stat(action_common.atest_list, job):
    # Shared plumbing for 'atest job list' and 'atest job stat': resolves
    # the jobs given on the command line, which may be numeric IDs or job
    # names, into RPC query results.  (Deliberately no class docstring:
    # the atest framework uses class docstrings as usage text.)

    def __split_jobs_between_ids_names(self):
        # Partition self.jobs into (job_ids, job_names): purely numeric
        # arguments are treated as job IDs, anything else as a job name.
        job_ids = []
        job_names = []

        for job_id in self.jobs:
            if job_id.isdigit():
                job_ids.append(job_id)
            else:
                job_names.append(job_id)
        return (job_ids, job_names)


    def execute_on_ids_and_names(self, op, filters=None,
                                 check_results=None,
                                 tag_id='id__in', tag_name='name__in'):
        # Run the RPC `op` for the jobs in self.jobs (or for all jobs when
        # none were given), issuing separate queries for numeric IDs and
        # for names, and return the combined result list.
        #
        # op: RPC operation name, e.g. 'get_jobs_summary'.
        # filters: extra RPC filters merged with the ID/name filter.
        # check_results: passed through to atest_list.execute().
        # tag_id/tag_name: filter keys for the ID and name queries.
        #
        # Use None sentinels instead of mutable default arguments so the
        # default dicts cannot be shared (and accidentally mutated)
        # across calls.
        if filters is None:
            filters = {}
        if check_results is None:
            check_results = {'id__in': 'id',
                             'name__in': 'id'}

        if not self.jobs:
            # Want everything
            return super(job_list_stat, self).execute(op=op, filters=filters)

        all_jobs = []
        (job_ids, job_names) = self.__split_jobs_between_ids_names()

        for items, tag in [(job_ids, tag_id),
                           (job_names, tag_name)]:
            if items:
                new_filters = filters.copy()
                new_filters[tag] = items
                jobs = super(job_list_stat,
                             self).execute(op=op,
                                           filters=new_filters,
                                           check_results=check_results)
                all_jobs.extend(jobs)

        return all_jobs
82
83
class job_list(job_list_stat):
    """atest job list [<jobs>] [--all] [--running] [--user <username>]"""
    def __init__(self):
        super(job_list, self).__init__()
        # Switches controlling whose jobs get listed.
        self.parser.add_option('-a', '--all', help='List jobs for all '
                               'users.', action='store_true', default=False)
        self.parser.add_option('-r', '--running', help='List only running '
                               'jobs', action='store_true')
        self.parser.add_option('-u', '--user', help='List jobs for given '
                               'user', type='string')


    def parse(self):
        # Build the RPC filters in self.data; without --all, --user or
        # explicit jobs, default to the invoking user's jobs.
        (options, leftover) = self.parse_with_flist([('jobs', '', '', True)],
                                                    None)
        self.all = options.all
        self.data['running'] = options.running
        if options.user:
            if options.all:
                self.invalid_syntax('Only specify --all or --user, not both.')
            else:
                self.data['owner'] = options.user
        elif not options.all and not self.jobs:
            # No explicit selection was made: restrict to the current user.
            self.data['owner'] = getpass.getuser()

        return (options, leftover)


    def execute(self):
        # self.data carries the owner/running filters built in parse().
        return self.execute_on_ids_and_names(op='get_jobs_summary',
                                             filters=self.data)


    def output(self, results):
        columns = ['id', 'owner', 'name', 'status_counts']
        if self.verbose:
            columns += ['priority', 'control_type', 'created_on']
        self._convert_status(results)
        super(job_list, self).output(results, columns)
123
124
125
class job_stat(job_list_stat):
    """atest job stat <job>"""
    usage_action = 'stat'

    def __init__(self):
        super(job_stat, self).__init__()
        self.parser.add_option('-f', '--control-file',
                               help='Display the control file',
                               action='store_true', default=False)


    def parse(self):
        # At least one job ID or name is required.
        (options, leftover) = self.parse_with_flist(flists=[('jobs', '', '',
                                                             True)],
                                                    req_items='jobs')
        if not self.jobs:
            self.invalid_syntax('Must specify at least one job.')

        self.show_control_file = options.control_file

        return (options, leftover)


    def _merge_results(self, summary, qes):
        # Fold per-host queue-entry statuses into the job summaries.
        #
        # summary: list of job dicts from get_jobs_summary.
        # qes: list of host queue entry dicts from get_host_queue_entries.
        # Returns summary, where each job gains a 'hosts_status' string
        # such as 'Queued:host1,host2, Running:host3' ('' when the job
        # has no queue entries with a real host).
        #
        # Map job id -> {status -> [hostnames]}.
        hosts_status = {}
        for qe in qes:
            if qe['host']:
                job_id = qe['job']['id']
                hostname = qe['host']['hostname']
                hosts_status.setdefault(job_id,
                                        {}).setdefault(qe['status'],
                                                       []).append(hostname)

        # Loop variable renamed from 'job' so it no longer shadows the
        # module-level job class; deprecated dict.has_key() replaced
        # with the 'in' operator.
        for job_result in summary:
            job_id = job_result['id']
            if job_id in hosts_status:
                this_job = hosts_status[job_id]
                host_per_status = ['%s:%s' % (status, ','.join(host))
                                   for status, host in this_job.iteritems()]
                job_result['hosts_status'] = ', '.join(host_per_status)
            else:
                job_result['hosts_status'] = ''
        return summary


    def execute(self):
        # Fetch the job summaries, then the matching queue entries, and
        # merge them into one result set.
        summary = self.execute_on_ids_and_names(op='get_jobs_summary')

        # Get the real hostnames
        qes = self.execute_on_ids_and_names(op='get_host_queue_entries',
                                            check_results={},
                                            tag_id='job__in',
                                            tag_name='job__name__in')

        self._convert_status(summary)

        return self._merge_results(summary, qes)


    def output(self, results):
        # Verbose mode adds ownership/typing columns; --control-file
        # appends the control file itself.
        if not self.verbose:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status']
        else:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
                    'owner', 'control_type', 'synch_type', 'created_on']

        if self.show_control_file:
            keys.append('control_file')

        super(job_stat, self).output(results, keys)
196
197
class job_create(action_common.atest_create, job):
    """atest job create [--priority <Low|Medium|High|Urgent>]
    [--is-synchronous] [--container] [--control-file </path/to/cfile>]
    [--on-server] [--test <test1,test2>] [--kernel <http://kernel>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    job_name"""
    op_action = 'create'
    msg_items = 'job_name'
    display_ids = True

    def __init__(self):
        # self.ctrl_file_data collects arguments for the
        # generate_control_file RPC when --test is used instead of an
        # explicit control file; it stays empty otherwise.
        super(job_create, self).__init__()
        self.hosts = []
        self.ctrl_file_data = {}
        self.data_item_key = 'name'
        self.parser.add_option('-p', '--priority', help='Job priority (low, '
                               'medium, high, urgent), default=medium',
                               type='choice', choices=('low', 'medium', 'high',
                               'urgent'), default='medium')
        self.parser.add_option('-y', '--synchronous', action='store_true',
                               help='Make the job synchronous',
                               default=False)
        self.parser.add_option('-c', '--container', help='Run this client job '
                               'in a container', action='store_true',
                               default=False)
        self.parser.add_option('-f', '--control-file',
                               help='use this control file', metavar='FILE')
        self.parser.add_option('-s', '--server',
                               help='This is server-side job',
                               action='store_true', default=False)
        self.parser.add_option('-t', '--test',
                               help='Run a job with these tests')
        self.parser.add_option('-k', '--kernel', help='Install kernel from this'
                               ' URL before beginning job')
        self.parser.add_option('-m', '--machine', help='List of machines to '
                               'run on')
        self.parser.add_option('-M', '--mlist',
                               help='File listing machines to use',
                               type='string', metavar='MACHINE_FLIST')


    def parse_hosts(self, args):
        """ Parses the arguments to generate a list of hosts and meta_hosts
        A host is a regular name, a meta_host is n*label or *label.
        These can be mixed on the CLI, and separated by either commas or
        spaces, e.g.: 5*Machine_Label host0 5*Machine_Label2,host2 """

        hosts = []
        meta_hosts = []

        for arg in args:
            for host in arg.split(','):
                if re.match('^[0-9]+[*]', host):
                    # "<n>*label": expand to n copies of the label.
                    num, host = host.split('*', 1)
                    meta_hosts += int(num) * [host]
                elif re.match('^[*](\w*)', host):
                    # "*label": one meta_host entry for that label.
                    meta_hosts += [re.match('^[*](\w*)', host).group(1)]
                elif host != '':
                    # Real hostname
                    hosts.append(host)

        return (hosts, meta_hosts)


    def parse(self):
        # Validates the mutually-exclusive option combinations and fills
        # self.data with the parameters for the job-creation RPC.  The
        # ordering of the checks below matters: --control-file specific
        # errors are only reported once the basic exclusivity checks pass.
        flists = [('hosts', 'mlist', 'machine', False),
                  ('jobname', '', '', True)]
        (options, leftover) = self.parse_with_flist(flists,
                                                    req_items='jobname')
        self.data = {}

        if len(self.hosts) == 0:
            self.invalid_syntax('Must specify at least one host')
        if not options.control_file and not options.test:
            self.invalid_syntax('Must specify either --test or --control-file'
                                ' to create a job.')
        if options.control_file and options.test:
            self.invalid_syntax('Can only specify one of --control-file or '
                                '--test, not both.')
        if options.container and options.server:
            self.invalid_syntax('Containers (--container) can only be added to'
                                ' client side jobs.')
        if options.control_file:
            if options.kernel:
                self.invalid_syntax('Use --kernel only in conjunction with '
                                    '--test, not --control-file.')
            if options.container:
                self.invalid_syntax('Containers (--container) can only be added'
                                    ' with --test, not --control-file.')
            try:
                # The control file's contents, not its path, are sent
                # with the RPC.
                self.data['control_file'] = open(options.control_file).read()
            except IOError:
                self.generic_error('Unable to read from specified '
                                   'control-file: %s' % options.control_file)

        if options.priority:
            # The server expects capitalized priorities ('Medium', ...).
            self.data['priority'] = options.priority.capitalize()

        if len(self.jobname) > 1:
            self.invalid_syntax('Too many arguments specified, only expected '
                                'to receive job name: %s' % self.jobname)
        self.jobname = self.jobname[0]
        self.data['name'] = self.jobname

        (self.data['hosts'],
         self.data['meta_hosts']) = self.parse_hosts(self.hosts)


        self.data['is_synchronous'] = options.synchronous
        if options.server:
            self.data['control_type'] = 'Server'
        else:
            self.data['control_type'] = 'Client'

        if options.test:
            if options.server or options.synchronous:
                self.invalid_syntax('Must specify a control file (--control-'
                                    'file) for jobs that are synchronous or '
                                    'server jobs.')
            # Defer control-file generation to the server (see execute()).
            self.ctrl_file_data = {'tests': options.test.split(',')}
            if options.kernel:
                self.ctrl_file_data['kernel'] = options.kernel
                self.ctrl_file_data['do_push_packages'] = True
            self.ctrl_file_data['use_container'] = options.container

        return (options, leftover)


    def execute(self):
        # When --test was given, first ask the server to generate the
        # control file, then create the job with the result.
        if self.ctrl_file_data:
            if self.ctrl_file_data.has_key('kernel'):
                # Kernel uploads can be slow: widen the socket timeout
                # for the duration of the RPC, restored below.
                socket.setdefaulttimeout(topic_common.UPLOAD_SOCKET_TIMEOUT)
                print 'Uploading Kernel: this may take a while...',

            (ctrl_file, on_server,
             is_synch) = self.execute_rpc(op='generate_control_file',
                                          item=self.jobname,
                                          **self.ctrl_file_data)

            if self.ctrl_file_data.has_key('kernel'):
                print 'Done'
                socket.setdefaulttimeout(topic_common.DEFAULT_SOCKET_TIMEOUT)
            # The server's answer overrides what parse() guessed for
            # control type and synchronicity.
            self.data['control_file'] = ctrl_file
            self.data['is_synchronous'] = is_synch
            if on_server:
                self.data['control_type'] = 'Server'
            else:
                self.data['control_type'] = 'Client'
        return super(job_create, self).execute()


    def get_items(self):
        # atest_create expects the list of items being created.
        return [self.jobname]
351
352
class job_abort(job, action_common.atest_delete):
    """atest job abort <job(s)>"""
    usage_action = op_action = 'abort'
    msg_done = 'Aborted'

    def parse(self):
        # At least one job ID is required.
        (options, leftover) = self.parse_with_flist([('jobids', '', '', True)],
                                                    req_items='jobids')
        # Return the parsed results like every sibling parse() does;
        # the original dropped them and implicitly returned None.
        return (options, leftover)


    def get_items(self):
        return self.jobids