#
# Copyright 2008 Google Inc. All Rights Reserved.

"""
The job module contains the objects and methods used to
manage jobs in Autotest.

The valid actions are:
list: lists job(s)
create: create a job
abort: abort job(s)
stat: detailed listing of job(s)

The options common to all actions, along with a high-level design and
algorithm, are documented in topic_common.py.
"""
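
# Example invocations (illustrative; the job IDs, host names and test
# names below are placeholders):
#   atest job list --all
#   atest job stat 42 --control-file
#   atest job create --test sleeptest --machine host0 my_job
#   atest job abort 42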

import getpass, os, pwd, re, socket, sys
from autotest_lib.cli import topic_common, action_common


class job(topic_common.atest):
    """Job class
    atest job [create|list|stat|abort] <options>"""
    usage_action = '[create|list|stat|abort]'
    topic = msg_topic = 'job'
    msg_items = '<job_ids>'

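    # For illustration: a hypothetical 'status_counts' value of
    #   {'Completed': 3, 'Failed': 1}
    # is rewritten by _convert_status() below into the summary string
    #   'Completed:3(75.0%), Failed:1(25.0%)'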

    def _convert_status(self, results):
        """Convert each result's 'status_counts' dict in place into a
        sorted 'status:count(percent)' summary string."""
        for result in results:
            total = sum(result['status_counts'].values())
            status = ['%s:%s(%.1f%%)' % (key, val, 100.0*float(val)/total)
                      for key, val in result['status_counts'].iteritems()]
            status.sort()
            result['status_counts'] = ', '.join(status)


class job_help(job):
    """Just here to get the atest logic working.
    Usage is set by its parent"""
    pass


class job_list_stat(action_common.atest_list, job):
    def __split_jobs_between_ids_names(self):
        job_ids = []
        job_names = []

        # Sort between job IDs and names
        for job_id in self.jobs:
            if job_id.isdigit():
                job_ids.append(job_id)
            else:
                job_names.append(job_id)
        return (job_ids, job_names)

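    # For illustration: with self.jobs == ['42', 'nightly'] (hypothetical
    # values), __split_jobs_between_ids_names() returns (['42'], ['nightly']).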

    def execute_on_ids_and_names(self, op, filters={},
                                 check_results={'id__in': 'id',
                                                'name__in': 'id'},
                                 tag_id='id__in', tag_name='name__in'):
        """Run the RPC op once for the job IDs and once for the job names
        in self.jobs, then return the combined results.  If no jobs were
        specified, run op once with the filters unchanged."""
        if not self.jobs:
            # Want everything
            return super(job_list_stat, self).execute(op=op, filters=filters)

        all_jobs = []
        (job_ids, job_names) = self.__split_jobs_between_ids_names()

        for items, tag in [(job_ids, tag_id),
                           (job_names, tag_name)]:
            if items:
                new_filters = filters.copy()
                new_filters[tag] = items
                jobs = super(job_list_stat,
                             self).execute(op=op,
                                           filters=new_filters,
                                           check_results=check_results)
                all_jobs.extend(jobs)

        return all_jobs
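
    # For illustration: job_list.execute() below invokes
    #   self.execute_on_ids_and_names(op='get_jobs_summary',
    #                                 filters=self.data)
    # which issues one 'id__in' query for numeric IDs and one 'name__in'
    # query for names, then concatenates the results.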


class job_list(job_list_stat):
    """atest job list [<jobs>] [--all] [--running] [--user <username>]"""
    def __init__(self):
        super(job_list, self).__init__()
        self.parser.add_option('-a', '--all', help='List jobs for all '
                               'users.', action='store_true', default=False)
        self.parser.add_option('-r', '--running', help='List only running '
                               'jobs.', action='store_true')
        self.parser.add_option('-u', '--user', help='List jobs for given '
                               'user.', type='string')


    def parse(self):
        (options, leftover) = self.parse_with_flist([('jobs', '', '', True)],
                                                    None)
        self.all = options.all
        self.data['running'] = options.running
        if options.user:
            if options.all:
                self.invalid_syntax('Only specify --all or --user, not both.')
            else:
                self.data['owner'] = options.user
        elif not options.all and not self.jobs:
            self.data['owner'] = getpass.getuser()

        return (options, leftover)


    def execute(self):
        return self.execute_on_ids_and_names(op='get_jobs_summary',
                                             filters=self.data)


    def output(self, results):
        keys = ['id', 'owner', 'name', 'status_counts']
        if self.verbose:
            keys.extend(['priority', 'control_type', 'created_on'])
        self._convert_status(results)
        super(job_list, self).output(results, keys)


class job_stat(job_list_stat):
    """atest job stat <job>"""
    usage_action = 'stat'

    def __init__(self):
        super(job_stat, self).__init__()
        self.parser.add_option('-f', '--control-file',
                               help='Display the control file',
                               action='store_true', default=False)


    def parse(self):
        (options, leftover) = self.parse_with_flist(flists=[('jobs', '', '',
                                                             True)],
                                                    req_items='jobs')
        if not self.jobs:
            self.invalid_syntax('Must specify at least one job.')

        self.show_control_file = options.control_file

        return (options, leftover)


    def _merge_results(self, summary, qes):
        """Annotate each job in summary with a 'hosts_status' string
        built from its host queue entries (qes)."""
        hosts_status = {}
        for qe in qes:
            if qe['host']:
                job_id = qe['job']['id']
                hostname = qe['host']['hostname']
                hosts_status.setdefault(job_id,
                                        {}).setdefault(qe['status'],
                                                       []).append(hostname)

        for job_result in summary:
            job_id = job_result['id']
            if job_id in hosts_status:
                this_job = hosts_status[job_id]
                host_per_status = ['%s:%s' % (status, ','.join(hosts))
                                   for status, hosts in this_job.iteritems()]
                job_result['hosts_status'] = ', '.join(host_per_status)
            else:
                job_result['hosts_status'] = ''
        return summary
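
    # For illustration: a merged 'hosts_status' entry looks like
    #   'Completed:host1,host2, Running:host3'
    # (hypothetical hostnames), one 'status:host,...' group per queue
    # entry status.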


    def execute(self):
        summary = self.execute_on_ids_and_names(op='get_jobs_summary')

        # Get the real hostnames
        qes = self.execute_on_ids_and_names(op='get_host_queue_entries',
                                            check_results={},
                                            tag_id='job__in',
                                            tag_name='job__name__in')

        self._convert_status(summary)

        return self._merge_results(summary, qes)


    def output(self, results):
        if not self.verbose:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status']
        else:
            keys = ['id', 'name', 'priority', 'status_counts', 'hosts_status',
                    'owner', 'control_type', 'synch_type', 'created_on']

        if self.show_control_file:
            keys.append('control_file')

        super(job_stat, self).output(results, keys)


class job_create(action_common.atest_create, job):
    """atest job create [--priority <Low|Medium|High|Urgent>]
    [--synchronous] [--container] [--control-file </path/to/cfile>]
    [--server] [--test <test1,test2>] [--kernel <http://kernel>]
    [--mlist </path/to/machinelist>] [--machine <host1 host2 host3>]
    job_name"""
    op_action = 'create'
    msg_items = 'job_name'
    display_ids = True

    def __init__(self):
        super(job_create, self).__init__()
        self.hosts = []
        self.ctrl_file_data = {}
        self.data_item_key = 'name'
        self.parser.add_option('-p', '--priority', help='Job priority (low, '
                               'medium, high, urgent), default=medium',
                               type='choice', choices=('low', 'medium', 'high',
                               'urgent'), default='medium')
        self.parser.add_option('-y', '--synchronous', action='store_true',
                               help='Make the job synchronous',
                               default=False)
        self.parser.add_option('-c', '--container', help='Run this client job '
                               'in a container', action='store_true',
                               default=False)
        self.parser.add_option('-f', '--control-file',
                               help='Use this control file', metavar='FILE')
        self.parser.add_option('-s', '--server',
                               help='This is a server-side job',
                               action='store_true', default=False)
        self.parser.add_option('-t', '--test',
                               help='Comma-separated list of tests to run')
        self.parser.add_option('-k', '--kernel', help='Install kernel from this'
                               ' URL before beginning job')
        self.parser.add_option('-m', '--machine', help='List of machines to '
                               'run on')
        self.parser.add_option('-M', '--mlist',
                               help='File listing machines to use',
                               type='string', metavar='MACHINE_FLIST')


    def parse_hosts(self, args):
        """Parse the arguments to generate a list of hosts and meta_hosts.
        A host is a regular hostname; a meta_host is either n*label or
        *label.  The two kinds can be mixed on the CLI and separated by
        commas or spaces, e.g.: 5*Machine_Label host0 5*Machine_Label2,host2
        """
        hosts = []
        meta_hosts = []

        for arg in args:
            for host in arg.split(','):
                label_match = re.match(r'^[*](\w*)', host)
                if re.match(r'^[0-9]+[*]', host):
                    num, host = host.split('*', 1)
                    meta_hosts += int(num) * [host]
                elif label_match:
                    meta_hosts.append(label_match.group(1))
                elif host != '':
                    # Real hostname
                    hosts.append(host)

        return (hosts, meta_hosts)
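
    # For illustration, with hypothetical labels and hostnames:
    #   parse_hosts(['5*LABEL1', 'host0,*LABEL2'])
    # returns
    #   (['host0'], ['LABEL1', 'LABEL1', 'LABEL1', 'LABEL1', 'LABEL1',
    #                'LABEL2'])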


    def parse(self):
        flists = [('hosts', 'mlist', 'machine', False),
                  ('jobname', '', '', True)]
        (options, leftover) = self.parse_with_flist(flists,
                                                    req_items='jobname')
        self.data = {}

        if len(self.hosts) == 0:
            self.invalid_syntax('Must specify at least one host')
        if not options.control_file and not options.test:
            self.invalid_syntax('Must specify either --test or --control-file'
                                ' to create a job.')
        if options.control_file and options.test:
            self.invalid_syntax('Can only specify one of --control-file or '
                                '--test, not both.')
        if options.container and options.server:
            self.invalid_syntax('Containers (--container) can only be added to'
                                ' client side jobs.')
        if options.control_file:
            if options.kernel:
                self.invalid_syntax('Use --kernel only in conjunction with '
                                    '--test, not --control-file.')
            if options.container:
                self.invalid_syntax('Containers (--container) can only be added'
                                    ' with --test, not --control-file.')
            try:
                self.data['control_file'] = open(options.control_file).read()
            except IOError:
                self.generic_error('Unable to read from specified '
                                   'control-file: %s' % options.control_file)

        if options.priority:
            self.data['priority'] = options.priority.capitalize()

        if len(self.jobname) > 1:
            self.invalid_syntax('Too many arguments specified, only expected '
                                'to receive job name: %s' % self.jobname)
        self.jobname = self.jobname[0]
        self.data['name'] = self.jobname

        (self.data['hosts'],
         self.data['meta_hosts']) = self.parse_hosts(self.hosts)

        self.data['is_synchronous'] = options.synchronous
        if options.server:
            self.data['control_type'] = 'Server'
        else:
            self.data['control_type'] = 'Client'

        if options.test:
            if options.server or options.synchronous:
                self.invalid_syntax('Must specify a control file (--control-'
                                    'file) for jobs that are synchronous or '
                                    'server jobs.')
            self.ctrl_file_data = {'tests': options.test.split(',')}
            if options.kernel:
                self.ctrl_file_data['kernel'] = options.kernel
                self.ctrl_file_data['do_push_packages'] = True
            self.ctrl_file_data['use_container'] = options.container

        return (options, leftover)
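
    # For illustration: 'atest job create -t sleeptest -m host0 my_job'
    # (hypothetical test and host names) leaves parse() with:
    #   self.data = {'priority': 'Medium', 'name': 'my_job',
    #                'hosts': ['host0'], 'meta_hosts': [],
    #                'is_synchronous': False, 'control_type': 'Client'}
    #   self.ctrl_file_data = {'tests': ['sleeptest'], 'use_container': False}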


    def execute(self):
        if self.ctrl_file_data:
            if 'kernel' in self.ctrl_file_data:
                socket.setdefaulttimeout(topic_common.UPLOAD_SOCKET_TIMEOUT)
                print 'Uploading Kernel: this may take a while...',

            (ctrl_file, on_server,
             is_synch) = self.execute_rpc(op='generate_control_file',
                                          item=self.jobname,
                                          **self.ctrl_file_data)

            if 'kernel' in self.ctrl_file_data:
                print 'Done'
                socket.setdefaulttimeout(topic_common.DEFAULT_SOCKET_TIMEOUT)
            self.data['control_file'] = ctrl_file
            self.data['is_synchronous'] = is_synch
            if on_server:
                self.data['control_type'] = 'Server'
            else:
                self.data['control_type'] = 'Client'
        return super(job_create, self).execute()


    def get_items(self):
        return [self.jobname]


class job_abort(job, action_common.atest_delete):
    """atest job abort <job(s)>"""
    usage_action = op_action = 'abort'
    msg_done = 'Aborted'

    def parse(self):
        (options, leftover) = self.parse_with_flist([('jobids', '', '', True)],
                                                    req_items='jobids')
        return (options, leftover)


    def get_items(self):
        return self.jobids