blob: 48fa1079efe189ecef12628b9a99a0a07eadc081 [file] [log] [blame]
mbligh36768f02008-02-22 18:28:33 +00001#!/usr/bin/python -u
2
3"""
4Autotest scheduler
5"""
6__author__ = "Paul Turner <pjt@google.com>"
7
8import os, sys, tempfile, shutil, MySQLdb, time, traceback, subprocess, Queue
mblighe2586682008-02-29 22:45:46 +00009import optparse, signal, smtplib, socket, datetime, stat
mblighb090f142008-02-27 21:33:46 +000010from common import global_config
11
mbligh36768f02008-02-22 18:28:33 +000012RESULTS_DIR = '.'
13AUTOSERV_NICE_LEVEL = 10
14
15AUTOTEST_PATH = os.path.join(os.path.dirname(__file__), '..')
16
17if os.environ.has_key('AUTOTEST_DIR'):
18 AUTOTEST_PATH = os.environ['AUTOTEST_DIR']
19AUTOTEST_SERVER_DIR = os.path.join(AUTOTEST_PATH, 'server')
20AUTOTEST_TKO_DIR = os.path.join(AUTOTEST_PATH, 'tko')
21
22if AUTOTEST_SERVER_DIR not in sys.path:
23 sys.path.insert(0, AUTOTEST_SERVER_DIR)
24
mbligh6f8bab42008-02-29 22:45:14 +000025_db = None
mbligh36768f02008-02-22 18:28:33 +000026_shutdown = False
27_notify_email = None
mbligh4314a712008-02-29 22:44:30 +000028_autoserv_path = 'autoserv'
29_testing_mode = False
mbligh36768f02008-02-22 18:28:33 +000030
31
def main():
	"""Scheduler entry point.

	Parses command-line options (one positional argument: the results
	directory), initializes logging/DB/signal handling, then ticks the
	Dispatcher every 20 seconds until SIGINT sets the module-level
	_shutdown flag, after which agents are drained and the DB closed.
	"""
	usage = 'usage: %prog [options] results_dir'

	parser = optparse.OptionParser(usage)
	parser.add_option('--no-recover', help='Skip machine/job recovery ' +
			  'step [for multiple monitors/rolling upgrades]',
			  action='store_true')
	parser.add_option('--logfile', help='Set a log file that all stdout ' +
			  'should be redirected to. Stderr will go to this ' +
			  'file + ".err"')
	parser.add_option('--notify', help='Set an email address to be ' +
			  'notified of exceptions')
	parser.add_option('--test', help='Indicate that scheduler is under ' +
			  'test and should use dummy autoserv and no parsing',
			  action='store_true')
	(options, args) = parser.parse_args()
	if len(args) != 1:
		parser.print_usage()
		return

	global RESULTS_DIR
	RESULTS_DIR = args[0]

	global _notify_email
	_notify_email = options.notify

	if options.test:
		# swap in the dummy autoserv and a scratch DB schema
		global _autoserv_path
		_autoserv_path = 'autoserv_dummy'
		global _testing_mode
		_testing_mode = True

	init(options.logfile)
	dispatcher = Dispatcher(do_recover = not options.no_recover)

	try:
		while not _shutdown:
			dispatcher.tick()
			time.sleep(20)
		dispatcher.shut_down()
	except Exception:
		# was a bare 'except:', which also swallowed SystemExit and
		# KeyboardInterrupt; only trap real errors here
		log_stacktrace("Uncaught exception; terminating monitor_db")

	_db.disconnect()
mbligh36768f02008-02-22 18:28:33 +000076
77
78def handle_sigint(signum, frame):
79 global _shutdown
80 _shutdown = True
81 print "Shutdown request received."
82
83
def init(logfile):
	"""One-time scheduler setup: logging, PATH, DB connection, signals.

	logfile: optional path; when set, stdout/stderr are redirected
	there (stderr to logfile + '.err') before anything is printed.
	"""
	if logfile:
		enable_logging(logfile)
	print "%s> dispatcher starting" % time.strftime("%X %x")
	print "My PID is %d" % os.getpid()

	# make autoserv and friends resolvable for spawned subprocesses
	os.environ['PATH'] = AUTOTEST_SERVER_DIR + ':' + os.environ['PATH']
	global _db
	_db = DatabaseConn()

	# installed only after the (possibly blocking) DB connect succeeds
	print "Setting signal handler"
	signal.signal(signal.SIGINT, handle_sigint)

	print "Connected! Running..."
98
99
def enable_logging(logfile):
	"""Redirect stdout to 'logfile' and stderr to 'logfile'.err.

	Both the Python-level file objects and the underlying file
	descriptors are redirected (via dup2), so child processes that
	inherit fds 1/2 log to the same files.  Files are opened
	unbuffered and in append mode.
	"""
	out_file = logfile
	err_file = "%s.err" % logfile
	print "Enabling logging to %s (%s)" % (out_file, err_file)
	out_fd = open(out_file, "a", buffering=0)
	err_fd = open(err_file, "a", buffering=0)

	# swap the real descriptors first so exec'd children follow along
	os.dup2(out_fd.fileno(), sys.stdout.fileno())
	os.dup2(err_fd.fileno(), sys.stderr.fileno())

	sys.stdout = out_fd
	sys.stderr = err_fd
112
113
def idle_hosts():
	"""Return Host objects ready for new work.

	A host qualifies when it is unlocked, has status 'Ready' (or no
	status), is not attached to any active queue entry, and is wanted
	by at least one pending entry -- either directly by host_id or
	indirectly through a meta-host label.
	"""
	rows = _db.execute("""
		SELECT * FROM hosts h WHERE
		id NOT IN (SELECT host_id FROM host_queue_entries WHERE active) AND (
			(id IN (SELECT host_id FROM host_queue_entries WHERE not complete AND not active))
			OR
			(id IN (SELECT DISTINCT hl.host_id FROM host_queue_entries hqe
				INNER JOIN hosts_labels hl ON hqe.meta_host=hl.label_id WHERE not hqe.complete AND not hqe.active))
		)
		AND locked=false AND (h.status IS null OR h.status='Ready') """)
	hosts = [Host(row=i) for i in rows]
	return hosts
126
def queue_entries_to_abort():
	"""Return a HostQueueEntry for every queue entry marked 'Abort'."""
	rows = _db.execute("""
		SELECT * FROM host_queue_entries WHERE status='Abort';
		""")
	return [HostQueueEntry(row=row) for row in rows]
mbligh36768f02008-02-22 18:28:33 +0000133
def remove_file_or_dir(path):
	"""Delete 'path', whether it is a file, symlink, or directory tree.

	Uses lstat so a symlink is removed as a link (os.remove) rather
	than being mistaken for the directory it points at -- os.stat
	follows symlinks, which sent symlinks-to-directories into
	shutil.rmtree and made it fail.
	"""
	if stat.S_ISDIR(os.lstat(path).st_mode):
		# real directory: remove recursively
		shutil.rmtree(path)
	else:
		# file or symlink
		os.remove(path)
141
142
class DatabaseConn:
	"""Auto-reconnecting wrapper around a MySQLdb connection."""
	def __init__(self):
		# seconds to sleep between reconnection attempts
		self.reconnect_wait = 20
		self.conn = None
		self.cur = None

		self.connect()


	def connect(self):
		"""(Re)connect, blocking and retrying until the DB answers."""
		self.disconnect()

		# get global config and parse for info
		c = global_config.global_config
		dbase = "AUTOTEST_WEB"
		DB_HOST = c.get_config_value(dbase, "host", "localhost")
		DB_SCHEMA = c.get_config_value(dbase, "database",
					       "autotest_web")

		global _testing_mode
		if _testing_mode:
			# keep scheduler tests away from the real schema
			DB_SCHEMA = 'stresstest_autotest_web'

		DB_USER = c.get_config_value(dbase, "user", "autotest")
		DB_PASS = c.get_config_value(dbase, "password", "google")

		while not self.conn:
			try:
				self.conn = MySQLdb.connect(host=DB_HOST,
							    user=DB_USER,
							    passwd=DB_PASS,
							    db=DB_SCHEMA)

				self.conn.autocommit(True)
				self.cur = self.conn.cursor()
			except MySQLdb.OperationalError:
				print "Can't connect to MYSQL; reconnecting"
				time.sleep(self.reconnect_wait)
				self.disconnect()


	def disconnect(self):
		# safe to call when already disconnected
		if self.conn:
			self.conn.close()
		self.conn = None
		self.cur = None


	def execute(self, *args, **dargs):
		"""Run a query and return all fetched rows, transparently
		reconnecting and retrying if the connection has died."""
		while (True):
			try:
				self.cur.execute(*args, **dargs)
				return self.cur.fetchall()
			except MySQLdb.OperationalError:
				print "MYSQL connection died; reconnecting"
				time.sleep(self.reconnect_wait)
				self.connect()
mbligh36768f02008-02-22 18:28:33 +0000200
201
def generate_parse_command(results_dir, flags=""):
	"""Build the shell command that runs the TKO parser over
	results_dir in the background, logging to .parse.log there."""
	parse_bin = os.path.abspath(os.path.join(AUTOTEST_TKO_DIR, 'parse'))
	log_path = os.path.abspath(os.path.join(results_dir, '.parse.log'))
	return "%s %s -r -o %s > %s 2>&1 &" % (parse_bin, flags,
					       results_dir, log_path)
207
208
def parse_results(results_dir, flags=""):
	"""Kick off the TKO parser over results_dir (no-op in test mode).

	The parser runs detached via the shell; its success or failure is
	not reported back here.
	"""
	if _testing_mode:
		return
	os.system(generate_parse_command(results_dir, flags))
mbligh36768f02008-02-22 18:28:33 +0000213
214
def log_stacktrace(reason):
	"""Write the pending exception (with host/pid/time context) to
	stderr and, when _notify_email is set, email it there.

	Must be called from inside an 'except' block so that
	sys.exc_info() is populated.
	"""
	# renamed locals: the originals shadowed the 'type' and 'str' builtins
	(exc_type, exc_value, exc_tb) = sys.exc_info()
	message = "EXCEPTION: %s\n" % reason
	message += "%s / %s / %s\n" % (socket.gethostname(), os.getpid(),
				       time.strftime("%X %x"))
	message += ''.join(traceback.format_exception(exc_type, exc_value,
						      exc_tb))

	sys.stderr.write("\n%s\n" % message)

	if _notify_email:
		sender = "monitor_db"
		subject = "monitor_db exception"
		msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
		    sender, _notify_email, subject, message)
		mailer = smtplib.SMTP('localhost')
		mailer.sendmail(sender, _notify_email, msg)
		mailer.quit()
232
233
class Dispatcher:
	"""Top-level scheduler loop: recovers state at startup, then each
	tick finds aborts, finds new work, and advances all Agents."""
	def __init__(self, do_recover=True):
		self._agents = []
		self.shutting_down = False

		if do_recover:
			self._recover_lost()


	def shut_down(self):
		# stop picking up new work but keep ticking until every
		# agent has drained its task queue
		print "Shutting down!"
		self.shutting_down = True
		while self._agents:
			self.tick()
			time.sleep(40)


	def tick(self):
		if not self.shutting_down:
			self._find_aborting()
			self._find_more_work()
		self._handle_agents()


	def add_agent(self, agent):
		self._agents.append(agent)
		agent.dispatcher = self

	# Find agent corresponding to the specified queue_entry
	def get_agents(self, queue_entry):
		res_agents = []
		for agent in self._agents:
			if queue_entry.id in agent.queue_entry_ids:
				res_agents.append(agent)
		return res_agents


	def remove_agent(self, agent):
		self._agents.remove(agent)


	def _recover_lost(self):
		"""Repair DB state left behind by a dead scheduler instance."""
		# entries stuck 'active' (but not aborting): requeue them;
		# synchronous jobs are restarted as a whole
		rows = _db.execute("""SELECT * FROM host_queue_entries WHERE active AND NOT complete AND status != 'Abort' AND status != 'Aborting'""")
		if len(rows) > 0:
			queue_entries = [HostQueueEntry(row=i) for i in rows]
			for queue_entry in queue_entries:
				job = queue_entry.job
				if job.is_synchronous():
					for child_entry in job.get_host_queue_entries():
						child_entry.requeue()
				else:
					queue_entry.requeue()
				queue_entry.clear_results_dir()

		# entries caught mid-abort: finish the abort by rebooting
		# and re-verifying their hosts
		rebooting_host_ids = []
		rows = _db.execute("""SELECT * FROM host_queue_entries
		                      WHERE status='Abort' or status='Aborting'""")
		if len(rows) > 0:
			queue_entries = [HostQueueEntry(row=i) for i in rows]
			for queue_entry in queue_entries:
				queue_host = queue_entry.get_host()
				reboot_task = RebootTask(queue_host)
				verify_task = VerifyTask(host = queue_host)
				self.add_agent(Agent(tasks=[reboot_task,
							    verify_task],
						     queue_entry_ids=[queue_entry.id]))
				queue_entry.set_status('Aborted')
				# Secure the host from being picked up
				queue_host.set_status('Rebooting')
				rebooting_host_ids.append(queue_host.id)

		# any other host left in a non-Ready state gets re-verified
		# (skipping the ones we just queued reboots for)
		rows = _db.execute("""SELECT * FROM hosts
		                      WHERE status != 'Ready' AND NOT locked""")
		if len(rows) > 0:
			hosts = [Host(row=i) for i in rows]
			for host in hosts:
				if host.id in rebooting_host_ids:
					continue
				verify_task = VerifyTask(host = host)
				self.add_agent(Agent(tasks = [verify_task]))


	def _find_more_work(self):
		"""Hand pending queue entries to idle hosts, capped at 100
		new agents per tick."""
		print "finding work"

		num_started = 0
		for host in idle_hosts():
			tasks = host.next_queue_entries()
			if tasks:
				for next in tasks:
					try:
						agent = next.run(assigned_host=host)
						if agent:
							self.add_agent(agent)

							num_started += 1
							if num_started>=100:
								return
						# only the first runnable
						# entry per host is tried
						break
					except:
						next.set_status('Failed')

						log_stacktrace("task_id = %d" % next.id)


	def _find_aborting(self):
		"""Process entries marked 'Abort', at most 50 per tick."""
		num_aborted = 0
		# Find jobs that are aborting
		for entry in queue_entries_to_abort():
			agents_to_abort = self.get_agents(entry)
			entry_host = entry.get_host()
			reboot_task = RebootTask(entry_host)
			verify_task = VerifyTask(host = entry_host)
			tasks = [reboot_task, verify_task]
			if agents_to_abort:
				# running: AbortTask first, then reboot+verify
				abort_task = AbortTask(entry, agents_to_abort)
				tasks.insert(0, abort_task)
			else:
				# nothing running; mark aborted immediately
				entry.set_status('Aborted')
				# just to make sure this host does not get
				# taken away
				entry_host.set_status('Rebooting')
			self.add_agent(Agent(tasks=tasks,
					     queue_entry_ids = [entry.id]))
			num_aborted += 1
			if num_aborted >= 50:
				break


	def _handle_agents(self):
		# tick every agent, dropping the ones that have finished
		still_running = []
		for agent in self._agents:
			agent.tick()
			if not agent.is_done():
				still_running.append(agent)
			else:
				print "agent finished"
		self._agents = still_running
375
376
class RunMonitor(object):
	"""Spawn a subprocess (optionally nice'd, optionally logged) and
	track its completion."""
	def __init__(self, cmd, nice_level = None, log_file = None):
		self.nice_level = nice_level
		self.log_file = log_file
		# the child is launched immediately
		self.proc = self.run(cmd)

	def run(self, cmd):
		if self.nice_level:
			nice_cmd = ['nice','-n', str(self.nice_level)]
			nice_cmd.extend(cmd)
			cmd = nice_cmd

		out_file = None
		if self.log_file:
			try:
				out_file = open(self.log_file, 'a')
				# banner separating runs in the shared log
				out_file.write("\n%s\n" % ('*'*80))
				out_file.write("%s> %s\n" % (time.strftime("%X %x"), cmd))
				out_file.write("%s\n" % ('*'*80))
			except:
				# best effort; fall back to /dev/null below
				pass

		if not out_file:
			out_file = open('/dev/null', 'w')

		in_devnull = open('/dev/null', 'r')
		print "cmd = %s" % cmd
		print "path = %s" % os.getcwd()

		proc = subprocess.Popen(cmd, stdout=out_file,
					stderr=subprocess.STDOUT, stdin=in_devnull)
		# the child holds its own copies of these descriptors
		out_file.close()
		in_devnull.close()
		return proc


	def kill(self):
		# SIGCONT first so a stopped process can receive/handle the
		# SIGTERM that follows; only if the child is still running
		if self.proc.poll() == None:
			os.kill(self.proc.pid, signal.SIGCONT)
			os.kill(self.proc.pid, signal.SIGTERM)


	def exit_code(self):
		# None while still running, exit status once finished
		return self.proc.poll()
421
422
423class Agent(object):
mblighd5c95802008-03-05 00:33:46 +0000424 def __init__(self, tasks, queue_entry_ids=[]):
mbligh36768f02008-02-22 18:28:33 +0000425 self.active_task = None
426 self.queue = Queue.Queue(0)
427 self.dispatcher = None
mblighd5c95802008-03-05 00:33:46 +0000428 self.queue_entry_ids = queue_entry_ids
mbligh36768f02008-02-22 18:28:33 +0000429
430 for task in tasks:
431 self.add_task(task)
432
433
434 def add_task(self, task):
435 self.queue.put_nowait(task)
436 task.agent = self
437
438
439 def tick(self):
440 print "agent tick"
441 if self.active_task and not self.active_task.is_done():
442 self.active_task.poll()
443 else:
444 self._next_task();
445
446
447 def _next_task(self):
448 print "agent picking task"
449 if self.active_task:
450 assert self.active_task.is_done()
451
mblighe2586682008-02-29 22:45:46 +0000452 if not self.active_task.success:
453 self.on_task_failure()
454
mbligh36768f02008-02-22 18:28:33 +0000455 self.active_task = None
456 if not self.is_done():
457 self.active_task = self.queue.get_nowait()
458 if self.active_task:
459 self.active_task.start()
460
461
mblighe2586682008-02-29 22:45:46 +0000462 def on_task_failure(self):
mblighe2586682008-02-29 22:45:46 +0000463 self.queue = Queue.Queue(0)
464 for task in self.active_task.failure_tasks:
465 self.add_task(task)
mbligh16c722d2008-03-05 00:58:44 +0000466
mblighe2586682008-02-29 22:45:46 +0000467
mbligh36768f02008-02-22 18:28:33 +0000468 def is_done(self):
469 return self.active_task == None and self.queue.empty()
470
471
472 def start(self):
473 assert self.dispatcher
474
475 self._next_task()
476
mblighd5c95802008-03-05 00:33:46 +0000477
mbligh36768f02008-02-22 18:28:33 +0000478class AgentTask(object):
mbligh16c722d2008-03-05 00:58:44 +0000479 def __init__(self, cmd, failure_tasks = []):
mblighe2586682008-02-29 22:45:46 +0000480 """\
481 By default, on failure, the Agent's task queue is cleared and
482 replaced with the tasks in failure_tasks. If
483 clear_queue_on_failure=False, the task queue will not be
484 cleared, and the tasks in failure_tasks will be inserted at the
485 beginning of the queue.
486 """
mbligh36768f02008-02-22 18:28:33 +0000487 self.done = False
488 self.failure_tasks = failure_tasks
489 self.started = False
490 self.cmd = cmd
mblighd5c95802008-03-05 00:33:46 +0000491 self.task = None
mbligh36768f02008-02-22 18:28:33 +0000492 self.agent = None
mblighd5c95802008-03-05 00:33:46 +0000493 self.monitor = None
mbligh36768f02008-02-22 18:28:33 +0000494
495
496 def poll(self):
497 print "poll"
mblighd5c95802008-03-05 00:33:46 +0000498 if self.monitor:
mbligh36768f02008-02-22 18:28:33 +0000499 self.tick(self.monitor.exit_code())
500 else:
501 self.finished(False)
502
503
504 def tick(self, exit_code):
505 if exit_code==None:
506 return
507# print "exit_code was %d" % exit_code
508 if exit_code == 0:
509 success = True
510 else:
511 success = False
512
513 self.finished(success)
514
515
516 def is_done(self):
517 return self.done
518
519
520 def finished(self, success):
521 self.done = True
522 self.success = success
523 self.epilog()
524
525
526 def prolog(self):
527 pass
528
529
530 def epilog(self):
531 pass
532
533
534 def start(self):
535 assert self.agent
536
537 if not self.started:
538 self.prolog()
539 self.run()
540
541 self.started = True
542
543
544 def abort(self):
mblighd5c95802008-03-05 00:33:46 +0000545 if self.monitor:
546 self.monitor.kill()
547 self.done = True
mbligh36768f02008-02-22 18:28:33 +0000548
549
550 def run(self):
551 if self.cmd:
552 print "agent starting monitor"
553
554 log_file = None
555 if hasattr(self, 'host'):
556 log_file = os.path.join(os.path.join(RESULTS_DIR, 'hosts'), self.host.hostname)
557
558 self.monitor = RunMonitor(self.cmd, nice_level = AUTOSERV_NICE_LEVEL, log_file = log_file)
559
560
561class RepairTask(AgentTask):
mbligh16c722d2008-03-05 00:58:44 +0000562 def __init__(self, host, fail_queue_entry=None):
563 """\
564 fail_queue_entry: queue entry to mark failed if this repair
565 fails.
566 """
mbligh48c10a52008-02-29 22:46:38 +0000567 cmd = [_autoserv_path , '-R', '-m', host.hostname]
mbligh36768f02008-02-22 18:28:33 +0000568 self.host = host
mbligh16c722d2008-03-05 00:58:44 +0000569 self.fail_queue_entry = fail_queue_entry
570 AgentTask.__init__(self, cmd)
mblighe2586682008-02-29 22:45:46 +0000571
mbligh36768f02008-02-22 18:28:33 +0000572
573 def prolog(self):
574 print "repair_task starting"
575 self.host.set_status('Repairing')
576
577
578 def epilog(self):
579 if self.success:
mbligh16c722d2008-03-05 00:58:44 +0000580 self.host.set_status('Ready')
mbligh36768f02008-02-22 18:28:33 +0000581 else:
mbligh16c722d2008-03-05 00:58:44 +0000582 self.host.set_status('Repair Failed')
583 if self.fail_queue_entry:
584 self.fail_queue_entry.handle_host_failure()
mbligh36768f02008-02-22 18:28:33 +0000585
586
class VerifyTask(AgentTask):
	"""Run 'autoserv -v' against a host; on failure, fall back to a
	RepairTask via the failure_tasks mechanism."""
	def __init__(self, queue_entry=None, host=None):
		# exactly one of queue_entry / host must be supplied
		assert bool(queue_entry) != bool(host)

		self.host = host or queue_entry.host
		self.queue_entry = queue_entry

		# verify output lands in a temp dir and is moved into the
		# entry's results dir afterwards (see epilog/move_results)
		self.temp_results_dir = tempfile.mkdtemp(suffix='.verify')
		cmd = [_autoserv_path,'-v','-m',self.host.hostname,
		       '-r', self.temp_results_dir]

		# only a non-meta-host entry is failed outright when repair
		# fails; meta-host entries get requeued instead
		fail_queue_entry = None
		if queue_entry and not queue_entry.meta_host:
			fail_queue_entry = queue_entry
		failure_tasks = [RepairTask(self.host, fail_queue_entry)]

		AgentTask.__init__(self, cmd, failure_tasks=failure_tasks)


	def prolog(self):
		print "starting verify on %s" % (self.host.hostname)
		if self.queue_entry:
			self.queue_entry.set_status('Verifying')
			self.queue_entry.clear_results_dir(
			    self.queue_entry.verify_results_dir())
		self.host.set_status('Verifying')


	def epilog(self):
		# keep the verify logs when we succeeded, or when failure is
		# final for this entry (it has no meta-host to fall back on)
		if self.queue_entry and (self.success or
					 not self.queue_entry.meta_host):
			self.move_results()
		shutil.rmtree(self.temp_results_dir)

		if self.success:
			self.host.set_status('Ready')
		elif self.queue_entry:
			# failed verify: put the entry back in the queue
			self.queue_entry.requeue()


	def move_results(self):
		assert self.queue_entry is not None
		target_dir = self.queue_entry.verify_results_dir()
		if not os.path.exists(target_dir):
			os.makedirs(target_dir)
		files = os.listdir(self.temp_results_dir)
		for filename in files:
			self.force_move(os.path.join(self.temp_results_dir,
						     filename),
					os.path.join(target_dir, filename))


	@staticmethod
	def force_move(source, dest):
		"""\
		Replacement for shutil.move() that will delete the destination
		if it exists, even if it's a directory.
		"""
		if os.path.exists(dest):
			print ('Warning: removing existing destination file ' +
			       dest)
			remove_file_or_dir(dest)
		shutil.move(source, dest)
650
651
class VerifySynchronousTask(VerifyTask):
	"""Verify step for one entry of a synchronous job; when every
	entry is ready, starts the job itself."""
	def __init__(self, queue_entry):
		VerifyTask.__init__(self, queue_entry = queue_entry)


	def epilog(self):
		VerifyTask.epilog(self)
		if not self.success:
			return
		if self.queue_entry.job.num_complete() > 0:
			# some other entry failed verify, and we've
			# already been marked as stopped
			return

		self.queue_entry.set_status('Pending')
		job = self.queue_entry.job
		if job.is_ready():
			agent = job.run(self.queue_entry)
			self.agent.dispatcher.add_agent(agent)
mblighe2586682008-02-29 22:45:46 +0000670
class QueueTask(AgentTask):
	"""Run the actual job command for one or more queue entries and
	record job timing keyvals and parser bootstrap files."""
	def __init__(self, job, queue_entries, cmd):
		AgentTask.__init__(self, cmd)
		self.job = job
		self.queue_entries = queue_entries


	@staticmethod
	def _write_keyval(queue_entry, field, value):
		# append 'field=value' to the entry's keyval file; %d
		# truncates float values (e.g. time.time()) to integers
		key_path = os.path.join(queue_entry.results_dir(), 'keyval')
		keyval_file = open(key_path, 'a')
		print >> keyval_file, '%s=%d' % (field, value)
		keyval_file.close()


	def prolog(self):
		# write the parser commands into the results directories
		if self.job.is_synchronous() or self.job.num_machines()==1:
			results_dir = self.job.results_dir()
			cmdfile = os.path.join(results_dir, '.parse.cmd')
			cmd = generate_parse_command(results_dir)
			print >> open(cmdfile, 'w'), cmd
		else:
			# asynchronous multi-machine job: one parse command
			# per machine-level results directory
			for queue_entry in self.queue_entries:
				results_dir = queue_entry.results_dir()
				cmdfile = os.path.join(results_dir,
						       '.parse.cmd')
				cmd = generate_parse_command(results_dir,
							     '-l 2')
				print >> open(cmdfile, 'w'), cmd

		# write some job timestamps into the job keyval file
		queued = time.mktime(self.job.created_on.timetuple())
		started = time.time()
		self._write_keyval(self.queue_entries[0], "job_queued", queued)
		self._write_keyval(self.queue_entries[0], "job_started",
				   started)
		# mark every entry and its host as running
		for queue_entry in self.queue_entries:
			print "starting queue_task on %s/%s" % (queue_entry.host.hostname, queue_entry.id)
			queue_entry.set_status('Running')
			queue_entry.host.set_status('Running')
		if (not self.job.is_synchronous() and
		    self.job.num_machines() > 1):
			assert len(self.queue_entries) == 1
			self.job.write_to_machines_file(self.queue_entries[0])


	def epilog(self):
		if self.success:
			status = 'Completed'
		else:
			status = 'Failed'

		# write another timestamp into the job keyval file
		finished = time.time()
		self._write_keyval(self.queue_entries[0], "job_finished",
				   finished)
		for queue_entry in self.queue_entries:
			queue_entry.set_status(status)
			queue_entry.host.set_status('Ready')

		# kick off result parsing now that the run is over
		if self.job.is_synchronous() or self.job.num_machines()==1:
			if self.job.is_finished():
				parse_results(self.job.results_dir())
		else:
			for queue_entry in self.queue_entries:
				parse_results(queue_entry.results_dir(), flags='-l 2')

		print "queue_task finished with %s/%s" % (status, self.success)
740
741
742class RebootTask(AgentTask):
mblighd5c95802008-03-05 00:33:46 +0000743 def __init__(self, host):
744 global _autoserv_path
745
746 # Current implementation of autoserv requires control file
747 # to be passed on reboot action request. TODO: remove when no
748 # longer appropriate.
749 self.cmd = [_autoserv_path, '-b', '-m', host.hostname,
750 '/dev/null']
mbligh36768f02008-02-22 18:28:33 +0000751 self.host = host
mblighd5c95802008-03-05 00:33:46 +0000752 AgentTask.__init__(self, self.cmd,
mbligh16c722d2008-03-05 00:58:44 +0000753 failure_tasks=[RepairTask(host)])
754
mblighd5c95802008-03-05 00:33:46 +0000755
756 def prolog(self):
757 print "starting reboot task for host: %s" % self.host.hostname
758 self.host.set_status("Rebooting")
759
mblighd5c95802008-03-05 00:33:46 +0000760
class AbortTask(AgentTask):
	"""Abort the agents currently running a queue entry and mark the
	entry Aborted."""
	def __init__(self, queue_entry, agents_to_abort):
		self.queue_entry = queue_entry
		self.agents_to_abort = agents_to_abort
		# detach the doomed agents so the dispatcher stops ticking
		# them
		for agent in agents_to_abort:
			agent.dispatcher.remove_agent(agent)
		# no command of its own; run() below does the work
		AgentTask.__init__(self, '')


	def prolog(self):
		print "starting abort on host %s, job %s" % (
				self.queue_entry.host_id, self.queue_entry.job_id)
		self.queue_entry.set_status('Aborting')


	def epilog(self):
		self.queue_entry.set_status('Aborted')
		# with no monitor, poll() reports finished(False); override
		# so the agent does not treat the abort itself as a failure
		self.success = True

	def run(self):
		# kill whatever each displaced agent was actively doing
		for agent in self.agents_to_abort:
			if (agent.active_task):
				agent.active_task.abort()
mbligh36768f02008-02-22 18:28:33 +0000784
785
class DBObject(object):
	"""Base class mapping one database row onto attribute access.

	Subclasses override _get_table() and pass the table's column list
	as 'fields'; each column becomes an attribute on the instance.
	Construct with either id= (fetches the row) or row= (wraps an
	already-fetched row).
	"""
	def __init__(self, fields, id=None, row=None, new_record=False):
		# exactly one of id/row must be supplied (NOTE: a falsy id
		# such as 0 would trip this assert)
		assert (bool(id) != bool(row)) and fields

		self.__table = self._get_table()
		self.__fields = fields

		self.__new_record = new_record

		if row is None:
			sql = 'SELECT * FROM %s WHERE ID=%%s' % self.__table
			rows = _db.execute(sql, (id,))
			if len(rows) == 0:
				# the original raised a plain string, which
				# is a TypeError on python >= 2.6
				raise Exception("row not found (table=%s, id=%s)"
						% (self.__table, id))
			row = rows[0]

		assert len(row)==len(fields), (
		    "table = %s, row = %s/%d, fields = %s/%d" % (
		    self.__table, row, len(row), fields, len(fields)))

		# expose each column as an attribute; remember which names
		# are legal targets for update_field ('id' is immutable)
		self.__valid_fields = {}
		for i,value in enumerate(row):
			self.__dict__[fields[i]] = value
			self.__valid_fields[fields[i]] = True

		del self.__valid_fields['id']


	@classmethod
	def _get_table(cls):
		raise NotImplementedError('Subclasses must override this')


	def count(self, where, table = None):
		"""Return COUNT(*) of rows matching 'where' (trusted SQL
		fragment, interpolated directly)."""
		if not table:
			table = self.__table

		rows = _db.execute("""
			SELECT count(*) FROM %s
			WHERE %s
		""" % (table, where))

		assert len(rows) == 1

		return int(rows[0][0])


	def num_cols(self):
		return len(self.__fields)


	def update_field(self, field, value):
		"""Write one column back to the DB (and to self), skipping
		the write when the value is unchanged."""
		assert self.__valid_fields[field]

		if self.__dict__[field] == value:
			return

		query = "UPDATE %s SET %s = %%s WHERE id = %%s" % \
							(self.__table, field)
		_db.execute(query, (value, self.id))

		self.__dict__[field] = value


	def save(self):
		"""INSERT this object if it was created with new_record."""
		if self.__new_record:
			keys = self.__fields[1:] # avoid id
			columns = ','.join([str(key) for key in keys])
			values = ['"%s"' % self.__dict__[key] for key in keys]
			values = ','.join(values)
			query = """INSERT INTO %s (%s) VALUES (%s)""" % \
						(self.__table, columns, values)
			_db.execute(query)


	def delete(self):
		query = 'DELETE FROM %s WHERE id=%%s' % self.__table
		_db.execute(query, (self.id,))


	@classmethod
	def fetch(cls, where):
		"""Yield one instance per row matching 'where'."""
		rows = _db.execute(
		    'SELECT * FROM %s WHERE %s' % (cls._get_table(), where))
		for row in rows:
			yield cls(row=row)
873
mbligh36768f02008-02-22 18:28:33 +0000874
class IneligibleHostQueue(DBObject):
	"""A (job, host) pair recording that the job may not use the host."""
	def __init__(self, id=None, row=None, new_record=None):
		DBObject.__init__(self, ['id', 'job_id', 'host_id'],
				  id=id, row=row, new_record=new_record)


	@classmethod
	def _get_table(cls):
		return 'ineligible_host_queues'
mbligh36768f02008-02-22 18:28:33 +0000885
886
class Host(DBObject):
	"""A row from the hosts table, plus work-finding queries."""
	def __init__(self, id=None, row=None):
		fields = ['id', 'hostname', 'locked', 'synch_id','status']
		DBObject.__init__(self, fields, id=id, row=row)


	@classmethod
	def _get_table(cls):
		return 'hosts'


	def current_task(self):
		"""Return the single active, incomplete queue entry on this
		host, or None when the host is idle."""
		rows = _db.execute("""
			SELECT * FROM host_queue_entries WHERE host_id=%s AND NOT complete AND active
			""", (self.id,))

		if len(rows) == 0:
			return None
		else:
			# a host works on at most one entry at a time
			assert len(rows) == 1
			results = rows[0];
			return HostQueueEntry(row=results)


	def next_queue_entries(self):
		"""Return the best pending queue entry for this host (as a
		list), or None.

		An entry qualifies when it names this host directly, or via a
		meta-host label the host carries -- unless the job has been
		blocked from this host through ineligible_host_queues.
		Highest priority wins; direct assignments beat meta-hosts.
		"""
		if self.locked:
			print "%s locked, not queuing" % self.hostname
			return None
		rows = _db.execute("""
			SELECT * FROM host_queue_entries
			WHERE ((host_id=%s) OR (meta_host IS NOT null AND
			(meta_host IN (
				SELECT label_id FROM hosts_labels WHERE host_id=%s
				)
			)
			AND job_id NOT IN (
				SELECT job_id FROM ineligible_host_queues
				WHERE host_id=%s
			)))
			AND NOT complete AND NOT active
			ORDER BY priority DESC, meta_host, id
			LIMIT 1
			""", (self.id,self.id, self.id))

		if len(rows) == 0:
			return None
		else:
			return [HostQueueEntry(row=i) for i in rows]

	def yield_work(self):
		# push the current entry back into the pending pool
		print "%s yielding work" % self.hostname
		if self.current_task():
			self.current_task().requeue()

	def set_status(self,status):
		self.update_field('status',status)
945
946
947class HostQueueEntry(DBObject):
	def __init__(self, id=None, row=None):
		# fetch by id, or wrap an already-fetched row
		assert id or row
		fields = ['id', 'job_id', 'host_id', 'priority', 'status',
			  'meta_host', 'active', 'complete']
		DBObject.__init__(self, fields, id=id, row=row)

		# eagerly load the owning job, and the host when assigned
		self.job = Job(self.job_id)

		if self.host_id:
			self.host = Host(self.host_id)
		else:
			self.host = None

		# per-entry log of host assignment/release events
		self.queue_log_path = os.path.join(self.job.results_dir(),
						   'queue.log.' + str(self.id))
963
964
mblighe2586682008-02-29 22:45:46 +0000965 @classmethod
966 def _get_table(cls):
967 return 'host_queue_entries'
968
969
mbligh36768f02008-02-22 18:28:33 +0000970 def set_host(self, host):
971 if host:
972 self.queue_log_record('Assigning host ' + host.hostname)
973 self.update_field('host_id', host.id)
974 self.update_field('active', True)
mblighe2586682008-02-29 22:45:46 +0000975 self.block_host(host.id)
mbligh36768f02008-02-22 18:28:33 +0000976 else:
977 self.queue_log_record('Releasing host')
mblighe2586682008-02-29 22:45:46 +0000978 self.unblock_host(self.host.id)
mbligh36768f02008-02-22 18:28:33 +0000979 self.update_field('host_id', None)
980
981 self.host = host
982
983
984 def get_host(self):
mblighe2586682008-02-29 22:45:46 +0000985 return self.host
mbligh36768f02008-02-22 18:28:33 +0000986
987
988 def queue_log_record(self, log_line):
mblighe2586682008-02-29 22:45:46 +0000989 now = str(datetime.datetime.now())
mbligh36768f02008-02-22 18:28:33 +0000990 queue_log = open(self.queue_log_path, 'a', 0)
mblighe2586682008-02-29 22:45:46 +0000991 queue_log.write(now + ' ' + log_line + '\n')
mbligh36768f02008-02-22 18:28:33 +0000992 queue_log.close()
993
994
mblighe2586682008-02-29 22:45:46 +0000995 def block_host(self, host_id):
996 print "creating block %s/%s" % (self.job.id, host_id)
997 row = [0, self.job.id, host_id]
998 block = IneligibleHostQueue(row=row, new_record=True)
999 block.save()
1000
1001
1002 def unblock_host(self, host_id):
1003 print "removing block %s/%s" % (self.job.id, host_id)
1004 blocks = list(IneligibleHostQueue.fetch(
1005 'job_id=%d and host_id=%d' % (self.job.id, host_id)))
1006 assert len(blocks) == 1
1007 blocks[0].delete()
1008
1009
mbligh36768f02008-02-22 18:28:33 +00001010 def results_dir(self):
mblighe2586682008-02-29 22:45:46 +00001011 if self.job.is_synchronous() or self.job.num_machines() == 1:
1012 return self.job.job_dir
mbligh36768f02008-02-22 18:28:33 +00001013 else:
1014 assert self.host
mblighe2586682008-02-29 22:45:46 +00001015 return os.path.join(self.job.job_dir,
1016 self.host.hostname)
mbligh36768f02008-02-22 18:28:33 +00001017
mblighe2586682008-02-29 22:45:46 +00001018
1019 def verify_results_dir(self):
1020 if self.job.is_synchronous() or self.job.num_machines() > 1:
1021 assert self.host
1022 return os.path.join(self.job.job_dir,
1023 self.host.hostname)
1024 else:
1025 return self.job.job_dir
mbligh36768f02008-02-22 18:28:33 +00001026
1027
1028 def set_status(self, status):
1029 self.update_field('status', status)
1030 if self.host:
1031 hostname = self.host.hostname
1032 else:
1033 hostname = 'no host'
1034 print "%s/%d status -> %s" % (hostname, self.id, self.status)
1035 if status in ['Queued']:
1036 self.update_field('complete', False)
1037 self.update_field('active', False)
1038
mblighd5c95802008-03-05 00:33:46 +00001039 if status in ['Pending', 'Running', 'Verifying', 'Starting',
1040 'Abort', 'Aborting']:
mbligh36768f02008-02-22 18:28:33 +00001041 self.update_field('complete', False)
1042 self.update_field('active', True)
1043
mblighd5c95802008-03-05 00:33:46 +00001044 if status in ['Failed', 'Completed', 'Stopped', 'Aborted']:
mbligh36768f02008-02-22 18:28:33 +00001045 self.update_field('complete', True)
1046 self.update_field('active', False)
1047
1048
1049 def run(self,assigned_host=None):
1050 if self.meta_host:
1051 assert assigned_host
mblighe2586682008-02-29 22:45:46 +00001052 # ensure results dir exists for the queue log
mbligh36768f02008-02-22 18:28:33 +00001053 self.job.create_results_dir()
1054 self.set_host(assigned_host)
mbligh36768f02008-02-22 18:28:33 +00001055
mbligh36768f02008-02-22 18:28:33 +00001056 print "%s/%s scheduled on %s, status=%s" % (self.job.name,
1057 self.meta_host, self.host.hostname, self.status)
1058
1059 return self.job.run(queue_entry=self)
mblighe2586682008-02-29 22:45:46 +00001060
mbligh36768f02008-02-22 18:28:33 +00001061 def requeue(self):
1062 self.set_status('Queued')
mblighe2586682008-02-29 22:45:46 +00001063
mbligh36768f02008-02-22 18:28:33 +00001064 if self.meta_host:
1065 self.set_host(None)
1066
1067
mblighe2586682008-02-29 22:45:46 +00001068 def handle_host_failure(self):
1069 """\
1070 Called when this queue entry's host has failed verification and
1071 repair.
1072 """
mblighdffd6372008-02-29 22:47:33 +00001073 assert not self.meta_host
1074 self.set_status('Failed')
1075 if self.job.is_synchronous():
1076 self.job.stop_all_entries()
mblighe2586682008-02-29 22:45:46 +00001077
1078
1079 def clear_results_dir(self, results_dir=None):
1080 results_dir = results_dir or self.results_dir()
1081 if not os.path.exists(results_dir):
1082 return
1083 for filename in os.listdir(results_dir):
1084 if 'queue.log' in filename:
1085 continue
1086 path = os.path.join(results_dir, filename)
1087 remove_file_or_dir(path)
mbligh36768f02008-02-22 18:28:33 +00001088
1089
1090class Job(DBObject):
1091 def __init__(self, id=None, row=None):
1092 assert id or row
mblighe2586682008-02-29 22:45:46 +00001093 DBObject.__init__(self,
1094 ['id','owner','name','priority',
1095 'control_file','control_type','created_on',
1096 'synch_type', 'synch_count','synchronizing'],
1097 id=id, row=row)
mbligh36768f02008-02-22 18:28:33 +00001098
mblighe2586682008-02-29 22:45:46 +00001099 self.job_dir = os.path.join(RESULTS_DIR, "%s-%s" % (self.id,
1100 self.owner))
1101
1102
1103 @classmethod
1104 def _get_table(cls):
1105 return 'jobs'
mbligh36768f02008-02-22 18:28:33 +00001106
1107
1108 def is_server_job(self):
1109 return self.control_type != 2
1110
1111
1112 def get_host_queue_entries(self):
mbligh6f8bab42008-02-29 22:45:14 +00001113 rows = _db.execute("""
mbligh36768f02008-02-22 18:28:33 +00001114 SELECT * FROM host_queue_entries
1115 WHERE job_id= %s
1116 """, (self.id,))
mbligh6f8bab42008-02-29 22:45:14 +00001117 entries = [HostQueueEntry(row=i) for i in rows]
mbligh36768f02008-02-22 18:28:33 +00001118
1119 assert len(entries)>0
1120
1121 return entries
1122
1123
1124 def set_status(self, status, update_queues=False):
1125 self.update_field('status',status)
1126
1127 if update_queues:
1128 for queue_entry in self.get_host_queue_entries():
1129 queue_entry.set_status(status)
1130
1131
1132 def is_synchronous(self):
1133 return self.synch_type == 2
1134
1135
1136 def is_ready(self):
1137 if not self.is_synchronous():
1138 return True
1139 sql = "job_id=%s AND status='Pending'" % self.id
1140 count = self.count(sql, table='host_queue_entries')
1141 return (count == self.synch_count)
1142
1143
1144 def ready_to_synchronize(self):
1145 # heuristic
1146 queue_entries = self.get_host_queue_entries()
1147 count = 0
1148 for queue_entry in queue_entries:
1149 if queue_entry.status == 'Pending':
1150 count += 1
1151
1152 return (count/self.synch_count >= 0.5)
1153
1154
1155 def start_synchronizing(self):
1156 self.update_field('synchronizing', True)
1157
1158
1159 def results_dir(self):
1160 return self.job_dir
1161
1162 def num_machines(self, clause = None):
1163 sql = "job_id=%s" % self.id
1164 if clause:
1165 sql += " AND (%s)" % clause
1166 return self.count(sql, table='host_queue_entries')
1167
1168
1169 def num_queued(self):
1170 return self.num_machines('not complete')
1171
1172
1173 def num_active(self):
1174 return self.num_machines('active')
1175
1176
1177 def num_complete(self):
1178 return self.num_machines('complete')
1179
1180
1181 def is_finished(self):
1182 left = self.num_queued()
1183 print "%s: %s machines left" % (self.name, left)
1184 return left==0
1185
1186 def stop_synchronizing(self):
1187 self.update_field('synchronizing', False)
1188 self.set_status('Queued', update_queues = False)
1189
1190
mblighe2586682008-02-29 22:45:46 +00001191 def stop_all_entries(self):
1192 for child_entry in self.get_host_queue_entries():
1193 if not child_entry.complete:
1194 child_entry.set_status('Stopped')
1195
1196
1197 def write_to_machines_file(self, queue_entry):
1198 hostname = queue_entry.get_host().hostname
1199 print "writing %s to job %s machines file" % (hostname, self.id)
1200 file_path = os.path.join(self.job_dir, '.machines')
1201 mf = open(file_path, 'a')
1202 mf.write("%s\n" % queue_entry.get_host().hostname)
1203 mf.close()
mbligh36768f02008-02-22 18:28:33 +00001204
1205
1206 def create_results_dir(self, queue_entry=None):
1207 print "create: active: %s complete %s" % (self.num_active(),
1208 self.num_complete())
1209
1210 if not os.path.exists(self.job_dir):
1211 os.makedirs(self.job_dir)
1212
1213 if queue_entry:
1214 return queue_entry.results_dir()
1215 return self.job_dir
1216
1217
1218 def run(self, queue_entry):
1219 results_dir = self.create_results_dir(queue_entry)
1220
1221 if self.is_synchronous():
1222 if not self.is_ready():
mblighd5c95802008-03-05 00:33:46 +00001223 return Agent([VerifySynchronousTask(
1224 queue_entry = queue_entry)],
1225 [queue_entry.id])
mbligh36768f02008-02-22 18:28:33 +00001226
1227 queue_entry.set_status('Starting')
1228
1229 ctrl = open(os.tmpnam(), 'w')
1230 if self.control_file:
1231 ctrl.write(self.control_file)
1232 else:
1233 ctrl.write("")
1234 ctrl.flush()
1235
1236 if self.is_synchronous():
mbligh36768f02008-02-22 18:28:33 +00001237 queue_entries = self.get_host_queue_entries()
1238 else:
1239 assert queue_entry
mbligh36768f02008-02-22 18:28:33 +00001240 queue_entries = [queue_entry]
mblighe2586682008-02-29 22:45:46 +00001241 hostnames = ','.join([entry.get_host().hostname
1242 for entry in queue_entries])
mbligh36768f02008-02-22 18:28:33 +00001243
mbligh4314a712008-02-29 22:44:30 +00001244 params = [_autoserv_path, '-n', '-r', results_dir,
mbligh36768f02008-02-22 18:28:33 +00001245 '-b', '-u', self.owner, '-l', self.name,
1246 '-m', hostnames, ctrl.name]
1247
1248 if not self.is_server_job():
1249 params.append('-c')
1250
1251 tasks = []
1252 if not self.is_synchronous():
1253 tasks.append(VerifyTask(queue_entry))
mblighe2586682008-02-29 22:45:46 +00001254
1255 tasks.append(QueueTask(job = self,
1256 queue_entries = queue_entries,
1257 cmd = params))
mbligh36768f02008-02-22 18:28:33 +00001258
mblighd5c95802008-03-05 00:33:46 +00001259 ids = []
1260 for entry in queue_entries:
1261 ids.append(entry.id)
1262
1263 agent = Agent(tasks, ids)
mbligh36768f02008-02-22 18:28:33 +00001264
1265 return agent
1266
1267
# Script entry point: hand control to main(), defined at the top of the file.
if __name__ == '__main__':
	main()