#!/usr/bin/python -u

"""
Autotest scheduler
"""
__author__ = "Paul Turner <pjt@google.com>"

import os, sys, tempfile, shutil, MySQLdb, time, traceback, subprocess, Queue
import optparse, signal, smtplib, socket, datetime, stat
from common import global_config

RESULTS_DIR = '.'
AUTOSERV_NICE_LEVEL = 10

AUTOTEST_PATH = os.path.join(os.path.dirname(__file__), '..')

if os.environ.has_key('AUTOTEST_DIR'):
    AUTOTEST_PATH = os.environ['AUTOTEST_DIR']
AUTOTEST_SERVER_DIR = os.path.join(AUTOTEST_PATH, 'server')
AUTOTEST_TKO_DIR = os.path.join(AUTOTEST_PATH, 'tko')

if AUTOTEST_SERVER_DIR not in sys.path:
    sys.path.insert(0, AUTOTEST_SERVER_DIR)

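# Module-level state shared across the whole scheduler process.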
_db = None
_shutdown = False
_notify_email = None
_autoserv_path = 'autoserv'
_testing_mode = False


def main():
    usage = 'usage: %prog [options] results_dir'

    parser = optparse.OptionParser(usage)
    parser.add_option('--no-recover', help='Skip machine/job recovery ' +
                      'step [for multiple monitors/rolling upgrades]',
                      action='store_true')
    parser.add_option('--logfile', help='Set a log file that all stdout ' +
                      'should be redirected to. Stderr will go to this ' +
                      'file + ".err"')
    parser.add_option('--notify', help='Set an email address to be ' +
                      'notified of exceptions')
    parser.add_option('--test', help='Indicate that scheduler is under ' +
                      'test and should use dummy autoserv and no parsing',
                      action='store_true')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_usage()
        return

    global RESULTS_DIR
    RESULTS_DIR = args[0]

    global _notify_email
    _notify_email = options.notify

    if options.test:
        global _autoserv_path
        _autoserv_path = 'autoserv_dummy'
        global _testing_mode
        _testing_mode = True

    init(options.logfile)
    dispatcher = Dispatcher(do_recover = not options.no_recover)

    try:
        while not _shutdown:
            dispatcher.tick()
            time.sleep(20)
        dispatcher.shut_down()
    except:
        log_stacktrace("Uncaught exception; terminating monitor_db")

    _db.disconnect()


def handle_sigint(signum, frame):
    global _shutdown
    _shutdown = True
    print "Shutdown request received."


def init(logfile):
    if logfile:
        enable_logging(logfile)
    print "%s> dispatcher starting" % time.strftime("%X %x")
    print "My PID is %d" % os.getpid()

    os.environ['PATH'] = AUTOTEST_SERVER_DIR + ':' + os.environ['PATH']
    global _db
    _db = DatabaseConn()

    print "Setting signal handler"
    signal.signal(signal.SIGINT, handle_sigint)

    print "Connected! Running..."


def enable_logging(logfile):
    out_file = logfile
    err_file = "%s.err" % logfile
    print "Enabling logging to %s (%s)" % (out_file, err_file)
    out_fd = open(out_file, "a", buffering=0)
    err_fd = open(err_file, "a", buffering=0)

    os.dup2(out_fd.fileno(), sys.stdout.fileno())
    os.dup2(err_fd.fileno(), sys.stderr.fileno())

    sys.stdout = out_fd
    sys.stderr = err_fd


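# A host is "idle" when it has no active queue entry, has either a queued
# entry assigned to it directly or matches the label of a queued meta-host
# entry, is not locked, and its status is 'Ready' (or NULL).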
def idle_hosts():
    rows = _db.execute("""
        SELECT * FROM hosts h WHERE
        id NOT IN (SELECT host_id FROM host_queue_entries WHERE active) AND (
            (id IN (SELECT host_id FROM host_queue_entries
                    WHERE not complete AND not active))
            OR
            (id IN (SELECT DISTINCT hl.host_id FROM host_queue_entries hqe
                    INNER JOIN hosts_labels hl ON hqe.meta_host=hl.label_id
                    WHERE not hqe.complete AND not hqe.active))
        )
        AND locked=false AND (h.status IS null OR h.status='Ready') """)
    hosts = [Host(row=i) for i in rows]
    return hosts


def queue_entries_to_abort():
    rows = _db.execute("""
        SELECT * FROM host_queue_entries WHERE status='Abort';
        """)
    qe = [HostQueueEntry(row=i) for i in rows]
    return qe

def remove_file_or_dir(path):
    if stat.S_ISDIR(os.stat(path).st_mode):
        # directory
        shutil.rmtree(path)
    else:
        # file
        os.remove(path)


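# Thin wrapper around a MySQLdb connection that transparently reconnects
# (waiting reconnect_wait seconds between attempts) whenever the connection
# drops.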
class DatabaseConn:
    def __init__(self):
        self.reconnect_wait = 20
        self.conn = None
        self.cur = None

        self.connect()


    def connect(self):
        self.disconnect()

        # get global config and parse for info
        c = global_config.global_config
        dbase = "AUTOTEST_WEB"
        DB_HOST = c.get_config_value(dbase, "host", "localhost")
        DB_SCHEMA = c.get_config_value(dbase, "database", "autotest_web")

        global _testing_mode
        if _testing_mode:
            DB_SCHEMA = 'stresstest_autotest_web'

        DB_USER = c.get_config_value(dbase, "user", "autotest")
        DB_PASS = c.get_config_value(dbase, "password", "google")

        while not self.conn:
            try:
                self.conn = MySQLdb.connect(host=DB_HOST, user=DB_USER,
                                            passwd=DB_PASS, db=DB_SCHEMA)

                self.conn.autocommit(True)
                self.cur = self.conn.cursor()
            except MySQLdb.OperationalError:
                print "Can't connect to MYSQL; reconnecting"
                time.sleep(self.reconnect_wait)
                self.disconnect()


    def disconnect(self):
        if self.conn:
            self.conn.close()
        self.conn = None
        self.cur = None


    def execute(self, *args, **dargs):
        while True:
            try:
                self.cur.execute(*args, **dargs)
                return self.cur.fetchall()
            except MySQLdb.OperationalError:
                print "MYSQL connection died; reconnecting"
                time.sleep(self.reconnect_wait)
                self.connect()


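# Kick off the TKO parser on a results directory.  The parser is started in
# the background via os.system(); its own output goes to
# <results_dir>/.parse.log.  In testing mode this is a no-op.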
def parse_results(results_dir, flags=""):
    if _testing_mode:
        return
    parse = os.path.join(AUTOTEST_TKO_DIR, 'parse')
    output = os.path.join(results_dir, '.parse.log')
    os.system("%s %s -r -o %s > %s 2>&1 &" %
              (parse, flags, results_dir, output))


def log_stacktrace(reason):
    (exc_type, exc_value, exc_tb) = sys.exc_info()
    message = "EXCEPTION: %s\n" % reason
    message += "%s / %s / %s\n" % (socket.gethostname(), os.getpid(),
                                   time.strftime("%X %x"))
    message += ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))

    sys.stderr.write("\n%s\n" % message)

    if _notify_email:
        sender = "monitor_db"
        subject = "monitor_db exception"
        msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (
            sender, _notify_email, subject, message)
        mailer = smtplib.SMTP('localhost')
        mailer.sendmail(sender, _notify_email, msg)
        mailer.quit()


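# The Dispatcher drives the scheduler's main loop.  Each tick it aborts queue
# entries whose status is 'Abort', hands queued work to idle hosts, and then
# polls every Agent it currently owns.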
class Dispatcher:
    def __init__(self, do_recover=True):
        self._agents = []
        self.shutting_down = False

        if do_recover:
            self._recover_lost()


    def shut_down(self):
        print "Shutting down!"
        self.shutting_down = True
        while self._agents:
            self.tick()
            time.sleep(40)


    def tick(self):
        if not self.shutting_down:
            self._find_aborting()
            self._find_more_work()
        self._handle_agents()


    def add_agent(self, agent):
        self._agents.append(agent)
        agent.dispatcher = self

    # Find the agents working on the specified queue_entry
    def get_agents(self, queue_entry):
        res_agents = []
        for agent in self._agents:
            if queue_entry.id in agent.queue_entry_ids:
                res_agents.append(agent)
        return res_agents


    def remove_agent(self, agent):
        self._agents.remove(agent)


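    # Recovery after a scheduler restart: requeue entries that were left
    # active, finish off entries stuck in Abort/Aborting, and reverify every
    # unlocked host that is not 'Ready' (unless it is already being rebooted).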
    def _recover_lost(self):
        rows = _db.execute("""SELECT * FROM host_queue_entries
                              WHERE active AND NOT complete
                              AND status != 'Abort'
                              AND status != 'Aborting'""")
        if len(rows) > 0:
            queue_entries = [HostQueueEntry(row=i) for i in rows]
            for queue_entry in queue_entries:
                job = queue_entry.job
                if job.is_synchronous():
                    for child_entry in job.get_host_queue_entries():
                        child_entry.requeue()
                else:
                    queue_entry.requeue()
                queue_entry.clear_results_dir()

        rebooting_host_ids = []
        rows = _db.execute("""SELECT * FROM host_queue_entries
                              WHERE status='Abort' or status='Aborting'""")
        if len(rows) > 0:
            queue_entries = [HostQueueEntry(row=i) for i in rows]
            for queue_entry in queue_entries:
                queue_host = queue_entry.get_host()
                reboot_task = RebootTask(queue_host)
                verify_task = VerifyTask(host = queue_host)
                self.add_agent(Agent(tasks=[reboot_task, verify_task],
                                     queue_entry_ids=[queue_entry.id]))
                queue_entry.set_status('Aborted')
                # Secure the host from being picked up
                queue_host.set_status('Rebooting')
                rebooting_host_ids.append(queue_host.id)

        rows = _db.execute("""SELECT * FROM hosts
                              WHERE status != 'Ready' AND NOT locked""")
        if len(rows) > 0:
            hosts = [Host(row=i) for i in rows]
            for host in hosts:
                if host.id in rebooting_host_ids:
                    continue
                verify_task = VerifyTask(host = host)
                self.add_agent(Agent(tasks = [verify_task]))


    def _find_more_work(self):
        print "finding work"

        num_started = 0
        for host in idle_hosts():
            tasks = host.next_queue_entries()
            if tasks:
                for next in tasks:
                    try:
                        agent = next.run(assigned_host=host)
                        if agent:
                            self.add_agent(agent)

                            num_started += 1
                            if num_started >= 100:
                                return
                        break
                    except:
                        next.set_status('Failed')

#                        if next.host:
#                            next.host.set_status('Ready')

                        log_stacktrace("task_id = %d" % next.id)


    def _find_aborting(self):
        num_aborted = 0
        # Find jobs that are aborting
        for entry in queue_entries_to_abort():
            agents_to_abort = self.get_agents(entry)
            entry_host = entry.get_host()
            reboot_task = RebootTask(entry_host)
            verify_task = VerifyTask(host = entry_host)
            tasks = [reboot_task, verify_task]
            if agents_to_abort:
                abort_task = AbortTask(entry, agents_to_abort)
                tasks.insert(0, abort_task)
            else:
                entry.set_status('Aborted')
            # just to make sure this host does not get
            # taken away
            entry_host.set_status('Rebooting')
            self.add_agent(Agent(tasks=tasks,
                                 queue_entry_ids = [entry.id]))
            num_aborted += 1
            if num_aborted >= 50:
                break


    def _handle_agents(self):
        still_running = []
        for agent in self._agents:
            agent.tick()
            if not agent.is_done():
                still_running.append(agent)
            else:
                print "agent finished"
        self._agents = still_running


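# RunMonitor wraps one autoserv subprocess: it launches the command
# (optionally under nice, with output appended to a log file) and exposes a
# poll()-based exit code plus a best-effort SIGTERM kill.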
class RunMonitor(object):
    def __init__(self, cmd, nice_level = None, log_file = None):
        self.nice_level = nice_level
        self.log_file = log_file
        self.proc = self.run(cmd)

    def run(self, cmd):
        if self.nice_level:
            nice_cmd = ['nice', '-n', str(self.nice_level)]
            nice_cmd.extend(cmd)
            cmd = nice_cmd

        out_file = None
        if self.log_file:
            try:
                out_file = open(self.log_file, 'a')
                out_file.write("\n%s\n" % ('*'*80))
                out_file.write("%s> %s\n" % (time.strftime("%X %x"), cmd))
                out_file.write("%s\n" % ('*'*80))
            except:
                pass

        if not out_file:
            out_file = open('/dev/null', 'w')

        in_devnull = open('/dev/null', 'r')
        print "cmd = %s" % cmd
        print "path = %s" % os.getcwd()

        proc = subprocess.Popen(cmd, stdout=out_file,
                                stderr=subprocess.STDOUT, stdin=in_devnull)
        out_file.close()
        in_devnull.close()
        return proc


    def kill(self):
        for i in range(0, 4):
            if self.proc.poll() == None:
                os.kill(self.proc.pid, signal.SIGTERM)
                time.sleep(5)
            # Check that the process was terminated
            if self.proc.poll() != None:
                return

        print ("""Error: process %d has not terminated""" %
               self.proc.pid)


    def exit_code(self):
        return self.proc.poll()


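# An Agent owns an ordered queue of AgentTasks that together make up one
# logical piece of work (e.g. verify then run a job, or abort then reboot
# and reverify).  The Dispatcher ticks it until every task has finished;
# when a task fails, the rest of the queue is replaced by that task's
# failure_tasks.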
class Agent(object):
    def __init__(self, tasks, queue_entry_ids=[]):
        self.active_task = None
        self.queue = Queue.Queue(0)
        self.dispatcher = None
        self.queue_entry_ids = queue_entry_ids

        for task in tasks:
            self.add_task(task)


    def add_task(self, task):
        self.queue.put_nowait(task)
        task.agent = self


    def tick(self):
        print "agent tick"
        if self.active_task and not self.active_task.is_done():
            self.active_task.poll()
        else:
            self._next_task()


    def _next_task(self):
        print "agent picking task"
        if self.active_task:
            assert self.active_task.is_done()

            if not self.active_task.success:
                self.on_task_failure()

        self.active_task = None
        if not self.is_done():
            self.active_task = self.queue.get_nowait()
            if self.active_task:
                self.active_task.start()


    def on_task_failure(self):
        self.queue = Queue.Queue(0)
        for task in self.active_task.failure_tasks:
            self.add_task(task)


    def is_done(self):
        return self.active_task == None and self.queue.empty()


    def start(self):
        assert self.dispatcher

        self._next_task()


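# AgentTask is the base class for every unit of work an Agent runs.  A
# concrete task supplies a command for RunMonitor (possibly empty) plus
# prolog() and epilog() hooks that update host/queue-entry state around
# execution.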
class AgentTask(object):
    def __init__(self, cmd, failure_tasks = []):
        """\
        If this task fails, the Agent's task queue is cleared and
        replaced with the tasks in failure_tasks.
        """
        self.done = False
        self.failure_tasks = failure_tasks
        self.started = False
        self.cmd = cmd
        self.task = None
        self.agent = None
        self.monitor = None


    def poll(self):
        print "poll"
        if self.monitor:
            self.tick(self.monitor.exit_code())
        else:
            self.finished(False)


    def tick(self, exit_code):
        if exit_code == None:
            return
#        print "exit_code was %d" % exit_code
        if exit_code == 0:
            success = True
        else:
            success = False

        self.finished(success)


    def is_done(self):
        return self.done


    def finished(self, success):
        self.done = True
        self.success = success
        self.epilog()


    def prolog(self):
        pass


    def epilog(self):
        pass


    def start(self):
        assert self.agent

        if not self.started:
            self.prolog()
            self.run()

        self.started = True


    def abort(self):
        if self.monitor:
            self.monitor.kill()
        self.done = True


    def run(self):
        if self.cmd:
            print "agent starting monitor"

            log_file = None
            if hasattr(self, 'host'):
                log_file = os.path.join(os.path.join(RESULTS_DIR, 'hosts'),
                                        self.host.hostname)

            self.monitor = RunMonitor(self.cmd,
                                      nice_level = AUTOSERV_NICE_LEVEL,
                                      log_file = log_file)


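# RepairTask runs "autoserv -R" against a host.  On success the host goes
# back to 'Ready'; on failure it is marked 'Repair Failed' and, if a
# non-metahost queue entry was waiting on the host, that entry is failed too.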
class RepairTask(AgentTask):
    def __init__(self, host, fail_queue_entry=None):
        """\
        fail_queue_entry: queue entry to mark failed if this repair
        fails.
        """
        cmd = [_autoserv_path, '-R', '-m', host.hostname]
        self.host = host
        self.fail_queue_entry = fail_queue_entry
        AgentTask.__init__(self, cmd)


    def prolog(self):
        print "repair_task starting"
        self.host.set_status('Repairing')


    def epilog(self):
        if self.success:
            self.host.set_status('Ready')
        else:
            self.host.set_status('Repair Failed')
            if self.fail_queue_entry:
                self.fail_queue_entry.handle_host_failure()


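# VerifyTask runs "autoserv -v" into a temporary results directory.  A failed
# verify falls back to a RepairTask; a non-metahost entry is failed outright
# if repair also fails, while a metahost entry is simply requeued.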
class VerifyTask(AgentTask):
    def __init__(self, queue_entry=None, host=None):
        assert bool(queue_entry) != bool(host)

        self.host = host or queue_entry.host
        self.queue_entry = queue_entry

        self.temp_results_dir = tempfile.mkdtemp(suffix='.verify')
        cmd = [_autoserv_path, '-v', '-m', self.host.hostname,
               '-r', self.temp_results_dir]

        fail_queue_entry = None
        if queue_entry and not queue_entry.meta_host:
            fail_queue_entry = queue_entry
        failure_tasks = [RepairTask(self.host, fail_queue_entry)]

        AgentTask.__init__(self, cmd, failure_tasks=failure_tasks)


    def prolog(self):
        print "starting verify on %s" % (self.host.hostname)
        if self.queue_entry:
            self.queue_entry.set_status('Verifying')
            self.queue_entry.clear_results_dir(
                self.queue_entry.verify_results_dir())
        self.host.set_status('Verifying')


    def epilog(self):
        if self.queue_entry and (self.success or
                                 not self.queue_entry.meta_host):
            self.move_results()
        shutil.rmtree(self.temp_results_dir)

        if self.success:
            self.host.set_status('Ready')
        elif self.queue_entry:
            self.queue_entry.requeue()


    def move_results(self):
        assert self.queue_entry is not None
        target_dir = self.queue_entry.verify_results_dir()
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        files = os.listdir(self.temp_results_dir)
        for filename in files:
            self.force_move(os.path.join(self.temp_results_dir, filename),
                            os.path.join(target_dir, filename))


    @staticmethod
    def force_move(source, dest):
        """\
        Replacement for shutil.move() that will delete the destination
        if it exists, even if it's a directory.
        """
        if os.path.exists(dest):
            print ('Warning: removing existing destination file ' + dest)
            remove_file_or_dir(dest)
        shutil.move(source, dest)


class VerifySynchronousTask(VerifyTask):
    def __init__(self, queue_entry):
        VerifyTask.__init__(self, queue_entry = queue_entry)


    def epilog(self):
        VerifyTask.epilog(self)
        if self.success:
            if self.queue_entry.job.num_complete() > 0:
                # some other entry failed verify, and we've
                # already been marked as stopped
                return

            self.queue_entry.set_status('Pending')
            job = self.queue_entry.job
            if job.is_ready():
                agent = job.run(self.queue_entry)
                self.agent.dispatcher.add_agent(agent)

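# QueueTask runs the actual autoserv command for one or more queue entries,
# records job_queued/job_started/job_finished timestamps in the job keyval
# file, and hands finished results to the TKO parser.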
class QueueTask(AgentTask):
    def __init__(self, job, queue_entries, cmd):
        AgentTask.__init__(self, cmd)
        self.job = job
        self.queue_entries = queue_entries


    @staticmethod
    def _write_keyval(queue_entry, field, value):
        key_path = os.path.join(queue_entry.results_dir(), 'keyval')
        keyval_file = open(key_path, 'a')
        print >> keyval_file, '%s=%d' % (field, value)
        keyval_file.close()


    def prolog(self):
        # write some job timestamps into the job keyval file
        queued = time.mktime(self.job.created_on.timetuple())
        started = time.time()
        self._write_keyval(self.queue_entries[0], "job_queued", queued)
        self._write_keyval(self.queue_entries[0], "job_started", started)
        for queue_entry in self.queue_entries:
            print "starting queue_task on %s/%s" % (
                queue_entry.host.hostname, queue_entry.id)
            queue_entry.set_status('Running')
            queue_entry.host.set_status('Running')
        if (not self.job.is_synchronous() and
            self.job.num_machines() > 1):
            assert len(self.queue_entries) == 1
            self.job.write_to_machines_file(self.queue_entries[0])


    def epilog(self):
        if self.success:
            status = 'Completed'
        else:
            status = 'Failed'

        # write another timestamp into the job keyval file
        finished = time.time()
        self._write_keyval(self.queue_entries[0], "job_finished", finished)
        for queue_entry in self.queue_entries:
            queue_entry.set_status(status)
            queue_entry.host.set_status('Ready')

        if self.job.is_synchronous() or self.job.num_machines() == 1:
            if self.job.is_finished():
                parse_results(self.job.results_dir())
        else:
            for queue_entry in self.queue_entries:
                parse_results(queue_entry.results_dir(), flags='-l 2')

        print "queue_task finished with %s/%s" % (status, self.success)


class RebootTask(AgentTask):
    def __init__(self, host):
        global _autoserv_path

        # Current implementation of autoserv requires control file
        # to be passed on reboot action request. TODO: remove when no
        # longer appropriate.
        self.cmd = [_autoserv_path, '-b', '-m', host.hostname,
                    '/dev/null']
        self.host = host
        AgentTask.__init__(self, self.cmd,
                           failure_tasks=[RepairTask(host)])


    def prolog(self):
        print "starting reboot task for host: %s" % self.host.hostname
        self.host.set_status("Rebooting")


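# AbortTask detaches the agents that are currently working on a queue entry
# from the dispatcher, kills their active tasks, and marks the entry Aborted.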
class AbortTask(AgentTask):
    def __init__(self, queue_entry, agents_to_abort):
        self.queue_entry = queue_entry
        self.agents_to_abort = agents_to_abort
        for agent in agents_to_abort:
            agent.dispatcher.remove_agent(agent)
        AgentTask.__init__(self, '')


    def prolog(self):
        print "starting abort on host %s, job %s" % (
            self.queue_entry.host_id, self.queue_entry.job_id)
        self.queue_entry.set_status('Aborting')


    def epilog(self):
        self.queue_entry.set_status('Aborted')
        self.success = True

    def run(self):
        for agent in self.agents_to_abort:
            if (agent.active_task):
                agent.active_task.abort()


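# DBObject is a minimal row-to-object mapper for the scheduler's tables: it
# loads a row by id (or wraps an already-fetched row), exposes the columns as
# attributes, and provides update_field()/save()/delete()/fetch() helpers.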
class DBObject(object):
    def __init__(self, fields, id=None, row=None, new_record=False):
        assert (bool(id) != bool(row)) and fields

        self.__table = self._get_table()
        self.__fields = fields

        self.__new_record = new_record

        if row is None:
            sql = 'SELECT * FROM %s WHERE ID=%%s' % self.__table
            rows = _db.execute(sql, (id,))
            if len(rows) == 0:
                raise Exception("row not found (table=%s, id=%s)" %
                                (self.__table, id))
            row = rows[0]

        assert len(row) == len(fields), (
            "table = %s, row = %s/%d, fields = %s/%d" % (
            self.__table, row, len(row), fields, len(fields)))

        self.__valid_fields = {}
        for i, value in enumerate(row):
            self.__dict__[fields[i]] = value
            self.__valid_fields[fields[i]] = True

        del self.__valid_fields['id']


    @classmethod
    def _get_table(cls):
        raise NotImplementedError('Subclasses must override this')


    def count(self, where, table = None):
        if not table:
            table = self.__table

        rows = _db.execute("""
            SELECT count(*) FROM %s
            WHERE %s
        """ % (table, where))

        assert len(rows) == 1

        return int(rows[0][0])


    def num_cols(self):
        return len(self.__fields)


    def update_field(self, field, value):
        assert self.__valid_fields[field]

        if self.__dict__[field] == value:
            return

        query = "UPDATE %s SET %s = %%s WHERE id = %%s" % \
                (self.__table, field)
        _db.execute(query, (value, self.id))

        self.__dict__[field] = value


    def save(self):
        if self.__new_record:
            keys = self.__fields[1:]  # avoid id
            columns = ','.join([str(key) for key in keys])
            values = ['"%s"' % self.__dict__[key] for key in keys]
            values = ','.join(values)
            query = """INSERT INTO %s (%s) VALUES (%s)""" % \
                    (self.__table, columns, values)
            _db.execute(query)


    def delete(self):
        query = 'DELETE FROM %s WHERE id=%%s' % self.__table
        _db.execute(query, (self.id,))


    @classmethod
    def fetch(cls, where):
        rows = _db.execute(
            'SELECT * FROM %s WHERE %s' % (cls._get_table(), where))
        for row in rows:
            yield cls(row=row)


class IneligibleHostQueue(DBObject):
    def __init__(self, id=None, row=None, new_record=None):
        fields = ['id', 'job_id', 'host_id']
        DBObject.__init__(self, fields, id=id, row=row,
                          new_record=new_record)


    @classmethod
    def _get_table(cls):
        return 'ineligible_host_queues'


class Host(DBObject):
    def __init__(self, id=None, row=None):
        fields = ['id', 'hostname', 'locked', 'synch_id', 'status']
        DBObject.__init__(self, fields, id=id, row=row)


    @classmethod
    def _get_table(cls):
        return 'hosts'


    def current_task(self):
        rows = _db.execute("""
            SELECT * FROM host_queue_entries WHERE host_id=%s AND NOT complete AND active
            """, (self.id,))

        if len(rows) == 0:
            return None
        else:
            assert len(rows) == 1
            results = rows[0]
#            print "current = %s" % results
            return HostQueueEntry(row=results)


    def next_queue_entries(self):
        if self.locked:
            print "%s locked, not queuing" % self.hostname
            return None
#        print "%s/%s looking for work" % (self.hostname, self.platform_id)
        rows = _db.execute("""
            SELECT * FROM host_queue_entries
            WHERE ((host_id=%s) OR (meta_host IS NOT null AND
            (meta_host IN (
                SELECT label_id FROM hosts_labels WHERE host_id=%s
            )
            )
            AND job_id NOT IN (
                SELECT job_id FROM ineligible_host_queues
                WHERE host_id=%s
            )))
            AND NOT complete AND NOT active
            ORDER BY priority DESC, meta_host, id
            LIMIT 1
            """, (self.id, self.id, self.id))

        if len(rows) == 0:
            return None
        else:
            return [HostQueueEntry(row=i) for i in rows]

    def yield_work(self):
        print "%s yielding work" % self.hostname
        if self.current_task():
            self.current_task().requeue()

    def set_status(self, status):
        self.update_field('status', status)


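# A HostQueueEntry is one (job, host) pairing.  For meta-host entries the
# host is assigned lazily at run time, and an IneligibleHostQueue block is
# recorded so other entries of the same job will not pick that host again.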
class HostQueueEntry(DBObject):
    def __init__(self, id=None, row=None):
        assert id or row
        fields = ['id', 'job_id', 'host_id', 'priority', 'status',
                  'meta_host', 'active', 'complete']
        DBObject.__init__(self, fields, id=id, row=row)

        self.job = Job(self.job_id)

        if self.host_id:
            self.host = Host(self.host_id)
        else:
            self.host = None

        self.queue_log_path = os.path.join(self.job.results_dir(),
                                           'queue.log.' + str(self.id))


    @classmethod
    def _get_table(cls):
        return 'host_queue_entries'


    def set_host(self, host):
        if host:
            self.queue_log_record('Assigning host ' + host.hostname)
            self.update_field('host_id', host.id)
            self.update_field('active', True)
            self.block_host(host.id)
        else:
            self.queue_log_record('Releasing host')
            self.unblock_host(self.host.id)
            self.update_field('host_id', None)

        self.host = host


    def get_host(self):
        return self.host


    def queue_log_record(self, log_line):
        now = str(datetime.datetime.now())
        queue_log = open(self.queue_log_path, 'a', 0)
        queue_log.write(now + ' ' + log_line + '\n')
        queue_log.close()


    def block_host(self, host_id):
        print "creating block %s/%s" % (self.job.id, host_id)
        row = [0, self.job.id, host_id]
        block = IneligibleHostQueue(row=row, new_record=True)
        block.save()


    def unblock_host(self, host_id):
        print "removing block %s/%s" % (self.job.id, host_id)
        blocks = list(IneligibleHostQueue.fetch(
            'job_id=%d and host_id=%d' % (self.job.id, host_id)))
        assert len(blocks) == 1
        blocks[0].delete()


    def results_dir(self):
        if self.job.is_synchronous() or self.job.num_machines() == 1:
            return self.job.job_dir
        else:
            assert self.host
            return os.path.join(self.job.job_dir,
                                self.host.hostname)


    def verify_results_dir(self):
        if self.job.is_synchronous() or self.job.num_machines() > 1:
            assert self.host
            return os.path.join(self.job.job_dir,
                                self.host.hostname)
        else:
            return self.job.job_dir


    def set_status(self, status):
        self.update_field('status', status)
        if self.host:
            hostname = self.host.hostname
        else:
            hostname = 'no host'
        print "%s/%d status -> %s" % (hostname, self.id, self.status)
        if status in ['Queued']:
            self.update_field('complete', False)
            self.update_field('active', False)

        if status in ['Pending', 'Running', 'Verifying', 'Starting',
                      'Abort', 'Aborting']:
            self.update_field('complete', False)
            self.update_field('active', True)

        if status in ['Failed', 'Completed', 'Stopped', 'Aborted']:
            self.update_field('complete', True)
            self.update_field('active', False)


    def run(self, assigned_host=None):
        if self.meta_host:
            assert assigned_host
            # ensure results dir exists for the queue log
            self.job.create_results_dir()
            self.set_host(assigned_host)

        print "%s/%s scheduled on %s, status=%s" % (self.job.name,
            self.meta_host, self.host.hostname, self.status)

        return self.job.run(queue_entry=self)

    def requeue(self):
        self.set_status('Queued')

        if self.meta_host:
            self.set_host(None)


    def handle_host_failure(self):
        """\
        Called when this queue entry's host has failed verification and
        repair.
        """
        assert not self.meta_host
        self.set_status('Failed')
        if self.job.is_synchronous():
            self.job.stop_all_entries()


    def clear_results_dir(self, results_dir=None):
        results_dir = results_dir or self.results_dir()
        if not os.path.exists(results_dir):
            return
        for filename in os.listdir(results_dir):
            if 'queue.log' in filename:
                continue
            path = os.path.join(results_dir, filename)
            remove_file_or_dir(path)


class Job(DBObject):
    def __init__(self, id=None, row=None):
        assert id or row
        DBObject.__init__(self,
                          ['id', 'owner', 'name', 'priority',
                           'control_file', 'control_type', 'created_on',
                           'synch_type', 'synch_count', 'synchronizing'],
                          id=id, row=row)

        self.job_dir = os.path.join(RESULTS_DIR, "%s-%s" % (self.id,
                                                            self.owner))


    @classmethod
    def _get_table(cls):
        return 'jobs'


    def is_server_job(self):
        return self.control_type != 2


    def get_host_queue_entries(self):
        rows = _db.execute("""
            SELECT * FROM host_queue_entries
            WHERE job_id= %s
        """, (self.id,))
        entries = [HostQueueEntry(row=i) for i in rows]

        assert len(entries) > 0

        return entries


    def set_status(self, status, update_queues=False):
        self.update_field('status', status)

        if update_queues:
            for queue_entry in self.get_host_queue_entries():
                queue_entry.set_status(status)


    def is_synchronous(self):
        return self.synch_type == 2


    def is_ready(self):
        if not self.is_synchronous():
            return True
        sql = "job_id=%s AND status='Pending'" % self.id
        count = self.count(sql, table='host_queue_entries')
        return (count == self.synch_count)


    def ready_to_synchronize(self):
        # heuristic: at least half of the entries are pending
        queue_entries = self.get_host_queue_entries()
        count = 0
        for queue_entry in queue_entries:
            if queue_entry.status == 'Pending':
                count += 1

        # use float division so the 50% threshold works for partial counts
        return (float(count) / self.synch_count >= 0.5)


    def start_synchronizing(self):
        self.update_field('synchronizing', True)


    def results_dir(self):
        return self.job_dir

    def num_machines(self, clause = None):
        sql = "job_id=%s" % self.id
        if clause:
            sql += " AND (%s)" % clause
        return self.count(sql, table='host_queue_entries')


    def num_queued(self):
        return self.num_machines('not complete')


    def num_active(self):
        return self.num_machines('active')


    def num_complete(self):
        return self.num_machines('complete')


    def is_finished(self):
        left = self.num_queued()
        print "%s: %s machines left" % (self.name, left)
        return left == 0

    def stop_synchronizing(self):
        self.update_field('synchronizing', False)
        self.set_status('Queued', update_queues = False)


    def stop_all_entries(self):
        for child_entry in self.get_host_queue_entries():
            if not child_entry.complete:
                child_entry.set_status('Stopped')


    def write_to_machines_file(self, queue_entry):
        hostname = queue_entry.get_host().hostname
        print "writing %s to job %s machines file" % (hostname, self.id)
        file_path = os.path.join(self.job_dir, '.machines')
        mf = open(file_path, 'a')
        mf.write("%s\n" % queue_entry.get_host().hostname)
        mf.close()


    def create_results_dir(self, queue_entry=None):
        print "create: active: %s complete %s" % (self.num_active(),
                                                  self.num_complete())

        if not os.path.exists(self.job_dir):
            os.makedirs(self.job_dir)

        if queue_entry:
            return queue_entry.results_dir()
        return self.job_dir


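    # Build the Agent that will execute this job.  A synchronous job that is
    # not yet ready only gets a VerifySynchronousTask (entries collect in
    # 'Pending' until synch_count is reached); otherwise the autoserv command
    # is built over all participating entries, with an asynchronous job
    # getting a VerifyTask ahead of its QueueTask.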
    def run(self, queue_entry):
        results_dir = self.create_results_dir(queue_entry)

        if self.is_synchronous():
            if not self.is_ready():
                return Agent([VerifySynchronousTask(
                                  queue_entry = queue_entry)],
                             [queue_entry.id])

        queue_entry.set_status('Starting')

        ctrl = open(os.tmpnam(), 'w')
        if self.control_file:
            ctrl.write(self.control_file)
        else:
            ctrl.write("")
        ctrl.flush()

        if self.is_synchronous():
            queue_entries = self.get_host_queue_entries()
        else:
            assert queue_entry
            queue_entries = [queue_entry]
        hostnames = ','.join([entry.get_host().hostname
                              for entry in queue_entries])

        params = [_autoserv_path, '-n', '-r', results_dir,
                  '-b', '-u', self.owner, '-l', self.name,
                  '-m', hostnames, ctrl.name]

        if not self.is_server_job():
            params.append('-c')

        tasks = []
        if not self.is_synchronous():
            tasks.append(VerifyTask(queue_entry))

        tasks.append(QueueTask(job = self,
                               queue_entries = queue_entries,
                               cmd = params))

        ids = []
        for entry in queue_entries:
            ids.append(entry.id)

        agent = Agent(tasks, ids)

        return agent


if __name__ == '__main__':
    main()