"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

__author__ = """
Martin J. Bligh <mbligh@google.com>
Andy Whitcroft <apw@shadowen.org>
"""

import os, sys, re, time, select, subprocess, traceback

from autotest_lib.client.bin import fd_stack
from autotest_lib.client.common_lib import error
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *


# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
    server_dir = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(server_dir, "control_segments", name)
    if os.path.exists(script_file):
        return file(script_file).read()
    else:
        return ""


preamble = """\
import os, sys

import hosts, autotest, kvm, git, standalone_profiler
import source_kernel, rpm_kernel, deb_kernel, git_kernel
from common.error import *
from common import barrier
from subcommand import *
from utils import run, get_tmp_dir, sh_escape

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier

if len(machines) > 1:
    open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""

client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
    host = hosts.SSHHost(machine)
    at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

crashdumps = """
def crashdumps(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

reboot_segment = """\
def reboot(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

install = """\
def install(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.machine_install()

job.parallel_simple(install, machines, log=False)
"""

# load up the verifier control segment, with an optional site-specific hook
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}

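# A minimal sketch of the optional site_job hook loaded above; the module
# name and the get_site_job_data(job) signature come from the import block,
# but the returned keys are purely illustrative:
#
#     # site_job.py
#     def get_site_job_data(job):
#         return {'site_tag': 'example'}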

class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        control
            the control file for this job
    """

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
            control
                The control file (pathname of)
            args
                args to pass to the control file
            resultdir
                where to throw the results
            label
                label for the job
            user
                Username for the job (email address)
            machines
                list of machine names the job runs against
            client
                True if a client-side control file
            parse_job
                if supplied, the job tag used for continuous parsing
                of results into the results database
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label': label, 'user': user,
                    'hostname': ','.join(machines)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False


    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(0)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)"""
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.results_db.insert_test(self.job_model, test)
        self.using_parser = False


    def verify(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines': self.machines, 'job': self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = 'Verify failed\n' + str(e) + '\n' + format_error()
            self.record('ABORT', None, None, msg)
            raise


    def repair(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines': self.machines, 'job': self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception, exc:
            print 'Exception occurred during repair'
            traceback.print_exc()
        self.verify()


    def enable_external_logging(self):
        """Start or restart external logging mechanism."""
        pass


    def disable_external_logging(self):
        """Pause or stop external logging mechanism."""
        pass


    def use_external_logging(self):
        """Return True if external logging should be used."""
        return False


    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)

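    # An illustrative walk-through of the wrapper above: with
    # parse_job="1-user" and machines=["hostA", "hostB"] (values made up),
    # each forked invocation re-tags itself as "1-user/hostA" or
    # "1-user/hostB" and parses results into its own <resultdir>/<machine>
    # subdirectory, so every machine gets an independent parser.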

    def run(self, reboot=False, install_before=False,
            install_after=False, collect_crashdumps=True,
            namespace={}):
        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        test_start_time = int(time.time())

        os.chdir(self.resultdir)

        self.enable_external_logging()
        status_log = os.path.join(self.resultdir, 'status.log')
        try:
            if install_before and machines:
                exec(preamble + install, namespace, namespace)
            if self.client:
                namespace['control'] = self.control
                open('control', 'w').write(self.control)
                open('control.srv', 'w').write(client_wrapper)
                server_control = client_wrapper
            else:
                open('control.srv', 'w').write(self.control)
                server_control = self.control
            exec(preamble + server_control, namespace, namespace)

        finally:
            if machines and collect_crashdumps:
                namespace['test_start_time'] = test_start_time
                exec(preamble + crashdumps, namespace, namespace)
            self.disable_external_logging()
            if reboot and machines:
                exec(preamble + reboot_segment, namespace, namespace)
            if install_after and machines:
                exec(preamble + install, namespace, namespace)


    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

            tag
                tag to add to testname
            url
                url of the test to run
        """
        (group, testname) = test.testname(url)
        tag = None
        subdir = testname

        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                subdir += '.' + tag

        try:
            test.runtest(self, url, tag, args, dargs)
            self.record('GOOD', subdir, testname,
                        'completed successfully')
        except Exception, detail:
            self.record('FAIL', subdir, testname,
                        str(detail) + "\n" + format_error())


    def run_group(self, function, *args, **dargs):
        """\
        function:
            subroutine to run
        *args:
            arguments for the function
        """
        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        # if tag:
        #     name += '.' + tag
        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                self.record_prefix += '\t'
                result = function(*args, **dargs)
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)
            except:
                self.record_prefix = old_record_prefix
                self.record('END FAIL', None, name, format_error())
                raise
        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' + format_error())

        return result


    def record(self, status_code, subdir, operation, status=''):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: (GOOD|WARN|FAIL|ABORT)
            or START
            or END (GOOD|WARN|FAIL|ABORT)

        subdir: MUST be a relevant subdirectory in the results,
            or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
            "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping and are
        governed by self.record_prefix

        Multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
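        # An example of a rendered line (the values are illustrative;
        # the two timestamp fields are inserted by _render_record):
        #
        # GOOD	----	reboot.verify	timestamp=1208273460	localtime=Apr 15 15:31:00	completed successfully
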
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status)


    def _read_warnings(self):
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers,
                                          [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None):
        """
        Internal function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            if re.search(r'[\n\t]', subdir):
                raise ValueError(
                    'Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT))$',
                        status_code):
            raise ValueError('Invalid status code supplied: %s' %
                             status_code)
        if not operation:
            operation = '----'
        if re.search(r'[\n\t]', operation):
            raise ValueError(
                'Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", " ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        epoch_time_str = "timestamp=%d" % (epoch_time,)
        local_time_str = time.strftime("localtime=%b %d %H:%M:%S",
                                       local_time)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in (status_code, substr, operation,
                                         epoch_time_str, local_time_str,
                                         status))
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike __record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = os.path.join(self.resultdir, 'status.log')
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """
        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time)

        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status([msg])


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.results_db.insert_test(self.job_model, test)


# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    extract_indent = re.compile(r"^(\t*).*$")
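
    # For reference, a quoted status line from the client looks like this
    # (everything after the second colon is illustrative):
    #
    #     AUTOTEST_STATUS:2.1:	GOOD	----	sleeptest	completed successfully
    #
    # The tag "2.1" is a dotted sequence of integers naming the nested
    # group the line belongs to; a blank tag means the line (and any
    # buffered logs) should be flushed out immediately.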

    def __init__(self, job):
        self.job = job
        self.leftover = ""
        self.last_line = ""
        self.logs = {}


    def _process_log_dict(self, log_dict):
        log_list = log_dict.pop("logs", [])
        for key in sorted(log_dict.iterkeys()):
            log_list += self._process_log_dict(log_dict.pop(key))
        return log_list

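    # The self.logs structure built by _process_quoted_line() below from
    # the dotted tags is a nested dict; e.g. after lines tagged "1", "1.1"
    # and "2" (tags illustrative) it looks like:
    #
    #     {1: {"logs": [<lines for 1>],
    #          1: {"logs": [<lines for 1.1>]}},
    #      2: {"logs": [<lines for 2>]}}
    #
    # _process_log_dict() above flattens this so that logs for x.y come
    # out before logs for x.y.z, and x.y before x.z whenever y < z.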

    def _process_logs(self):
        """Go through the accumulated logs in self.logs and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired, it can be reconstructed from the
        status log by looking at timestamp lines."""
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]


    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)


    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            print line


    def _format_warnings(self, last_line, warnings):
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]


    def _process_warnings(self, last_line, log_dict, warnings):
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)


    def write(self, data):
        # first check for any new console warnings
        warnings = self.job._read_warnings()
        self._process_warnings(self.last_line, self.logs, warnings)
        # now process the newest data written out
        data = self.leftover + data
        lines = data.split("\n")
        # process every line but the last one
        for line in lines[:-1]:
            self._process_line(line)
        # save the last line for later processing
        # since we may not have the whole line yet
        self.leftover = lines[-1]


    def flush(self):
        sys.stdout.flush()


    def close(self):
        if self.leftover:
            self._process_line(self.leftover)
        self._process_logs()
        self.flush()

# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from site_server_job import site_server_job
except ImportError:
    class site_server_job(base_server_job):
        pass


class server_job(site_server_job):
    pass
714 pass