"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

__author__ = """
Martin J. Bligh <mbligh@google.com>
Andy Whitcroft <apw@shadowen.org>
"""

import os, sys, re, time, select, subprocess, traceback

from autotest_lib.client.bin import fd_stack
from autotest_lib.client.common_lib import error
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *


# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
    server_dir = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(server_dir, "control_segments", name)
    if os.path.exists(script_file):
        return file(script_file).read()
    else:
        return ""


preamble = """\
import os, sys

from autotest_lib.server import hosts, autotest, kvm, git, standalone_profiler
from autotest_lib.server import source_kernel, rpm_kernel, deb_kernel
from autotest_lib.server import git_kernel
from autotest_lib.server.subcommand import *
from autotest_lib.server.utils import run, get_tmp_dir, sh_escape

from autotest_lib.client.common_lib.error import *
from autotest_lib.client.common_lib import barrier

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier

if len(machines) > 1:
    open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""

client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
    host = hosts.SSHHost(machine)
    at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

crashdumps = """
def crashdumps(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

reboot_segment = """\
def reboot(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

install = """\
def install(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.machine_install()

job.parallel_simple(install, machines, log=False)
"""

# load up the verifier control segment, with an optional site-specific hook
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}
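
# If a site_job module exists, it is expected to export a function of this
# shape (a sketch only; the key shown is illustrative, not a real site_job
# field):
#
#     def get_site_job_data(job):
#         return {'site_build': 'r1234'}
#
# Whatever dictionary it returns is merged into the job keyvals written
# out in base_server_job.__init__ below.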


class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        tmpdir
            <autodir>/server/tmp/
        control
            the control file for this job
    """

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
            control
                The control file (pathname of)
            args
                args to pass to the control file
            resultdir
                where to throw the results
            label
                label for the job
            user
                Username for the job (email address)
            machines
                list of machine names the job runs against
            client
                True if a client-side control file
            parse_job
                job tag used when continuously parsing results into
                the results database (only enabled for single-machine
                jobs)
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label': label, 'user': user,
                    'hostname': ','.join(machines)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False


    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(0)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job,
                                       self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)."""
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.results_db.insert_test(self.job_model, test)
        self.using_parser = False


    def verify(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines': self.machines, 'job': self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = 'Verify failed\n' + str(e) + '\n' + format_error()
            self.record('ABORT', None, None, msg)
            raise


    def repair(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines': self.machines, 'job': self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception, exc:
            print 'Exception occurred during repair'
            traceback.print_exc()
        self.verify()


    def enable_external_logging(self):
        """Start or restart external logging mechanism."""
        pass


    def disable_external_logging(self):
        """Pause or stop external logging mechanism."""
        pass


    def use_external_logging(self):
        """Return True if external logging should be used."""
        return False


    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
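        # Illustrative only: with parse_job='1-user' and
        # machines=['h1', 'h2'], each forked child re-registers the
        # parser on <resultdir>/<machine> and parses its results under
        # the per-machine tag '1-user/h1' or '1-user/h2'.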
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir,
                                              machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)


    def run(self, reboot=False, install_before=False,
            install_after=False, collect_crashdumps=True,
            namespace={}):
        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        test_start_time = int(time.time())

        os.chdir(self.resultdir)

        self.enable_external_logging()
        status_log = os.path.join(self.resultdir, 'status.log')
        try:
            if install_before and machines:
                exec(preamble + install, namespace, namespace)
            if self.client:
                namespace['control'] = self.control
                open('control', 'w').write(self.control)
                open('control.srv', 'w').write(client_wrapper)
                server_control = client_wrapper
            else:
                open('control.srv', 'w').write(self.control)
                server_control = self.control
            exec(preamble + server_control, namespace, namespace)

        finally:
            if machines and collect_crashdumps:
                namespace['test_start_time'] = test_start_time
                exec(preamble + crashdumps,
                     namespace, namespace)
            self.disable_external_logging()
            if reboot and machines:
                exec(preamble + reboot_segment,
                     namespace, namespace)
            if install_after and machines:
                exec(preamble + install, namespace, namespace)


    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        url
            url of the test to run
        tag
            tag to add to testname
        """

        (group, testname) = test.testname(url)
        tag = None
        subdir = testname

        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                subdir += '.' + tag

        try:
            test.runtest(self, url, tag, args, dargs)
            self.record('GOOD', subdir, testname,
                        'completed successfully')
        except Exception, detail:
            self.record('FAIL', subdir, testname,
                        str(detail) + "\n" + format_error())


    def run_group(self, function, *args, **dargs):
        """\
        function:
            subroutine to run
        *args:
            arguments for the function
        **dargs:
            dictionary arguments; a 'tag' entry, if present, names
            the group in the status log
        """

        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        # if tag:
        #     name += '.' + tag
        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                self.record_prefix += '\t'
                result = function(*args, **dargs)
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)
            except:
                self.record_prefix = old_record_prefix
                self.record('END FAIL', None, name, format_error())
                # re-raise so the outer handlers below can decide
                # whether to swallow (TestError) or wrap the failure
                raise
        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' +
                                  format_error())

        return result


    def record(self, status_code, subdir, operation, status=''):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: (GOOD|WARN|FAIL|ABORT)
            or   START
            or   END (GOOD|WARN|FAIL|ABORT)

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and are
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status)


    def _read_warnings(self):
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers,
                                          [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
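                # each logger line is "<epoch timestamp>\t<message>";
                # e.g. (illustrative): "1205000000\tsoft lockup detected"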
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None):
        """
        Internal Function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            # use search, not match, so embedded newlines/tabs are
            # caught anywhere in the string
            if re.search(r'[\n\t]', subdir):
                raise ValueError(
                    'Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT))$',
                        status_code):
            raise ValueError('Invalid status code supplied: %s' %
                             status_code)
        if not operation:
            operation = '----'
        if re.search(r'[\n\t]', operation):
            raise ValueError(
                'Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", " ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        epoch_time_str = "timestamp=%d" % (epoch_time,)
        local_time_str = time.strftime("localtime=%b %d %H:%M:%S",
                                       local_time)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in (status_code, substr, operation,
                                         epoch_time_str, local_time_str,
                                         status))
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike __record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = os.path.join(self.resultdir, 'status.log')
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """
        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time)

        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status([msg])


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.results_db.insert_test(self.job_model, test)


# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    extract_indent = re.compile(r"^(\t*).*$")
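
    # A status line arriving from the client looks something like this
    # (payload illustrative):
    #     AUTOTEST_STATUS:0.1:<pre-rendered status log line>
    # where '0.1' is a dot-separated subcommand tag (parsed as ints in
    # _process_quoted_line below) and everything after the second colon
    # is a status log line ready for _record_prerendered.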

    def __init__(self, job):
        self.job = job
        self.leftover = ""
        self.last_line = ""
        self.logs = {}


    def _process_log_dict(self, log_dict):
        log_list = log_dict.pop("logs", [])
        for key in sorted(log_dict.iterkeys()):
            log_list += self._process_log_dict(log_dict.pop(key))
        return log_list


    def _process_logs(self):
        """Go through the accumulated logs in self.logs and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired, it can be reconstructed from the status
        log by looking at the timestamp lines."""
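        # Illustrative ordering: logs tagged '1' are flushed before
        # '1.1' and '1.2', and all of those before anything tagged '2',
        # regardless of the order in which the lines arrived.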
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]


    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)


    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            print line


    def _format_warnings(self, last_line, warnings):
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]


    def _process_warnings(self, last_line, log_dict, warnings):
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)


    def write(self, data):
        # first check for any new console warnings
        warnings = self.job._read_warnings()
        self._process_warnings(self.last_line, self.logs, warnings)
        # now process the newest data written out
        data = self.leftover + data
        lines = data.split("\n")
        # process every line but the last one
        for line in lines[:-1]:
            self._process_line(line)
        # save the last line for later processing
        # since we may not have the whole line yet
        self.leftover = lines[-1]


    def flush(self):
        sys.stdout.flush()


    def close(self):
        if self.leftover:
            self._process_line(self.leftover)
        self._process_logs()
        self.flush()

# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from autotest_lib.server.site_server_job import site_server_job
except ImportError:
    class site_server_job(base_server_job):
        pass


class server_job(site_server_job):
    pass