"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py.

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

__author__ = """
Martin J. Bligh <mbligh@google.com>
Andy Whitcroft <apw@shadowen.org>
"""

import os, sys, re, time, select, subprocess, traceback

from autotest_lib.client.bin import fd_stack
from autotest_lib.client.common_lib import error
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *


# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
    server_dir = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(server_dir, "control_segments", name)
    if os.path.exists(script_file):
        return open(script_file).read()
    else:
        return ""


preamble = """\
import os, sys

import hosts, autotest, kvm, git, standalone_profiler
import source_kernel, rpm_kernel, deb_kernel, git_kernel
from common.error import *
from common import barrier
from subcommand import *
from utils import run, get_tmp_dir, sh_escape

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier

if len(machines) > 1:
    open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""

client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
    host = hosts.SSHHost(machine)
    at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

crashdumps = """
def crashdumps(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

reboot_segment = """\
def reboot(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

install = """\
def install(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.machine_install()

job.parallel_simple(install, machines, log=False)
"""

# load up the verify control segment, with an optional site-specific hook
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")
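
# Sketch (comment only): how these segments get used. verify() and repair()
# below exec them together with 'preamble' against a namespace containing
# 'machines' and 'job', so a hypothetical site_verify segment could be
# shaped like the stock segments above:
#
#     def site_check(machine):
#         host = hosts.SSHHost(machine, initialize=False)
#         host.run('uptime')
#
#     job.parallel_simple(site_check, machines, log=False)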


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}

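# A hypothetical site_job module only needs to export get_site_job_data(),
# returning a dict of extra keyvals to merge into the job keyval file, e.g.
# (key and variable names invented):
#
#     def get_site_job_data(job):
#         import os
#         return {'site_build': os.environ.get('SITE_BUILD', 'unknown')}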

class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        control
            the control file for this job
    """

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
        control
            The control file (pathname of)
        args
            args to pass to the control file
        resultdir
            where to throw the results
        label
            label for the job
        user
            Username for the job (email address)
        machines
            the list of hostnames to run the job on
        client
            True if a client-side control file
        parse_job
            job tag to use for continuous parsing into the results
            db, if any
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label': label, 'user': user,
                    'hostname': ','.join(machines)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False


    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(0)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def _cleanup(self):
        """This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)."""
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.results_db.insert_test(self.job_model, test)


    def verify(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines': self.machines, 'job': self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = 'Verify failed\n' + str(e) + '\n' + format_error()
            self.record('ABORT', None, None, msg)
            raise


    def repair(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines': self.machines, 'job': self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception:
            print 'Exception occurred during repair'
            traceback.print_exc()
        self.verify()


    def enable_external_logging(self):
        """Start or restart external logging mechanism."""
        pass


    def disable_external_logging(self):
        """Pause or stop external logging mechanism."""
        pass


    def use_external_logging(self):
        """Return True if external logging should be used."""
        return False


    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self._cleanup()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)

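    # Forking sketch (values invented): with parse_job == '42-mbligh' and
    # machines == ['a', 'b'], each forked child re-homes itself before
    # running 'function', so the child for 'a' ends up with
    # parse_job == '42-mbligh/a', machines == ['a'] and
    # resultdir == <resultdir>/a, and parses its own results independently.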

    def run(self, reboot=False, install_before=False,
            install_after=False, collect_crashdumps=True,
            namespace={}):
        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        test_start_time = int(time.time())

        os.chdir(self.resultdir)

        self.enable_external_logging()
        status_log = os.path.join(self.resultdir, 'status.log')
        try:
            if install_before and machines:
                exec(preamble + install, namespace, namespace)
            if self.client:
                namespace['control'] = self.control
                open('control', 'w').write(self.control)
                open('control.srv', 'w').write(client_wrapper)
                server_control = client_wrapper
            else:
                open('control.srv', 'w').write(self.control)
                server_control = self.control
            exec(preamble + server_control, namespace, namespace)

        finally:
            if machines and collect_crashdumps:
                namespace['test_start_time'] = test_start_time
                exec(preamble + crashdumps, namespace, namespace)
            self.disable_external_logging()
            if reboot and machines:
                exec(preamble + reboot_segment, namespace, namespace)
            if install_after and machines:
                exec(preamble + install, namespace, namespace)


    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
            tag to add to testname
        url
            url of the test to run
        """
        (group, testname) = test.testname(url)
        tag = None
        subdir = testname

        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                subdir += '.' + tag

        try:
            test.runtest(self, url, tag, args, dargs)
            self.record('GOOD', subdir, testname,
                        'completed successfully')
        except Exception, detail:
            self.record('FAIL', subdir, testname,
                        str(detail) + "\n" + format_error())


    def run_group(self, function, *args, **dargs):
        """\
        function:
            subroutine to run
        *args:
            arguments for the function
        """
        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                self.record_prefix += '\t'
                result = function(*args, **dargs)
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)
            except:
                self.record_prefix = old_record_prefix
                self.record('END FAIL', None, name, format_error())
                # re-raise so the outer handlers below actually see it
                raise
        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' + format_error())

        return result

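    # Usage sketch (function and tag invented): wrapping a step in a group
    # brackets it with START and END status lines, one indent level deeper:
    #
    #     def reboot_and_check(host):
    #         host.reboot()
    #
    #     job.run_group(reboot_and_check, host, tag='warm_reboot')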

    def record(self, status_code, subdir, operation, status=''):
        """
        Record job-level status.

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is: <status code>\t<subdir>\t<operation>\t<status>

        status code: (GOOD|WARN|FAIL|ABORT)
            or START
            or END (GOOD|WARN|FAIL|ABORT)

        subdir: MUST be a relevant subdirectory in the results,
            or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
            "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and are
        governed by self.record_prefix.

        Multiline messages have secondary lines prefaced by a double
        space ('  ').

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status)

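    # An illustrative status.log excerpt for a grouped test (tab-separated
    # fields shown as \t, timestamps invented) renders roughly as:
    #
    #     START\t----\tmygroup\ttimestamp=1203560000\tlocaltime=...\t
    #     \tGOOD\tdbench\tdbench\ttimestamp=1203560060\tlocaltime=...\tcompleted successfully
    #     END GOOD\t----\tmygroup\ttimestamp=1203560061\tlocaltime=...\t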

    def _read_warnings(self):
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers,
                                          [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings


    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None):
        """
        Internal function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            # use re.search, not re.match: the forbidden characters
            # may appear anywhere in the string
            if re.search(r'[\n\t]', subdir):
                raise ValueError('Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not re.match(r'(START|(END )?(GOOD|WARN|FAIL|ABORT))$',
                        status_code):
            raise ValueError('Invalid status code supplied: %s' %
                             status_code)
        if not operation:
            operation = '----'
        if re.search(r'[\n\t]', operation):
            raise ValueError('Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", "  ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        epoch_time_str = "timestamp=%d" % (epoch_time,)
        local_time_str = time.strftime("localtime=%b %d %H:%M:%S",
                                       local_time)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in (status_code, substr, operation,
                                         epoch_time_str, local_time_str,
                                         status))
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike __record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = os.path.join(self.resultdir, 'status.log')
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly; only via job.record,
        as calling this directly would bypass the console monitor
        logging.
        """
        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time)

        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status([msg])


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.results_db.insert_test(self.job_model, test)


# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    extract_indent = re.compile(r"^(\t*).*$")
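
    # Quoted status lines arriving from the client look like (content
    # invented):
    #
    #     AUTOTEST_STATUS:2.1:\tGOOD\tdbench\tdbench\t...\tcompleted successfully
    #
    # The '2.1' tag files the line under self.logs[2][1]["logs"]; an empty
    # tag flushes everything accumulated so far (see _process_quoted_line
    # below).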

    def __init__(self, job):
        self.job = job
        self.leftover = ""
        self.last_line = ""
        self.logs = {}


    def _process_log_dict(self, log_dict):
        log_list = log_dict.pop("logs", [])
        for key in sorted(log_dict.iterkeys()):
            log_list += self._process_log_dict(log_dict.pop(key))
        return log_list


    def _process_logs(self):
        """Go through the accumulated logs in self.logs and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired, it can be reconstructed from the status
        log by looking at the timestamp lines."""
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]

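    # Ordering example: if lines a, b and c were logged under tags '1',
    # '1.1' and '2' respectively, self.logs would be
    # {1: {'logs': [a], 1: {'logs': [b]}}, 2: {'logs': [c]}} and
    # _process_log_dict() would emit them in the order [a, b, c].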

    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)


    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            print line


    def _format_warnings(self, last_line, warnings):
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]


    def _process_warnings(self, last_line, log_dict, warnings):
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)


    def write(self, data):
        # first check for any new console warnings
        warnings = self.job._read_warnings()
        self._process_warnings(self.last_line, self.logs, warnings)
        # now process the newest data written out
        data = self.leftover + data
        lines = data.split("\n")
        # process every line but the last one
        for line in lines[:-1]:
            self._process_line(line)
        # save the last line for later processing
        # since we may not have the whole line yet
        self.leftover = lines[-1]

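    # Buffering sketch: write('GOOD line\nAUTOTEST_ST') processes the first,
    # complete line immediately and holds 'AUTOTEST_ST' in self.leftover
    # until a later write() or close() completes it.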

    def flush(self):
        sys.stdout.flush()


    def close(self):
        if self.leftover:
            self._process_line(self.leftover)
        self._process_logs()
        self.flush()


# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from site_server_job import site_server_job
except ImportError:
    class site_server_job(base_server_job):
        pass


class server_job(site_server_job):
    pass
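
# A minimal, hypothetical usage sketch (paths, label and hostnames invented):
#
#     job = server_job('/path/to/control.srv', args=[],
#                      resultdir='/tmp/results', label='smoke',
#                      user='user@example.com', machines=['host1', 'host2'])
#     job.verify()
#     job.run(reboot=True)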