"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

__author__ = """
Martin J. Bligh <mbligh@google.com>
Andy Whitcroft <apw@shadowen.org>
"""

import os, sys, re, time, select, subprocess, traceback

from autotest_lib.client.bin import fd_stack
from autotest_lib.client.common_lib import error, logging
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *


# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
    server_dir = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(server_dir, "control_segments", name)
    if os.path.exists(script_file):
        return file(script_file).read()
    else:
        return ""

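# Usage sketch (illustrative; "foo" is a hypothetical segment name under
# control_segments/, mirroring the verify/repair composition below):
#
#   segment = load_control_segment("site_foo") + load_control_segment("foo")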

preamble = """\
import os, sys

from autotest_lib.server import hosts, autotest, kvm, git, standalone_profiler
from autotest_lib.server import source_kernel, rpm_kernel, deb_kernel
from autotest_lib.server import git_kernel
from autotest_lib.server.subcommand import *
from autotest_lib.server.utils import run, get_tmp_dir, sh_escape
from autotest_lib.client.common_lib.error import *
from autotest_lib.client.common_lib import barrier

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier
if len(machines) > 1:
    open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""
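
# Sketch of how these segments are consumed (see verify(), repair() and
# run() below): the caller builds a namespace carrying at least 'job' and
# 'machines', then execs the preamble plus a segment in it, e.g.
#
#   namespace = {'machines': machines, 'job': job}
#   exec(preamble + verify, namespace, namespace)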

client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
    host = hosts.SSHHost(machine)
    at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

crashdumps = """
def crashdumps(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

reboot_segment = """\
def reboot(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

install = """\
def install(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.machine_install()

job.parallel_simple(install, machines, log=False)
"""

# load up the verifier control segment, with an optional site-specific hook
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}
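
# A site_job module, when present, is assumed to export get_site_job_data(job)
# returning a dict of extra keyvals to merge into the job keyval file, e.g.
# (hypothetical values):
#
#   def get_site_job_data(job):
#       return {'site_build': '1.2.3'}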


class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        control
            the control file for this job
    """

    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
        control
            The control file (pathname of)
        args
            args to pass to the control file
        resultdir
            where to throw the results
        label
            label for the job
        user
            Username for the job (email address)
        machines
            a list of hostnames to run the job on
        client
            True if a client-side control file
        parse_job
            if supplied, continuously parse the job results into the
            TKO results database under this job tag
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label': label, 'user': user,
                    'hostname': ','.join(machines)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False

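    # Typical construction (illustrative; the path, label, user and host
    # names here are hypothetical):
    #
    #   job = server_job('/path/to/control.srv', [], '/results/job42',
    #                    'mylabel', 'user@example.com', ['host1', 'host2'])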

    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(0)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job, self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)"""
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.results_db.insert_test(self.job_model, test)
        self.using_parser = False

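    # Parser lifecycle sketch (applies only when parse_job was supplied and
    # the job targets a single machine): init_parser() at construction time,
    # __parse_status() as each status line is recorded, and cleanup_parser()
    # once the job is finished.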

    def verify(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines': self.machines, 'job': self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n'
                   + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise


    def repair(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines': self.machines, 'job': self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception, exc:
            print 'Exception occurred during repair'
            traceback.print_exc()
        self.verify()


    def enable_external_logging(self):
        """Start or restart external logging mechanism.
        """
        pass


    def disable_external_logging(self):
        """Pause or stop external logging mechanism.
        """
        pass


    def use_external_logging(self):
        """Return True if external logging should be used.
        """
        return False


    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir, machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)

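    # Usage sketch (mirrors the control segments above, where 'hosts' comes
    # from the preamble imports):
    #
    #   def reboot(machine):
    #       hosts.SSHHost(machine, initialize=False).reboot()
    #   job.parallel_simple(reboot, machines, log=False)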

    def run(self, reboot=False, install_before=False,
            install_after=False, collect_crashdumps=True,
            namespace={}):
        # use a copy so changes don't affect the original dictionary
        namespace = namespace.copy()
        machines = self.machines

        self.aborted = False
        namespace['machines'] = machines
        namespace['args'] = self.args
        namespace['job'] = self
        test_start_time = int(time.time())

        os.chdir(self.resultdir)

        self.enable_external_logging()
        status_log = os.path.join(self.resultdir, 'status.log')
        try:
            if install_before and machines:
                exec(preamble + install, namespace, namespace)
            if self.client:
                namespace['control'] = self.control
                open('control', 'w').write(self.control)
                open('control.srv', 'w').write(client_wrapper)
                server_control = client_wrapper
            else:
                open('control.srv', 'w').write(self.control)
                server_control = self.control
            exec(preamble + server_control, namespace, namespace)

        finally:
            if machines and collect_crashdumps:
                namespace['test_start_time'] = test_start_time
                exec(preamble + crashdumps, namespace, namespace)
            self.disable_external_logging()
            if reboot and machines:
                exec(preamble + reboot_segment, namespace, namespace)
            if install_after and machines:
                exec(preamble + install, namespace, namespace)


    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
            tag to add to testname
        url
            url of the test to run
        """

        (group, testname) = test.testname(url)
        tag = None
        subdir = testname

        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                subdir += '.' + tag

        try:
            test.runtest(self, url, tag, args, dargs)
            self.record('GOOD', subdir, testname, 'completed successfully')
        except error.TestNAError, detail:
            self.record('TEST_NA', subdir, testname, str(detail))
        except Exception, detail:
            info = str(detail) + "\n" + traceback.format_exc()
            self.record('FAIL', subdir, testname, info)


    def run_group(self, function, *args, **dargs):
        """\
        function:
            subroutine to run
        *args:
            arguments for the function
        """

        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        # if tag:
        #     name += '.' + tag
        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                self.record_prefix += '\t'
                result = function(*args, **dargs)
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)
            except:
                self.record_prefix = old_record_prefix
                self.record('END FAIL', None, name,
                            traceback.format_exc())
                # re-raise so the outer except clauses below can either
                # swallow a TestError or wrap anything else
                raise

        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' +
                                  traceback.format_exc())

        return result


    def record(self, status_code, subdir, operation, status=''):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>
        (_render_record also inserts timestamp= and localtime= fields
        between the operation and the status)

        status code: see common_lib.logging.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
                or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and are
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
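        # Example of a rendered line (illustrative; fields are tab-separated):
        #   GOOD  dbench  dbench  timestamp=1210000000  localtime=May 05 12:00:00  completed successfully
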
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status)


    def _read_warnings(self):
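        # each warning logger is a pipe whose lines are assumed to be of
        # the form '<epoch timestamp>\t<message>' (see the split below)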
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers, [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
mblighf4e04152008-02-21 16:05:53 +0000477
478
mblighdab39662008-02-27 16:47:55 +0000479 def _render_record(self, status_code, subdir, operation, status='',
480 epoch_time=None, record_prefix=None):
mblighf4e04152008-02-21 16:05:53 +0000481 """
mblighdab39662008-02-27 16:47:55 +0000482 Internal Function to generate a record to be written into a
483 status log. For use by server_job.* classes only.
mblighf1c52842007-10-16 15:21:38 +0000484 """
mblighf1c52842007-10-16 15:21:38 +0000485 if subdir:
            # search (not just match at the start) so embedded tabs and
            # newlines are rejected anywhere in the string
            if re.search(r'[\n\t]', subdir):
                raise ValueError(
                    'Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not logging.is_valid_status(status_code):
            raise ValueError('Invalid status code supplied: %s' %
                             status_code)
        if not operation:
            operation = '----'
        if re.search(r'[\n\t]', operation):
            raise ValueError(
                'Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", " ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        epoch_time_str = "timestamp=%d" % (epoch_time,)
        local_time_str = time.strftime("localtime=%b %d %H:%M:%S",
                                       local_time)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in (status_code, substr, operation,
                                         epoch_time_str, local_time_str,
                                         status))
        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike __record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = os.path.join(self.resultdir, 'status.log')
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time)

        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status([msg])


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.results_db.insert_test(self.job_model, test)


# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    extract_indent = re.compile(r"^(\t*).*$")
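
    # Status lines arriving from the client are assumed to look like
    # (illustrative): 'AUTOTEST_STATUS:1.2:GOOD\t----\tsleeptest\tcompleted successfully'
    # where '1.2' is the nested group tag consumed by _process_quoted_line
    # below and the remainder is a pre-rendered status line.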

    def __init__(self, job):
        self.job = job
        self.leftover = ""
        self.last_line = ""
        self.logs = {}


    def _process_log_dict(self, log_dict):
        log_list = log_dict.pop("logs", [])
        for key in sorted(log_dict.iterkeys()):
            log_list += self._process_log_dict(log_dict.pop(key))
        return log_list


    def _process_logs(self):
        """Go through the accumulated logs in self.logs and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired, it can be reconstructed from the status
        log by looking at the timestamp lines."""
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]

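    # For example (illustrative): pending logs tagged '1', '1.2', '1.10'
    # and '2' are flushed in the order 1, 1.2, 1.10, 2; tag components are
    # compared numerically because they are converted to ints.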

    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)


    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            print line


    def _format_warnings(self, last_line, warnings):
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]


    def _process_warnings(self, last_line, log_dict, warnings):
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)


    def write(self, data):
        # first check for any new console warnings
        warnings = self.job._read_warnings()
        self._process_warnings(self.last_line, self.logs, warnings)
        # now process the newest data written out
        data = self.leftover + data
        lines = data.split("\n")
        # process every line but the last one
        for line in lines[:-1]:
            self._process_line(line)
        # save the last line for later processing
        # since we may not have the whole line yet
        self.leftover = lines[-1]


    def flush(self):
        sys.stdout.flush()


    def close(self):
        if self.leftover:
            self._process_line(self.leftover)
        self._process_logs()
        self.flush()

# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from autotest_lib.server.site_server_job import site_server_job
except ImportError:
    class site_server_job(base_server_job):
        pass

class server_job(site_server_job):
    pass