blob: 3aa638bda389dfa36aa8ab06a4270465b912f7f0 [file] [log] [blame]
mblighf1c52842007-10-16 15:21:38 +00001"""
2The main job wrapper for the server side.
3
4This is the core infrastructure. Derived from the client side job.py
5
6Copyright Martin J. Bligh, Andy Whitcroft 2007
7"""
8
9__author__ = """
10Martin J. Bligh <mbligh@google.com>
11Andy Whitcroft <apw@shadowen.org>
12"""
13
mblighdbdac6c2008-03-05 15:49:58 +000014import os, sys, re, time, select, subprocess, traceback
mblighf1c52842007-10-16 15:21:38 +000015
mbligh6437ff52008-04-17 15:24:38 +000016from autotest_lib.client.bin import fd_stack
mbligh302482e2008-05-01 20:06:16 +000017from autotest_lib.client.common_lib import error, logging
mbligh6437ff52008-04-17 15:24:38 +000018from autotest_lib.server import test, subcommand
19from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
20from autotest_lib.server.utils import *
21
mbligh3f4bced2007-11-05 17:55:53 +000022
mblighed5a4102007-11-20 00:46:41 +000023# load up a control segment
24# these are all stored in <server_dir>/control_segments
25def load_control_segment(name):
26 server_dir = os.path.dirname(os.path.abspath(__file__))
mbligh7f86e0b2007-11-24 19:45:07 +000027 script_file = os.path.join(server_dir, "control_segments", name)
mblighed5a4102007-11-20 00:46:41 +000028 if os.path.exists(script_file):
29 return file(script_file).read()
30 else:
31 return ""
32
33
# Code templates exec()'d by server_job.run() and friends. Each template
# is prefixed with `preamble`, which imports the server-side API and binds
# the `job` and `machines` names that the control-file namespace provides.
preamble = """\
import os, sys

from autotest_lib.server import hosts, autotest, kvm, git, standalone_profiler
from autotest_lib.server import source_kernel, rpm_kernel, deb_kernel
from autotest_lib.server import git_kernel
from autotest_lib.server.subcommand import *
from autotest_lib.server.utils import run, get_tmp_dir, sh_escape
from autotest_lib.client.common_lib.error import *
from autotest_lib.client.common_lib import barrier

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier
if len(machines) > 1:
        open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""

# wrapper used to run a *client-side* control file on every machine: the
# control text itself is expected in the exec namespace as `control`
client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
        host = hosts.SSHHost(machine)
        at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

# collect crash dumps produced since `test_start_time` from every machine
crashdumps = """
def crashdumps(machine):
        host = hosts.SSHHost(machine, initialize=False)
        host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

# reboot every machine (used for the post-job reboot option)
reboot_segment="""\
def reboot(machine):
        host = hosts.SSHHost(machine, initialize=False)
        host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

# reinstall every machine (used for install_before/install_after)
install="""\
def install(machine):
        host = hosts.SSHHost(machine, initialize=False)
        host.machine_install()

job.parallel_simple(install, machines, log=False)
"""
85
# load up the verifier control segment, with an optional site-specific hook
# (the site_* segment runs first if present; missing segments load as "")
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}
104
105
class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        control
            the control file for this job
    """

    # version of the status-log format this job writes; recorded in the
    # job keyvals and used to construct a matching status_lib parser
    STATUS_VERSION = 1
125
126
    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
        control
            The control file (pathname of)
        args
            args to pass to the control file
        resultdir
            where to throw the results
        label
            label for the job
        user
            Username for the job (email address)
        machines
            list of machine names the job runs against
        client
            True if a client-side control file
        parse_job
            tag for continuous result parsing; parsing is only enabled
            when this is non-empty and there is exactly one machine
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            # normalize CRLF line endings in the control file
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        # file-like objects polled for console warnings by record()
        self.warning_loggers = set()

        # capture stdout/stderr through redirectable fd stacks
        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        # start from a clean status file for this run
        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label' : label, 'user' : user,
                    'hostname' : ','.join(machines),
                    'status_version' : str(self.STATUS_VERSION)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        # continuous parsing only works for single-machine jobs here;
        # multi-machine jobs set up per-machine parsers in parallel_simple
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
187
188
    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary.

        resultdir: results directory the parser job model is built from.
        """
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)  # 0 => unbuffered
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job,
                                       self.job_model)
        else:
            # job exists: attach its indices to our model instead
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx
213
214
mblighfe0af112008-04-17 15:27:47 +0000215 def cleanup_parser(self):
mbligh6437ff52008-04-17 15:24:38 +0000216 """This should be called after the server job is finished
217 to carry out any remaining cleanup (e.g. flushing any
218 remaining test results to the results db)"""
219 if not self.using_parser:
220 return
221 final_tests = self.parser.end()
222 for test in final_tests:
jadmanski28816c22008-05-21 18:18:05 +0000223 self.__insert_test(test)
mblighfe0af112008-04-17 15:27:47 +0000224 self.using_parser = False
mbligh6437ff52008-04-17 15:24:38 +0000225
mblighf1c52842007-10-16 15:21:38 +0000226
    def verify(self):
        """Run the 'verify' control segment against self.machines.

        Raises error.AutoservError if no machines are specified; any
        failure inside the segment is recorded as ABORT in the status
        log and then re-raised.
        """
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines' : self.machines, 'job' : self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n'
                   + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise
239
240
    def repair(self):
        """Run the 'repair' control segment, then re-verify.

        Raises error.AutoservError if no machines are specified.
        Failures during repair are printed but swallowed so the
        follow-up verify() always runs.
        """
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines' : self.machines, 'job' : self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception, exc:
            print 'Exception occured during repair'
            traceback.print_exc()
        self.verify()
mblighe25fd5b2008-01-22 17:23:37 +0000253
254
mblighcaa62c22008-04-07 21:51:17 +0000255 def enable_external_logging(self):
256 """Start or restart external logging mechanism.
257 """
258 pass
259
260
261 def disable_external_logging(self):
262 """ Pause or stop external logging mechanism.
263 """
264 pass
265
266
267 def use_external_logging(self):
268 """Return True if external logging should be used.
269 """
270 return False
271
272
    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work.

        function: called once per machine, with the machine name as
            its only argument
        machines: list of machine names
        log, timeout: passed straight through to
            subcommand.parallel_simple
        """
        # forking happens unless this is the single machine the job was
        # originally created for (in which case the parser is already set up)
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                # NOTE(review): mutates self (parse_job, machines,
                # resultdir) — assumes subcommand.parallel_simple runs
                # this wrapper in a forked subprocess so the parent job
                # object is untouched; confirm against subcommand.py
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir,
                                              machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)
294
295
mblighe8b37a92007-12-19 15:54:11 +0000296 def run(self, reboot = False, install_before = False,
mblighddd54332008-03-07 18:14:06 +0000297 install_after = False, collect_crashdumps = True,
298 namespace = {}):
mbligh60dbd502007-10-26 14:59:31 +0000299 # use a copy so changes don't affect the original dictionary
300 namespace = namespace.copy()
mblighe8b37a92007-12-19 15:54:11 +0000301 machines = self.machines
mbligh60dbd502007-10-26 14:59:31 +0000302
mblighfaf0cd42007-11-19 16:00:24 +0000303 self.aborted = False
mblighf1c52842007-10-16 15:21:38 +0000304 namespace['machines'] = machines
305 namespace['args'] = self.args
306 namespace['job'] = self
mbligh6e294382007-11-05 18:11:29 +0000307 test_start_time = int(time.time())
mblighf1c52842007-10-16 15:21:38 +0000308
mbligh87c5d882007-10-29 17:07:24 +0000309 os.chdir(self.resultdir)
mblighcaa62c22008-04-07 21:51:17 +0000310
311 self.enable_external_logging()
mbligh87c5d882007-10-29 17:07:24 +0000312 status_log = os.path.join(self.resultdir, 'status.log')
mblighf1c52842007-10-16 15:21:38 +0000313 try:
mblighf36243d2007-10-30 15:36:16 +0000314 if install_before and machines:
315 exec(preamble + install, namespace, namespace)
mblighf1c52842007-10-16 15:21:38 +0000316 if self.client:
317 namespace['control'] = self.control
318 open('control', 'w').write(self.control)
319 open('control.srv', 'w').write(client_wrapper)
320 server_control = client_wrapper
321 else:
322 open('control.srv', 'w').write(self.control)
323 server_control = self.control
mblighf1c52842007-10-16 15:21:38 +0000324 exec(preamble + server_control, namespace, namespace)
325
326 finally:
mblighddd54332008-03-07 18:14:06 +0000327 if machines and collect_crashdumps:
mbligh6e294382007-11-05 18:11:29 +0000328 namespace['test_start_time'] = test_start_time
mbligh98ff1462007-12-19 16:27:55 +0000329 exec(preamble + crashdumps,
330 namespace, namespace)
mblighcaa62c22008-04-07 21:51:17 +0000331 self.disable_external_logging()
mblighf1c52842007-10-16 15:21:38 +0000332 if reboot and machines:
mbligh98ff1462007-12-19 16:27:55 +0000333 exec(preamble + reboot_segment,
334 namespace, namespace)
mblighf36243d2007-10-30 15:36:16 +0000335 if install_after and machines:
336 exec(preamble + install, namespace, namespace)
mblighf1c52842007-10-16 15:21:38 +0000337
338
339 def run_test(self, url, *args, **dargs):
340 """Summon a test object and run it.
341
342 tag
343 tag to add to testname
344 url
345 url of the test to run
346 """
347
mblighf1c52842007-10-16 15:21:38 +0000348 (group, testname) = test.testname(url)
349 tag = None
350 subdir = testname
mbligh43ac5222007-10-16 15:55:01 +0000351
mblighf1c52842007-10-16 15:21:38 +0000352 if dargs.has_key('tag'):
353 tag = dargs['tag']
354 del dargs['tag']
355 if tag:
356 subdir += '.' + tag
mblighf1c52842007-10-16 15:21:38 +0000357
mbligh43ac5222007-10-16 15:55:01 +0000358 try:
359 test.runtest(self, url, tag, args, dargs)
360 self.record('GOOD', subdir, testname, 'completed successfully')
mbligh302482e2008-05-01 20:06:16 +0000361 except error.TestNAError, detail:
362 self.record('TEST_NA', subdir, testmame, str(detail))
mbligh43ac5222007-10-16 15:55:01 +0000363 except Exception, detail:
mbligh302482e2008-05-01 20:06:16 +0000364 info = str(detail) + "\n" + traceback.format_exc()
365 self.record('FAIL', subdir, testname, info)
mblighf1c52842007-10-16 15:21:38 +0000366
367
    def run_group(self, function, *args, **dargs):
        """\
        Run a function inside a START/END status-log group.

        function:
            subroutine to run
        *args:
            arguments for the function
        tag (keyword, optional):
            alternate name for the group; defaults to
            function.__name__

        Returns the function's return value (None if it raised).
        Failures inside the function are recorded as END FAIL; a
        TestError raised by the bookkeeping itself is swallowed so
        that subsequent tests still run, anything else is re-raised
        wrapped in a TestError.
        """

        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                # indent everything recorded inside the group
                self.record_prefix += '\t'
                result = function(*args, **dargs)
            except Exception:
                self.record_prefix = old_record_prefix
                self.record('END FAIL', None, name,
                            traceback.format_exc())
            else:
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)

        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' +
                                  traceback.format_exc())

        return result
412
413
    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.

        A failed reboot is recorded as END FAIL (the exception is not
        re-raised here); on success the new kernel version is recorded
        as an optional field of the END GOOD line.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception:
            self.record_prefix = old_record_prefix
            self.record('END FAIL', None, 'reboot',
                        traceback.format_exc())
        else:
            # only query the kernel when the reboot actually worked
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})
440
441
    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.logging.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        optional_fields: dict of extra key=value pairs to append to
                         the record (e.g. {"kernel": version})

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping, and is
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status,
                      optional_fields=optional_fields)
mblighdab39662008-02-27 16:47:55 +0000483
484
    def _read_warnings(self):
        """Drain all complete warning lines from the registered
        warning loggers (self.warning_loggers).

        Each logger line is expected to be '<epoch_time><TAB><message>'.
        Loggers whose pipes have closed are dropped from
        self.warning_loggers. Returns a list of (timestamp, message)
        tuples sorted by timestamp.
        """
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers,
                                          [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
mblighf4e04152008-02-21 16:05:53 +0000512
513
mblighdab39662008-02-27 16:47:55 +0000514 def _render_record(self, status_code, subdir, operation, status='',
jadmanskif35bbb62008-05-29 21:36:04 +0000515 epoch_time=None, record_prefix=None,
516 optional_fields=None):
mblighf4e04152008-02-21 16:05:53 +0000517 """
mblighdab39662008-02-27 16:47:55 +0000518 Internal Function to generate a record to be written into a
519 status log. For use by server_job.* classes only.
mblighf1c52842007-10-16 15:21:38 +0000520 """
mblighf1c52842007-10-16 15:21:38 +0000521 if subdir:
522 if re.match(r'[\n\t]', subdir):
mbligh6437ff52008-04-17 15:24:38 +0000523 raise ValueError(
524 'Invalid character in subdir string')
mblighf1c52842007-10-16 15:21:38 +0000525 substr = subdir
526 else:
527 substr = '----'
mbligh6437ff52008-04-17 15:24:38 +0000528
mbligh302482e2008-05-01 20:06:16 +0000529 if not logging.is_valid_status(status_code):
mbligh6437ff52008-04-17 15:24:38 +0000530 raise ValueError('Invalid status code supplied: %s' %
531 status_code)
mblighe25fd5b2008-01-22 17:23:37 +0000532 if not operation:
533 operation = '----'
mblighf1c52842007-10-16 15:21:38 +0000534 if re.match(r'[\n\t]', operation):
mbligh6437ff52008-04-17 15:24:38 +0000535 raise ValueError(
536 'Invalid character in operation string')
mblighf1c52842007-10-16 15:21:38 +0000537 operation = operation.rstrip()
538 status = status.rstrip()
539 status = re.sub(r"\t", " ", status)
540 # Ensure any continuation lines are marked so we can
541 # detect them in the status file to ensure it is parsable.
542 status = re.sub(r"\n", "\n" + self.record_prefix + " ", status)
543
jadmanskif35bbb62008-05-29 21:36:04 +0000544 if not optional_fields:
545 optional_fields = {}
546
mbligh30270302007-11-05 20:33:52 +0000547 # Generate timestamps for inclusion in the logs
mblighf4e04152008-02-21 16:05:53 +0000548 if epoch_time is None:
549 epoch_time = int(time.time())
mbligh30270302007-11-05 20:33:52 +0000550 local_time = time.localtime(epoch_time)
jadmanskif35bbb62008-05-29 21:36:04 +0000551 optional_fields["timestamp"] = str(epoch_time)
552 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
553 local_time)
554
555 fields = [status_code, substr, operation]
556 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
557 fields.append(status)
mbligh30270302007-11-05 20:33:52 +0000558
mblighdab39662008-02-27 16:47:55 +0000559 if record_prefix is None:
560 record_prefix = self.record_prefix
561
jadmanskif35bbb62008-05-29 21:36:04 +0000562 msg = '\t'.join(str(x) for x in fields)
563
mblighdab39662008-02-27 16:47:55 +0000564 return record_prefix + msg + '\n'
565
566
567 def _record_prerendered(self, msg):
568 """
569 Record a pre-rendered msg into the status logs. The only
570 change this makes to the message is to add on the local
571 indentation. Should not be called outside of server_job.*
572 classes. Unlike __record, this does not write the message
573 to standard output.
574 """
mbligh6437ff52008-04-17 15:24:38 +0000575 lines = []
mblighdab39662008-02-27 16:47:55 +0000576 status_file = os.path.join(self.resultdir, 'status.log')
577 status_log = open(status_file, 'a')
578 for line in msg.splitlines():
579 line = self.record_prefix + line + '\n'
mbligh6437ff52008-04-17 15:24:38 +0000580 lines.append(line)
mblighdab39662008-02-27 16:47:55 +0000581 status_log.write(line)
582 status_log.close()
mbligh6437ff52008-04-17 15:24:38 +0000583 self.__parse_status(lines)
mblighdab39662008-02-27 16:47:55 +0000584
585
    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.

        Writes the rendered line to stdout, to the job-level
        status.log, and (when subdir is given) to the per-test status
        file, then feeds it to the continuous parser.
        """

        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time,
                                  optional_fields=optional_fields)


        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            # also mirror the record into <resultdir>/<subdir>/status
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())
mblighb03ba642008-03-13 17:37:17 +0000609
610
mbligh6437ff52008-04-17 15:24:38 +0000611 def __parse_status(self, new_lines):
612 if not self.using_parser:
613 return
614 new_tests = self.parser.process_lines(new_lines)
615 for test in new_tests:
jadmanski28816c22008-05-21 18:18:05 +0000616 self.__insert_test(test)
617
618
    def __insert_test(self, test):
        """ An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            # deliberate best-effort: warn on stderr but never raise
            msg = ("WARNING: An unexpected error occured while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg
mblighdab39662008-02-27 16:47:55 +0000631
632
# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    # matches lines the client quotes as status-log content:
    # "AUTOTEST_STATUS:<tag>:<line>"
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    # captures the leading tab indentation of a status line
    extract_indent = re.compile(r"^(\t*).*$")

    def __init__(self, job):
        # job: the server_job whose status logs we write into
        self.job = job
        # partial (unterminated) line held over from the last write()
        self.leftover = ""
        # most recent complete line, used for warning indentation
        self.last_line = ""
        # nested dict of buffered tagged log lines (see _process_quoted_line)
        self.logs = {}
648
649
650 def _process_log_dict(self, log_dict):
651 log_list = log_dict.pop("logs", [])
652 for key in sorted(log_dict.iterkeys()):
653 log_list += self._process_log_dict(log_dict.pop(key))
654 return log_list
655
656
    def _process_logs(self):
        """Go through the accumulated logs in self.log and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired that one can be reconstructed from the
        status log by looking at timestamp lines.

        Also updates self.last_line to the final flushed line so later
        warnings indent correctly.
        """
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]
675
676
    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            # tag is a dotted path like "1.2.3": descend/create the
            # nested dict level it addresses and buffer the line there
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)
695
696
    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            # quoted status line: route through the status machinery
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            # anything else just goes to stdout
            print line
mblighdab39662008-02-27 16:47:55 +0000708
709
    def _format_warnings(self, last_line, warnings):
        """Render (timestamp, msg) warning tuples as WARN status-log
        lines, indented to match last_line's grouping level."""
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]
719
720
    def _process_warnings(self, last_line, log_dict, warnings):
        """Distribute console warnings into the (possibly nested)
        log_dict so they show up within the right grouping when the
        buffered logs are flushed.

        last_line: most recent log line, used to pick indentation
        log_dict: nested log buffer (see _process_quoted_line)
        warnings: list of (timestamp, msg) tuples
        """
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)
739
740
741 def write(self, data):
742 # first check for any new console warnings
743 warnings = self.job._read_warnings()
744 self._process_warnings(self.last_line, self.logs, warnings)
745 # now process the newest data written out
746 data = self.leftover + data
747 lines = data.split("\n")
748 # process every line but the last one
749 for line in lines[:-1]:
750 self._process_line(line)
751 # save the last line for later processing
752 # since we may not have the whole line yet
753 self.leftover = lines[-1]
754
755
756 def flush(self):
757 sys.stdout.flush()
mblighdab39662008-02-27 16:47:55 +0000758
759
760 def close(self):
761 if self.leftover:
762 self._process_line(self.leftover)
763 self._process_logs()
764 self.flush()
mblighcaa62c22008-04-07 21:51:17 +0000765
766# site_server_job.py may be non-existant or empty, make sure that an
767# appropriate site_server_job class is created nevertheless
768try:
mblighccb9e182008-04-17 15:42:10 +0000769 from autotest_lib.server.site_server_job import site_server_job
mblighcaa62c22008-04-07 21:51:17 +0000770except ImportError:
771 class site_server_job(base_server_job):
772 pass
773
774class server_job(site_server_job):
775 pass