"""
The main job wrapper for the server side.

This is the core infrastructure. Derived from the client side job.py

Copyright Martin J. Bligh, Andy Whitcroft 2007
"""

__author__ = """
Martin J. Bligh <mbligh@google.com>
Andy Whitcroft <apw@shadowen.org>
"""

import os, sys, re, time, select, subprocess, traceback

from autotest_lib.client.bin import fd_stack
from autotest_lib.client.common_lib import error, logging
from autotest_lib.server import test, subcommand
from autotest_lib.tko import db as tko_db, status_lib, utils as tko_utils
from autotest_lib.server.utils import *

# load up a control segment
# these are all stored in <server_dir>/control_segments
def load_control_segment(name):
    server_dir = os.path.dirname(os.path.abspath(__file__))
    script_file = os.path.join(server_dir, "control_segments", name)
    if os.path.exists(script_file):
        return file(script_file).read()
    else:
        return ""

preamble = """\
import os, sys

from autotest_lib.server import hosts, autotest, kvm, git, standalone_profiler
from autotest_lib.server import source_kernel, rpm_kernel, deb_kernel
from autotest_lib.server import git_kernel
from autotest_lib.server.subcommand import *
from autotest_lib.server.utils import run, get_tmp_dir, sh_escape
from autotest_lib.client.common_lib.error import *
from autotest_lib.client.common_lib import barrier

autotest.Autotest.job = job
hosts.SSHHost.job = job
barrier = barrier.barrier
if len(machines) > 1:
    open('.machines', 'w').write('\\n'.join(machines) + '\\n')
"""
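
# The preamble above is prepended to every control segment before it is
# exec'd (see run(), verify() and repair() below), so a segment can assume
# that 'job', 'machines' and the standard server imports are already in
# its namespace.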

client_wrapper = """
at = autotest.Autotest()

def run_client(machine):
    host = hosts.SSHHost(machine)
    at.run(control, host=host)

job.parallel_simple(run_client, machines)
"""

crashdumps = """
def crashdumps(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.get_crashdumps(test_start_time)

job.parallel_simple(crashdumps, machines, log=False)
"""

reboot_segment = """\
def reboot(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.reboot()

job.parallel_simple(reboot, machines, log=False)
"""

install = """\
def install(machine):
    host = hosts.SSHHost(machine, initialize=False)
    host.machine_install()

job.parallel_simple(install, machines, log=False)
"""

# load up the verifier control segment, with an optional site-specific hook
verify = load_control_segment("site_verify")
verify += load_control_segment("verify")

# load up the repair control segment, with an optional site-specific hook
repair = load_control_segment("site_repair")
repair += load_control_segment("repair")


# load up site-specific code for generating site-specific job data
try:
    import site_job
    get_site_job_data = site_job.get_site_job_data
    del site_job
except ImportError:
    # by default provide a stub that generates no site data
    def get_site_job_data(job):
        return {}


class base_server_job:
    """The actual job against which we do everything.

    Properties:
        autodir
            The top level autotest directory (/usr/local/autotest).
        serverdir
            <autodir>/server/
        clientdir
            <autodir>/client/
        conmuxdir
            <autodir>/conmux/
        testdir
            <autodir>/server/tests/
        control
            the control file for this job
    """

    STATUS_VERSION = 1


    def __init__(self, control, args, resultdir, label, user, machines,
                 client=False, parse_job=""):
        """
        control
            The control file (pathname of)
        args
            args to pass to the control file
        resultdir
            where to throw the results
        label
            label for the job
        user
            Username for the job (email address)
        machines
            A list of hostnames the job will run on
        client
            True if a client-side control file
        parse_job
            A tag to use when inserting results into the tko database;
            if empty, continuous result parsing is disabled
        """
        path = os.path.dirname(__file__)
        self.autodir = os.path.abspath(os.path.join(path, '..'))
        self.serverdir = os.path.join(self.autodir, 'server')
        self.testdir = os.path.join(self.serverdir, 'tests')
        self.tmpdir = os.path.join(self.serverdir, 'tmp')
        self.conmuxdir = os.path.join(self.autodir, 'conmux')
        self.clientdir = os.path.join(self.autodir, 'client')
        if control:
            self.control = open(control, 'r').read()
            self.control = re.sub('\r', '', self.control)
        else:
            self.control = None
        self.resultdir = resultdir
        if not os.path.exists(resultdir):
            os.mkdir(resultdir)
        self.debugdir = os.path.join(resultdir, 'debug')
        if not os.path.exists(self.debugdir):
            os.mkdir(self.debugdir)
        self.status = os.path.join(resultdir, 'status')
        self.label = label
        self.user = user
        self.args = args
        self.machines = machines
        self.client = client
        self.record_prefix = ''
        self.warning_loggers = set()

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        if os.path.exists(self.status):
            os.unlink(self.status)
        job_data = {'label': label, 'user': user,
                    'hostname': ','.join(machines),
                    'status_version': str(self.STATUS_VERSION)}
        job_data.update(get_site_job_data(self))
        write_keyval(self.resultdir, job_data)

        self.parse_job = parse_job
        if self.parse_job and len(machines) == 1:
            self.using_parser = True
            self.init_parser(resultdir)
        else:
            self.using_parser = False
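
    # A minimal sketch (all values hypothetical) of how autoserv might
    # construct this object for a single-machine job with continuous
    # parsing enabled:
    #
    #   job = server_job('control.srv', [], 'results/', 'smoke-test',
    #                    'mbligh@google.com', ['192.168.0.5'],
    #                    client=False, parse_job='12-mbligh')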

    def init_parser(self, resultdir):
        """Start the continuous parsing of resultdir. This sets up
        the database connection and inserts the basic job object into
        the database if necessary."""
        # redirect parser debugging to .parse.log
        parse_log = os.path.join(resultdir, '.parse.log')
        parse_log = open(parse_log, 'w', 0)
        tko_utils.redirect_parser_debugging(parse_log)
        # create a job model object and set up the db
        self.results_db = tko_db.db(autocommit=True)
        self.parser = status_lib.parser(self.STATUS_VERSION)
        self.job_model = self.parser.make_job(resultdir)
        self.parser.start(self.job_model)
        # check if a job already exists in the db and insert it if
        # it does not
        job_idx = self.results_db.find_job(self.parse_job)
        if job_idx is None:
            self.results_db.insert_job(self.parse_job,
                                       self.job_model)
        else:
            machine_idx = self.results_db.lookup_machine(
                self.job_model.machine)
            self.job_model.index = job_idx
            self.job_model.machine_idx = machine_idx


    def cleanup_parser(self):
        """This should be called after the server job is finished
        to carry out any remaining cleanup (e.g. flushing any
        remaining test results to the results db)"""
        if not self.using_parser:
            return
        final_tests = self.parser.end()
        for test in final_tests:
            self.__insert_test(test)
        self.using_parser = False


    def verify(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to verify')
        try:
            namespace = {'machines': self.machines, 'job': self}
            exec(preamble + verify, namespace, namespace)
        except Exception, e:
            msg = ('Verify failed\n' + str(e) + '\n'
                   + traceback.format_exc())
            self.record('ABORT', None, None, msg)
            raise


    def repair(self):
        if not self.machines:
            raise error.AutoservError(
                'No machines specified to repair')
        namespace = {'machines': self.machines, 'job': self}
        # no matter what happens during repair, go on to try to reverify
        try:
            exec(preamble + repair, namespace, namespace)
        except Exception, exc:
            print 'Exception occurred during repair'
            traceback.print_exc()
        self.verify()

    def enable_external_logging(self):
        """Start or restart external logging mechanism."""
        pass


    def disable_external_logging(self):
        """Pause or stop external logging mechanism."""
        pass


    def use_external_logging(self):
        """Return True if external logging should be used."""
        return False
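
    # The three hooks above are deliberately no-ops here; a site_server_job
    # subclass (see the bottom of this file) can override them to stream
    # logs into site-specific infrastructure.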

    def parallel_simple(self, function, machines, log=True, timeout=None):
        """Run 'function' using parallel_simple, with an extra
        wrapper to handle the necessary setup for continuous parsing,
        if possible. If continuous parsing is already properly
        initialized then this should just work."""
        is_forking = not (len(machines) == 1 and
                          self.machines == machines)
        if self.parse_job and is_forking:
            def wrapper(machine):
                self.parse_job += "/" + machine
                self.using_parser = True
                self.machines = [machine]
                self.resultdir = os.path.join(self.resultdir,
                                              machine)
                self.init_parser(self.resultdir)
                result = function(machine)
                self.cleanup_parser()
                return result
        else:
            wrapper = function
        subcommand.parallel_simple(wrapper, machines, log, timeout)
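
    # For example, the client_wrapper control segment calls
    #   job.parallel_simple(run_client, machines)
    # which forks one subcommand per machine; each child re-roots its
    # results into <resultdir>/<machine> with its own parser instance.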
295
mblighe8b37a92007-12-19 15:54:11 +0000296 def run(self, reboot = False, install_before = False,
mblighddd54332008-03-07 18:14:06 +0000297 install_after = False, collect_crashdumps = True,
298 namespace = {}):
mbligh60dbd502007-10-26 14:59:31 +0000299 # use a copy so changes don't affect the original dictionary
300 namespace = namespace.copy()
mblighe8b37a92007-12-19 15:54:11 +0000301 machines = self.machines
mbligh60dbd502007-10-26 14:59:31 +0000302
mblighfaf0cd42007-11-19 16:00:24 +0000303 self.aborted = False
mblighf1c52842007-10-16 15:21:38 +0000304 namespace['machines'] = machines
305 namespace['args'] = self.args
306 namespace['job'] = self
mbligh6e294382007-11-05 18:11:29 +0000307 test_start_time = int(time.time())
mblighf1c52842007-10-16 15:21:38 +0000308
mbligh87c5d882007-10-29 17:07:24 +0000309 os.chdir(self.resultdir)
mblighcaa62c22008-04-07 21:51:17 +0000310
311 self.enable_external_logging()
mbligh87c5d882007-10-29 17:07:24 +0000312 status_log = os.path.join(self.resultdir, 'status.log')
mblighf1c52842007-10-16 15:21:38 +0000313 try:
mblighf36243d2007-10-30 15:36:16 +0000314 if install_before and machines:
315 exec(preamble + install, namespace, namespace)
mblighf1c52842007-10-16 15:21:38 +0000316 if self.client:
317 namespace['control'] = self.control
318 open('control', 'w').write(self.control)
319 open('control.srv', 'w').write(client_wrapper)
320 server_control = client_wrapper
321 else:
322 open('control.srv', 'w').write(self.control)
323 server_control = self.control
mblighf1c52842007-10-16 15:21:38 +0000324 exec(preamble + server_control, namespace, namespace)
325
326 finally:
mblighddd54332008-03-07 18:14:06 +0000327 if machines and collect_crashdumps:
mbligh6e294382007-11-05 18:11:29 +0000328 namespace['test_start_time'] = test_start_time
mbligh98ff1462007-12-19 16:27:55 +0000329 exec(preamble + crashdumps,
330 namespace, namespace)
mblighcaa62c22008-04-07 21:51:17 +0000331 self.disable_external_logging()
mblighf1c52842007-10-16 15:21:38 +0000332 if reboot and machines:
mbligh98ff1462007-12-19 16:27:55 +0000333 exec(preamble + reboot_segment,
334 namespace, namespace)
mblighf36243d2007-10-30 15:36:16 +0000335 if install_after and machines:
336 exec(preamble + install, namespace, namespace)
mblighf1c52842007-10-16 15:21:38 +0000337
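
    # Note that for a client-side job the client control file is written
    # out as 'control' and the wrapper actually exec'd as 'control.srv',
    # so the results directory always records exactly what was executed.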

    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
            tag to add to testname
        url
            url of the test to run
        """

        (group, testname) = test.testname(url)
        tag = None
        subdir = testname

        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                subdir += '.' + tag

        try:
            test.runtest(self, url, tag, args, dargs)
            self.record('GOOD', subdir, testname, 'completed successfully')
        except error.TestNAError, detail:
            self.record('TEST_NA', subdir, testname, str(detail))
        except Exception, detail:
            info = str(detail) + "\n" + traceback.format_exc()
            self.record('FAIL', subdir, testname, info)


    def run_group(self, function, *args, **dargs):
        """\
        function:
            subroutine to run
        *args:
            arguments for the function
        """

        result = None
        name = function.__name__

        # Allow the tag for the group to be specified.
        if dargs.has_key('tag'):
            tag = dargs['tag']
            del dargs['tag']
            if tag:
                name = tag

        old_record_prefix = self.record_prefix
        try:
            try:
                self.record('START', None, name)
                self.record_prefix += '\t'
                result = function(*args, **dargs)
            except Exception, e:
                self.record_prefix = old_record_prefix
                err_msg = str(e) + '\n'
                err_msg += traceback.format_exc()
                self.record('END FAIL', None, name, err_msg)
            else:
                self.record_prefix = old_record_prefix
                self.record('END GOOD', None, name)

        # We don't want to raise up an error higher if it's just
        # a TestError - we want to carry on to other tests. Hence
        # this outer try/except block.
        except error.TestError:
            pass
        except:
            raise error.TestError(name + ' failed\n' +
                                  traceback.format_exc())

        return result


    def run_reboot(self, reboot_func, get_kernel_func):
        """\
        A specialization of run_group meant specifically for handling
        a reboot. Includes support for capturing the kernel version
        after the reboot.

        reboot_func: a function that carries out the reboot

        get_kernel_func: a function that returns a string
        representing the kernel version.
        """

        old_record_prefix = self.record_prefix
        try:
            self.record('START', None, 'reboot')
            self.record_prefix += '\t'
            reboot_func()
        except Exception, e:
            self.record_prefix = old_record_prefix
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, 'reboot', err_msg)
        else:
            kernel = get_kernel_func()
            self.record_prefix = old_record_prefix
            self.record('END GOOD', None, 'reboot',
                        optional_fields={"kernel": kernel})


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """
        Record job-level status

        The intent is to make this file both machine parseable and
        human readable. That involves a little more complexity, but
        really isn't all that bad ;-)

        Format is <status code>\t<subdir>\t<operation>\t<status>

        status code: see common_lib.logging.is_valid_status()
                     for valid status definition

        subdir: MUST be a relevant subdirectory in the results,
        or None, which will be represented as '----'

        operation: description of what you ran (e.g. "dbench", or
                   "mkfs -t foobar /dev/sda9")

        status: error message or "completed successfully"

        ------------------------------------------------------------

        Initial tabs indicate indent levels for grouping and are
        governed by self.record_prefix

        multiline messages have secondary lines prefaced by a double
        space ('  ')

        Executing this method will trigger the logging of all new
        warnings to date from the various console loggers.
        """
        # poll all our warning loggers for new warnings
        warnings = self._read_warnings()
        for timestamp, msg in warnings:
            self.__record("WARN", None, None, msg, timestamp)

        # write out the actual status log line
        self.__record(status_code, subdir, operation, status,
                      optional_fields=optional_fields)
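
    # An illustrative status.log line produced by this method (the
    # timestamp values are hypothetical); fields are tab-separated:
    #
    #   GOOD	dbench	dbench	timestamp=1212421501	localtime=Jun 02 16:25:01	completed successfully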

    def _read_warnings(self):
        warnings = []
        while True:
            # pull in a line of output from every logger that has
            # output ready to be read
            loggers, _, _ = select.select(self.warning_loggers,
                                          [], [], 0)
            closed_loggers = set()
            for logger in loggers:
                line = logger.readline()
                # record any broken pipes (aka line == empty)
                if len(line) == 0:
                    closed_loggers.add(logger)
                    continue
                timestamp, msg = line.split('\t', 1)
                warnings.append((int(timestamp), msg.strip()))

            # stop listening to loggers that are closed
            self.warning_loggers -= closed_loggers

            # stop if none of the loggers have any output left
            if not loggers:
                break

        # sort into timestamp order
        warnings.sort()
        return warnings
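
    # Each warning logger is expected to emit lines of the form
    # "<epoch timestamp>\t<message>\n"; an empty read means the logger's
    # pipe has closed.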

    def _render_record(self, status_code, subdir, operation, status='',
                       epoch_time=None, record_prefix=None,
                       optional_fields=None):
        """
        Internal function to generate a record to be written into a
        status log. For use by server_job.* classes only.
        """
        if subdir:
            if re.match(r'[\n\t]', subdir):
                raise ValueError(
                    'Invalid character in subdir string')
            substr = subdir
        else:
            substr = '----'

        if not logging.is_valid_status(status_code):
            raise ValueError('Invalid status code supplied: %s' %
                             status_code)
        if not operation:
            operation = '----'
        if re.match(r'[\n\t]', operation):
            raise ValueError(
                'Invalid character in operation string')
        operation = operation.rstrip()
        status = status.rstrip()
        status = re.sub(r"\t", "  ", status)
        # Ensure any continuation lines are marked so we can
        # detect them in the status file to ensure it is parsable.
        status = re.sub(r"\n", "\n" + self.record_prefix + "  ", status)

        if not optional_fields:
            optional_fields = {}

        # Generate timestamps for inclusion in the logs
        if epoch_time is None:
            epoch_time = int(time.time())
        local_time = time.localtime(epoch_time)
        optional_fields["timestamp"] = str(epoch_time)
        optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
                                                     local_time)

        fields = [status_code, substr, operation]
        fields += ["%s=%s" % x for x in optional_fields.iteritems()]
        fields.append(status)

        if record_prefix is None:
            record_prefix = self.record_prefix

        msg = '\t'.join(str(x) for x in fields)

        return record_prefix + msg + '\n'


    def _record_prerendered(self, msg):
        """
        Record a pre-rendered msg into the status logs. The only
        change this makes to the message is to add on the local
        indentation. Should not be called outside of server_job.*
        classes. Unlike __record, this does not write the message
        to standard output.
        """
        lines = []
        status_file = os.path.join(self.resultdir, 'status.log')
        status_log = open(status_file, 'a')
        for line in msg.splitlines():
            line = self.record_prefix + line + '\n'
            lines.append(line)
            status_log.write(line)
        status_log.close()
        self.__parse_status(lines)


    def __record(self, status_code, subdir, operation, status='',
                 epoch_time=None, optional_fields=None):
        """
        Actual function for recording a single line into the status
        logs. Should never be called directly, only by job.record as
        this would bypass the console monitor logging.
        """

        msg = self._render_record(status_code, subdir, operation,
                                  status, epoch_time,
                                  optional_fields=optional_fields)

        status_file = os.path.join(self.resultdir, 'status.log')
        sys.stdout.write(msg)
        open(status_file, "a").write(msg)
        if subdir:
            test_dir = os.path.join(self.resultdir, subdir)
            if not os.path.exists(test_dir):
                os.mkdir(test_dir)
            status_file = os.path.join(test_dir, 'status')
            open(status_file, "a").write(msg)
        self.__parse_status(msg.splitlines())


    def __parse_status(self, new_lines):
        if not self.using_parser:
            return
        new_tests = self.parser.process_lines(new_lines)
        for test in new_tests:
            self.__insert_test(test)


    def __insert_test(self, test):
        """An internal method to insert a new test result into the
        database. This method will not raise an exception, even if an
        error occurs during the insert, to avoid failing a test
        simply because of unexpected database issues."""
        try:
            self.results_db.insert_test(self.job_model, test)
        except Exception:
            msg = ("WARNING: An unexpected error occurred while "
                   "inserting test results into the database. "
                   "Ignoring error.\n" + traceback.format_exc())
            print >> sys.stderr, msg


# a file-like object for catching stderr from an autotest client and
# extracting status logs from it
class client_logger(object):
    """Partial file object to write to both stdout and
    the status log file. We only implement those methods
    utils.run() actually calls.
    """
    parser = re.compile(r"^AUTOTEST_STATUS:([^:]*):(.*)$")
    extract_indent = re.compile(r"^(\t*).*$")

    def __init__(self, job):
        self.job = job
        self.leftover = ""
        self.last_line = ""
        self.logs = {}


    def _process_log_dict(self, log_dict):
        log_list = log_dict.pop("logs", [])
        for key in sorted(log_dict.iterkeys()):
            log_list += self._process_log_dict(log_dict.pop(key))
        return log_list


    def _process_logs(self):
        """Go through the accumulated logs in self.logs and print them
        out to stdout and the status log. Note that this processes
        logs in an ordering where:

        1) logs to different tags are never interleaved
        2) logs to x.y come before logs to x.y.z for all z
        3) logs to x.y come before x.z whenever y < z

        Note that this will in general not be the same as the
        chronological ordering of the logs. However, if a chronological
        ordering is desired it can be reconstructed from the status
        log by looking at the timestamp lines."""
        log_list = self._process_log_dict(self.logs)
        for line in log_list:
            self.job._record_prerendered(line + '\n')
        if log_list:
            self.last_line = log_list[-1]
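
    # For example, buffered lines tagged 1, 1.1, 1.2 and 2 are flushed in
    # exactly that order, regardless of when each line actually arrived.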

    def _process_quoted_line(self, tag, line):
        """Process a line quoted with an AUTOTEST_STATUS flag. If the
        tag is blank then we want to push out all the data we've been
        building up in self.logs, and then the newest line. If the
        tag is not blank, then push the line into the logs for handling
        later."""
        print line
        if tag == "":
            self._process_logs()
            self.job._record_prerendered(line + '\n')
            self.last_line = line
        else:
            tag_parts = [int(x) for x in tag.split(".")]
            log_dict = self.logs
            for part in tag_parts:
                log_dict = log_dict.setdefault(part, {})
            log_list = log_dict.setdefault("logs", [])
            log_list.append(line)


    def _process_line(self, line):
        """Write out a line of data to the appropriate stream. Status
        lines sent by autotest will be prepended with
        "AUTOTEST_STATUS", and all other lines are ssh error
        messages."""
        match = self.parser.search(line)
        if match:
            tag, line = match.groups()
            self._process_quoted_line(tag, line)
        else:
            print line
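
    # A status line from the client looks roughly like this (the tag is a
    # dotted group index, empty at the top level; values hypothetical):
    #
    #   AUTOTEST_STATUS:2.1:	GOOD	----	sleeptest	completed successfully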

    def _format_warnings(self, last_line, warnings):
        # use the indentation of whatever the last log line was
        indent = self.extract_indent.match(last_line).group(1)
        # if the last line starts a new group, add an extra indent
        if last_line.lstrip('\t').startswith("START\t"):
            indent += '\t'
        return [self.job._render_record("WARN", None, None, msg,
                                        timestamp, indent).rstrip('\n')
                for timestamp, msg in warnings]


    def _process_warnings(self, last_line, log_dict, warnings):
        if log_dict.keys() in ([], ["logs"]):
            # there are no sub-jobs, just append the warnings here
            warnings = self._format_warnings(last_line, warnings)
            log_list = log_dict.setdefault("logs", [])
            log_list += warnings
            for warning in warnings:
                sys.stdout.write(warning + '\n')
        else:
            # there are sub-jobs, so put the warnings in there
            log_list = log_dict.get("logs", [])
            if log_list:
                last_line = log_list[-1]
            for key in sorted(log_dict.iterkeys()):
                if key != "logs":
                    self._process_warnings(last_line,
                                           log_dict[key],
                                           warnings)


    def write(self, data):
        # first check for any new console warnings
        warnings = self.job._read_warnings()
        self._process_warnings(self.last_line, self.logs, warnings)
        # now process the newest data written out
        data = self.leftover + data
        lines = data.split("\n")
        # process every line but the last one
        for line in lines[:-1]:
            self._process_line(line)
        # save the last line for later processing
        # since we may not have the whole line yet
        self.leftover = lines[-1]


    def flush(self):
        sys.stdout.flush()


    def close(self):
        if self.leftover:
            self._process_line(self.leftover)
        self._process_logs()
        self.flush()


# site_server_job.py may be non-existent or empty; make sure that an
# appropriate site_server_job class is created nevertheless
try:
    from autotest_lib.server.site_server_job import site_server_job
except ImportError:
    class site_server_job(base_server_job):
        pass


class server_job(site_server_job):
    pass