blob: fdc7adee768a853b8af48cff1c550fc80382cf85 [file] [log] [blame]
mblighc86b0b42006-07-28 17:35:28 +00001"""The main job wrapper
mbligha2508052006-05-28 21:29:53 +00002
mblighc86b0b42006-07-28 17:35:28 +00003This is the core infrastructure.
4"""
5
6__author__ = """Copyright Andy Whitcroft, Martin J. Bligh 2006"""
mbligha2508052006-05-28 21:29:53 +00007
mbligh8f243ec2006-10-10 05:55:49 +00008# standard stuff
mbligh366ff1b2008-04-25 16:07:56 +00009import os, sys, re, pickle, shutil, time, traceback, types, copy
mbligh302482e2008-05-01 20:06:16 +000010
mbligh8f243ec2006-10-10 05:55:49 +000011# autotest stuff
mbligh302482e2008-05-01 20:06:16 +000012from autotest_lib.client.bin import autotest_utils
13from autotest_lib.client.common_lib import error, barrier, logging
14
15import parallel, kernel, xen, test, profilers, filesystem, fd_stack, boottool
16import harness, config, sysinfo, cpuset
17
mblighf4c35322006-03-13 01:01:10 +000018
mbligh12a04cb2008-04-25 16:07:20 +000019
# Implicitly executed in the control file's namespace by step_engine()
# before the control file itself, so control code can use the error and
# utility names without explicit imports.
JOB_PREAMBLE = """
from common.error import *
from autotest_utils import *
"""
24
25
class StepError(error.AutotestError):
    """Raised when a next-step specification is not a function or a
    string naming one (see base_job.__create_step_tuple)."""
    pass
28
29
class base_job:
    """The actual job against which we do everything.

    Properties:
            autodir
                    The top level autotest directory (/usr/local/autotest).
                    Comes from os.environ['AUTODIR'].
            bindir
                    <autodir>/bin/
            libdir
                    <autodir>/lib/
            testdir
                    <autodir>/tests/
            site_testdir
                    <autodir>/site_tests/
            profdir
                    <autodir>/profilers/
            tmpdir
                    <autodir>/tmp/
            resultdir
                    <autodir>/results/<jobtag>
            stdout
                    fd_stack object for stdout
            stderr
                    fd_stack object for stderr
            profilers
                    the profilers object for this job
            harness
                    the server harness object for this job
            config
                    the job configuration for this job
    """

    # Basename of the job-level status log inside resultdir; parallel()
    # temporarily switches to per-task suffixed variants of this name.
    DEFAULT_LOG_FILENAME = "status"
64
    def __init__(self, control, jobtag, cont, harness_type=None,
                 use_external_logging = False):
        """
        control
                The control file (pathname of)
        jobtag
                The job tag string (eg "default")
        cont
                If this is the continuation of this job
        harness_type
                An alternative server harness
        use_external_logging
                If True, enable_external_logging() is called at the end
                of construction.
        """
        # All job directories hang off AUTODIR; a missing AUTODIR is a
        # hard failure (KeyError).
        self.autodir = os.environ['AUTODIR']
        self.bindir = os.path.join(self.autodir, 'bin')
        self.libdir = os.path.join(self.autodir, 'lib')
        self.testdir = os.path.join(self.autodir, 'tests')
        self.site_testdir = os.path.join(self.autodir, 'site_tests')
        self.profdir = os.path.join(self.autodir, 'profilers')
        self.tmpdir = os.path.join(self.autodir, 'tmp')
        self.resultdir = os.path.join(self.autodir, 'results', jobtag)
        self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
        self.control = os.path.abspath(control)
        # Persistent job state (steps, group level, etc.) lives next to
        # the control file so it survives reboots/continuations.
        self.state_file = self.control + '.state'
        self.__load_state()

        if not cont:
            """
            Don't cleanup the tmp dir (which contains the lockfile)
            in the constructor, this would be a problem for multiple
            jobs starting at the same time on the same client. Instead
            do the delete at the server side. We simply create the tmp
            directory here if it does not already exist.
            """
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

            results = os.path.join(self.autodir, 'results')
            if not os.path.exists(results):
                os.mkdir(results)

            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            # A fresh (non-continued) job starts from an empty resultdir.
            if os.path.exists(self.resultdir):
                autotest_utils.system('rm -rf '
                                      + self.resultdir)
            os.mkdir(self.resultdir)
            os.mkdir(self.sysinfodir)

            os.mkdir(os.path.join(self.resultdir, 'debug'))
            os.mkdir(os.path.join(self.resultdir, 'analysis'))

            # Keep a copy of the control file with the results.
            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))


        # NOTE: self.control is deliberately reset to the (possibly
        # relative) path passed in, overwriting the abspath above.
        self.control = control
        self.jobtag = jobtag
        self.log_filename = self.DEFAULT_LOG_FILENAME
        self.container = None

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        # Restore the status-log indentation depth from saved state.
        self._init_group_level()

        self.config = config.config(self)

        self.harness = harness.select(harness_type, self)

        self.profilers = profilers.profilers(self)

        # boottool is optional; jobs that never reboot work without it.
        try:
            tool = self.config_get('boottool.executable')
            self.bootloader = boottool.boottool(tool)
        except:
            pass

        sysinfo.log_per_reboot_data(self.sysinfodir)

        # Open the top-level status group only for a brand new job; a
        # continuation is already inside it.
        if not cont:
            self.record('START', None, None)
            self._increment_group_level()

        self.harness.run_start()

        if use_external_logging:
            self.enable_external_logging()

        # load the max disk usage rate - default to no monitoring
        self.max_disk_usage_rate = self.get_state('__monitor_disk',
                                                  default=0.0)
159
    def monitor_disk_usage(self, max_rate):
        """\
        Signal that the job should monitor disk space usage on /
        and generate a warning if a test uses up disk space at a
        rate exceeding 'max_rate'.

        Parameters:
             max_rate - the maximum allowed rate of disk consumption
                        during a test, in MB/hour, or 0 to indicate
                        no limit.
        """
        # Persist the rate so monitoring survives reboots/continuations.
        self.set_state('__monitor_disk', max_rate)
        self.max_disk_usage_rate = max_rate
173
mbligh0692e472007-08-30 16:07:53 +0000174
175 def relative_path(self, path):
176 """\
177 Return a patch relative to the job results directory
178 """
mbligh1c250ca2007-08-30 16:31:38 +0000179 head = len(self.resultdir) + 1 # remove the / inbetween
180 return path[head:]
mbligh0692e472007-08-30 16:07:53 +0000181
182
    def control_get(self):
        """Return the path of the job's current control file."""
        return self.control
185
mblighcaa605c2006-10-02 00:37:35 +0000186
    def control_set(self, control):
        """Set the job's control file, normalized to an absolute path."""
        self.control = os.path.abspath(control)
189
190
    def harness_select(self, which):
        """Replace the job's harness with the one named by `which`."""
        self.harness = harness.select(which, self)
193
194
    def config_set(self, name, value):
        """Set configuration key `name` to `value` for this job."""
        self.config.set(name, value)
197
198
    def config_get(self, name):
        """Return the configuration value stored under `name`."""
        return self.config.get(name)
201
mbligh8baa2ea2006-12-17 23:01:24 +0000202 def setup_dirs(self, results_dir, tmp_dir):
mbligh1e8858e2006-11-24 22:18:35 +0000203 if not tmp_dir:
apw870988b2007-09-25 16:50:53 +0000204 tmp_dir = os.path.join(self.tmpdir, 'build')
mbligh1e8858e2006-11-24 22:18:35 +0000205 if not os.path.exists(tmp_dir):
206 os.mkdir(tmp_dir)
207 if not os.path.isdir(tmp_dir):
mbligh642b03e2008-01-14 16:53:15 +0000208 e_msg = "Temp dir (%s) is not a dir - args backwards?" % self.tmpdir
209 raise ValueError(e_msg)
mbligh1e8858e2006-11-24 22:18:35 +0000210
211 # We label the first build "build" and then subsequent ones
212 # as "build.2", "build.3", etc. Whilst this is a little bit
213 # inconsistent, 99.9% of jobs will only have one build
214 # (that's not done as kernbench, sparse, or buildtest),
215 # so it works out much cleaner. One of life's comprimises.
216 if not results_dir:
217 results_dir = os.path.join(self.resultdir, 'build')
218 i = 2
219 while os.path.exists(results_dir):
220 results_dir = os.path.join(self.resultdir, 'build.%d' % i)
mblighd9223fc2006-11-26 17:19:54 +0000221 i += 1
mbligh1e8858e2006-11-24 22:18:35 +0000222 if not os.path.exists(results_dir):
223 os.mkdir(results_dir)
mbligh72b88fc2006-12-16 18:41:35 +0000224
mbligh8baa2ea2006-12-17 23:01:24 +0000225 return (results_dir, tmp_dir)
226
227
228 def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
229 kjob = None ):
230 """Summon a xen object"""
231 (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
232 build_dir = 'xen'
233 return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
234
235
236 def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
237 """Summon a kernel object"""
mbligh669caa12007-11-05 18:32:13 +0000238 (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
mbligh8baa2ea2006-12-17 23:01:24 +0000239 build_dir = 'linux'
mbligh6ee7ee02007-11-13 23:49:05 +0000240 return kernel.auto_kernel(self, base_tree, results_dir,
241 tmp_dir, build_dir, leave)
mblighf4c35322006-03-13 01:01:10 +0000242
mblighcaa605c2006-10-02 00:37:35 +0000243
    def barrier(self, *args, **kwds):
        """Create a barrier object; all arguments are forwarded
        unchanged to barrier.barrier."""
        return barrier.barrier(*args, **kwds)
mblighfadca202006-09-23 04:40:01 +0000247
mblighcaa605c2006-10-02 00:37:35 +0000248
mbligh4b089662006-06-14 22:34:58 +0000249 def setup_dep(self, deps):
mblighc86b0b42006-07-28 17:35:28 +0000250 """Set up the dependencies for this test.
251
252 deps is a list of libraries required for this test.
253 """
mbligh4b089662006-06-14 22:34:58 +0000254 for dep in deps:
255 try:
apw870988b2007-09-25 16:50:53 +0000256 os.chdir(os.path.join(self.autodir, 'deps', dep))
mbligh302482e2008-05-01 20:06:16 +0000257 autotest_utils.system('./' + dep + '.py')
mbligh4b089662006-06-14 22:34:58 +0000258 except:
mbligh302482e2008-05-01 20:06:16 +0000259 err = "setting up dependency " + dep + "\n"
260 raise error.UnhandledError(err)
mbligh4b089662006-06-14 22:34:58 +0000261
262
mbligh72b88fc2006-12-16 18:41:35 +0000263 def __runtest(self, url, tag, args, dargs):
264 try:
mbligh53c41502007-10-23 20:45:04 +0000265 l = lambda : test.runtest(self, url, tag, args, dargs)
mbligh302482e2008-05-01 20:06:16 +0000266 pid = parallel.fork_start(self.resultdir, l)
267 parallel.fork_waitfor(self.resultdir, pid)
268 except error.AutotestError:
mbligh72b88fc2006-12-16 18:41:35 +0000269 raise
jadmanskicf8c4d62008-05-27 22:09:14 +0000270 except Exception, e:
271 msg = "Unhandled %s error occured during test\n"
272 msg %= str(e.__class__.__name__)
273 raise error.UnhandledError(msg)
apwf1a81162006-04-25 10:10:29 +0000274
mblighcaa605c2006-10-02 00:37:35 +0000275
    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run
        container
                optional dict of cpuset container parameters
        Returns True on success, False if the test raised a TestError;
        any other exception is re-raised.
        """

        if not url:
            raise TypeError("Test name is invalid. "
                            "Switched arguments?")
        (group, testname) = test.testname(url)
        tag = dargs.pop('tag', None)
        container = dargs.pop('container', None)
        subdir = testname
        if tag:
            subdir += '.' + tag

        if container:
            # Accept both the current and the legacy key names for
            # each container option.
            cname = container.get('name', None)
            if not cname:   # get old name
                cname = container.get('container_name', None)
            mbytes = container.get('mbytes', None)
            if not mbytes:  # get old name
                mbytes = container.get('mem', None)
            cpus = container.get('cpus', None)
            if not cpus:    # get old name
                cpus = container.get('cpu', None)
            root = container.get('root', None)
            self.new_container(mbytes=mbytes, cpus=cpus,
                               root=root, name=cname)
            # We are running in a container now...

        def log_warning(reason):
            self.record("WARN", subdir, testname, reason)
        # NOTE(review): disk_usage_monitor is expected to be defined
        # elsewhere in this module -- confirm; the decorator wraps
        # group_func with disk usage monitoring on '/'.
        @disk_usage_monitor.watch(log_warning, "/",
                                  self.max_disk_usage_rate)
        def group_func():
            try:
                self.__runtest(url, tag, args, dargs)
            except error.TestNAError, detail:
                self.record('TEST_NA', subdir, testname,
                            str(detail))
                raise
            except Exception, detail:
                self.record('FAIL', subdir, testname,
                            str(detail))
                raise
            else:
                self.record('GOOD', subdir, testname,
                            'completed successfully')

        result, exc_info = self.__rungroup(subdir, group_func)
        # Always leave the container, even if the test failed.
        if container:
            self.release_container()
        if exc_info and isinstance(exc_info[1], error.TestError):
            return False
        elif exc_info:
            # Re-raise non-TestError exceptions with their original
            # traceback.
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
338
339
    def __rungroup(self, name, function, *args, **dargs):
        """\
        name:
                name of the group
        function:
                subroutine to run
        *args:
                arguments for the function

        Returns a 2-tuple (result, exc_info) where result
        is the return value of function, and exc_info is
        the sys.exc_info() of the exception thrown by the
        function (which may be None).
        """

        result, exc_info = None, None
        try:
            self.record('START', None, name)
            self._increment_group_level()
            result = function(*args, **dargs)
            self._decrement_group_level()
            self.record('END GOOD', None, name)
        except error.TestNAError, e:
            # TEST_NA closes the group but is not treated as a failure;
            # note it is also not captured in exc_info.
            self._decrement_group_level()
            self.record('END TEST_NA', None, name, str(e))
        except Exception, e:
            # Capture the exception for the caller to inspect/re-raise
            # rather than propagating it here.
            exc_info = sys.exc_info()
            self._decrement_group_level()
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, name, err_msg)

        return result, exc_info
apw0865f482006-03-30 18:50:19 +0000372
mblighd7fb4a62006-10-01 00:57:53 +0000373
apw1da244b2007-09-27 17:18:01 +0000374 def run_group(self, function, *args, **dargs):
mbligh88ab90f2007-08-29 15:52:49 +0000375 """\
376 function:
377 subroutine to run
378 *args:
379 arguments for the function
380 """
381
mbligh7dd510c2007-11-13 17:11:22 +0000382 # Allow the tag for the group to be specified
mbligh88ab90f2007-08-29 15:52:49 +0000383 name = function.__name__
mbligh7dd510c2007-11-13 17:11:22 +0000384 tag = dargs.pop('tag', None)
385 if tag:
386 name = tag
apw1da244b2007-09-27 17:18:01 +0000387
mbligh7dd510c2007-11-13 17:11:22 +0000388 result, exc_info = self.__rungroup(name, function,
389 *args, **dargs)
apw1da244b2007-09-27 17:18:01 +0000390
mbligh7dd510c2007-11-13 17:11:22 +0000391 # if there was a non-TestError exception, raise it
mbligh302482e2008-05-01 20:06:16 +0000392 if exc_info and not isinstance(exc_info[1], error.TestError):
mbligh7dd510c2007-11-13 17:11:22 +0000393 err = ''.join(traceback.format_exception(*exc_info))
mbligh302482e2008-05-01 20:06:16 +0000394 raise error.TestError(name + ' failed\n' + err)
mbligh88ab90f2007-08-29 15:52:49 +0000395
mbligh7dd510c2007-11-13 17:11:22 +0000396 # pass back the actual return value from the function
apw08403ca2007-09-27 17:17:22 +0000397 return result
398
mbligh88ab90f2007-08-29 15:52:49 +0000399
    def new_container(self, mbytes=None, cpus=None, root=None, name=None):
        """Move this job (and all future forked tests) into a fresh
        cpuset container; a no-op if the kernel lacks cpuset support."""
        if not autotest_utils.grep('cpuset', '/proc/filesystems'):
            print "Containers not enabled by latest reboot"
            return # containers weren't enabled in this kernel boot
        pid = os.getpid()
        if not name:
            name = 'test%d' % pid # make arbitrary unique name
        self.container = cpuset.cpuset(name, job_size=mbytes,
                job_pid=pid, cpus=cpus, root=root)
        # This job's python shell is now running in the new container
        # and all forked test processes will inherit that container
411
412
413 def release_container(self):
414 if self.container:
mbligh337bb762008-04-16 21:23:10 +0000415 self.container.release()
mbligh68119582008-01-25 18:16:41 +0000416 self.container = None
417
418
419 def cpu_count(self):
420 if self.container:
421 return len(self.container.cpus)
jadmanskia9c75c42008-05-01 22:05:31 +0000422 return autotest_utils.count_cpus() # use total system count
mbligh68119582008-01-25 18:16:41 +0000423
424
    # Check the passed kernel identifier against the command line
    # and the running kernel, abort the job on missmatch.
    def kernel_check_ident(self, expected_when, expected_id, subdir,
                           type = 'src', patches=[]):
        # NOTE(review): `patches` uses a mutable default list; it is
        # only read here, never mutated, so this is safe in practice.
        print (("POST BOOT: checking booted kernel " +
                "mark=%d identity='%s' type='%s'") %
               (expected_when, expected_id, type))

        running_id = autotest_utils.running_os_ident()

        cmdline = autotest_utils.read_one_line("/proc/cmdline")

        # Recover the IDENT=<n> mark planted on the boot command line
        # before the reboot (-1 if absent).
        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or
            type == 'rpm' and
            not running_id.startswith(expected_id + '::')):
            print "check_kernel_ident: kernel identifier mismatch"
            bad = True
        if expected_when != cmdline_when:
            print "check_kernel_ident: kernel command line mismatch"
            bad = True

        if bad:
            print "   Expected Ident: " + expected_id
            print "    Running Ident: " + running_id
            print "    Expected Mark: %d" % (expected_when)
            print "Command Line Mark: %d" % (cmdline_when)
            print "     Command Line: " + cmdline

            raise error.JobError("boot failure", "reboot.verify")

        # Success: close the 'reboot' status group opened by reboot(),
        # recording the kernel (and patches) actually booted.
        kernel_info = {'kernel': expected_id}
        for i, patch in enumerate(patches):
            kernel_info["patch%d" % i] = patch
        self.record('GOOD', subdir, 'reboot.verify', expected_id)
        self._decrement_group_level()
        self.record('END GOOD', subdir, 'reboot',
                    optional_fields=kernel_info)
apwce73d892007-09-25 16:53:05 +0000471
472
mblighc2359852007-08-28 18:11:48 +0000473 def filesystem(self, device, mountpoint = None, loop_size = 0):
mblighd7fb4a62006-10-01 00:57:53 +0000474 if not mountpoint:
475 mountpoint = self.tmpdir
mblighc2359852007-08-28 18:11:48 +0000476 return filesystem.filesystem(self, device, mountpoint,loop_size)
mblighd7fb4a62006-10-01 00:57:53 +0000477
mblighcaa62c22008-04-07 21:51:17 +0000478
    def enable_external_logging(self):
        """Start/resume external logging.  Hook for subclasses; the
        base implementation does nothing."""
        pass
481
482
    def disable_external_logging(self):
        """Pause/stop external logging.  Hook for subclasses; the base
        implementation does nothing."""
        pass
485
486
    def reboot_setup(self):
        """Hook invoked just before a reboot is initiated.  The base
        implementation does nothing."""
        pass
489
mblighcaa605c2006-10-02 00:37:35 +0000490
    def reboot(self, tag='autotest'):
        """Reboot into the boot entry named `tag`, opening a 'reboot'
        status group that kernel_check_ident closes after boot."""
        self.reboot_setup()
        self.record('START', None, 'reboot')
        self._increment_group_level()
        self.record('GOOD', None, 'reboot.start')
        self.harness.run_reboot()
        # Either make `tag` the permanent default or boot it just once,
        # per the 'boot.set_default' config flag.
        default = self.config_get('boot.set_default')
        if default:
            self.bootloader.set_default(tag)
        else:
            self.bootloader.boot_once(tag)
        # Detach the actual reboot so this process can exit cleanly
        # (via quit()) before the machine goes down.
        cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
        autotest_utils.system(cmd)
        self.quit()
mblighcaa605c2006-10-02 00:37:35 +0000505
506
apw0865f482006-03-30 18:50:19 +0000507 def noop(self, text):
508 print "job: noop: " + text
509
mblighcaa605c2006-10-02 00:37:35 +0000510
    def parallel(self, *tasklist):
        """Run tasks in parallel"""

        pids = []
        old_log_filename = self.log_filename
        for i, task in enumerate(tasklist):
            # Redirect each subtask's status log to status.<i> so the
            # concurrent writers don't interleave in the main log.
            self.log_filename = old_log_filename + (".%d" % i)
            task_func = lambda: task[0](*task[1:])
            pids.append(parallel.fork_start(self.resultdir,
                                            task_func))

        old_log_path = os.path.join(self.resultdir, old_log_filename)
        old_log = open(old_log_path, "a")
        exceptions = []
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception, e:
                # Collect failures; keep waiting for the other tasks.
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self.log_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed" % len(exceptions)
            raise error.JobError(msg, str(exceptions), exceptions)
mblighd509b712008-01-14 17:41:25 +0000547
mblighcaa605c2006-10-02 00:37:35 +0000548
    def quit(self):
        # XXX: should have a better name.
        self.harness.run_pause()
        # JobContinue unwinds to the job runner, signalling that the
        # control file has more steps to run after the next boot.
        raise error.JobContinue("more to come")
apw0865f482006-03-30 18:50:19 +0000553
mblighcaa605c2006-10-02 00:37:35 +0000554
apw0865f482006-03-30 18:50:19 +0000555 def complete(self, status):
mblighc86b0b42006-07-28 17:35:28 +0000556 """Clean up and exit"""
apw0865f482006-03-30 18:50:19 +0000557 # We are about to exit 'complete' so clean up the control file.
558 try:
mbligh366ff1b2008-04-25 16:07:56 +0000559 os.unlink(self.state_file)
apw0865f482006-03-30 18:50:19 +0000560 except:
561 pass
mblighc0b10d32008-03-03 16:03:28 +0000562
mbligh61a6c1a2006-12-25 01:26:38 +0000563 self.harness.run_complete()
mblighcaa62c22008-04-07 21:51:17 +0000564 self.disable_external_logging()
apw1b021902006-04-03 17:02:56 +0000565 sys.exit(status)
apw0865f482006-03-30 18:50:19 +0000566
mblighcaa605c2006-10-02 00:37:35 +0000567
mbligh366ff1b2008-04-25 16:07:56 +0000568 def set_state(self, var, val):
569 # Deep copies make sure that the state can't be altered
570 # without it being re-written. Perf wise, deep copies
571 # are overshadowed by pickling/loading.
572 self.state[var] = copy.deepcopy(val)
573 pickle.dump(self.state, open(self.state_file, 'w'))
574
575
576 def __load_state(self):
jadmanskia9c75c42008-05-01 22:05:31 +0000577 assert not hasattr(self, "state")
mbligh366ff1b2008-04-25 16:07:56 +0000578 try:
579 self.state = pickle.load(open(self.state_file, 'r'))
jadmanskia9c75c42008-05-01 22:05:31 +0000580 self.state_existed = True
mbligh366ff1b2008-04-25 16:07:56 +0000581 except Exception:
582 print "Initializing the state engine."
583 self.state = {}
mblighf1ae0a42008-04-25 16:09:20 +0000584 self.set_state('__steps', []) # writes pickle file
jadmanskia9c75c42008-05-01 22:05:31 +0000585 self.state_existed = False
mbligh366ff1b2008-04-25 16:07:56 +0000586
587
588 def get_state(self, var, default=None):
589 if var in self.state or default == None:
590 val = self.state[var]
591 else:
592 val = default
593 return copy.deepcopy(val)
594
595
mbligh12a04cb2008-04-25 16:07:20 +0000596 def __create_step_tuple(self, fn, args, dargs):
597 # Legacy code passes in an array where the first arg is
598 # the function or its name.
599 if isinstance(fn, list):
600 assert(len(args) == 0)
601 assert(len(dargs) == 0)
602 args = fn[1:]
603 fn = fn[0]
604 # Pickling actual functions is harry, thus we have to call
605 # them by name. Unfortunately, this means only functions
606 # defined globally can be used as a next step.
607 if isinstance(fn, types.FunctionType):
608 fn = fn.__name__
609 if not isinstance(fn, types.StringTypes):
610 raise StepError("Next steps must be functions or "
611 "strings containing the function name")
612 return (fn, args, dargs)
613
614
mbligh12a04cb2008-04-25 16:07:20 +0000615 def next_step(self, fn, *args, **dargs):
mblighc86b0b42006-07-28 17:35:28 +0000616 """Define the next step"""
mblighf1ae0a42008-04-25 16:09:20 +0000617 steps = self.get_state('__steps')
mbligh366ff1b2008-04-25 16:07:56 +0000618 steps.append(self.__create_step_tuple(fn, args, dargs))
mblighf1ae0a42008-04-25 16:09:20 +0000619 self.set_state('__steps', steps)
apw0865f482006-03-30 18:50:19 +0000620
mblighcaa605c2006-10-02 00:37:35 +0000621
mbligh12a04cb2008-04-25 16:07:20 +0000622 def next_step_prepend(self, fn, *args, **dargs):
mbligh237bed32007-09-05 13:05:57 +0000623 """Insert a new step, executing first"""
mblighf1ae0a42008-04-25 16:09:20 +0000624 steps = self.get_state('__steps')
mbligh366ff1b2008-04-25 16:07:56 +0000625 steps.insert(0, self.__create_step_tuple(fn, args, dargs))
mblighf1ae0a42008-04-25 16:09:20 +0000626 self.set_state('__steps', steps)
mbligh237bed32007-09-05 13:05:57 +0000627
628
    def step_engine(self):
        """the stepping engine -- if the control file defines
        step_init we will be using this engine to drive multiple runs.
        """
        """Do the next step"""

        # Set up the environment and then interpret the control file.
        # Some control files will have code outside of functions,
        # which means we need to have our state engine initialized
        # before reading in the file.
        lcl = {'job': self}
        exec(JOB_PREAMBLE, lcl, lcl)
        execfile(self.control, lcl, lcl)

        # If we loaded in a mid-job state file, then we presumably
        # know what steps we have yet to run.
        if not self.state_existed:
            if lcl.has_key('step_init'):
                self.next_step([lcl['step_init']])

        # Iterate through the steps.  If we reboot, we'll simply
        # continue iterating on the next step.
        while len(self.get_state('__steps')) > 0:
            # Pop and persist before executing, so a reboot mid-step
            # does not re-run the same step.
            steps = self.get_state('__steps')
            (fn, args, dargs) = steps.pop(0)
            self.set_state('__steps', steps)

            # Invoke the step by name inside the control file's
            # namespace, passing arguments via temporary bindings.
            lcl['__args'] = args
            lcl['__dargs'] = dargs
            exec(fn + "(*__args, **__dargs)", lcl, lcl)
apw0865f482006-03-30 18:50:19 +0000659
mblighcaa605c2006-10-02 00:37:35 +0000660
    def _init_group_level(self):
        # Restore the status-log indent depth from persistent state so
        # it survives reboots/continuations; defaults to top level.
        self.group_level = self.get_state("__group_level", default=0)
663
664
    def _increment_group_level(self):
        # Open one more level of status-line indentation and persist it.
        self.group_level += 1
        self.set_state("__group_level", self.group_level)
668
669
    def _decrement_group_level(self):
        # Close one level of status-line indentation and persist it.
        self.group_level -= 1
        self.set_state("__group_level", self.group_level)
673
674
675 def record(self, status_code, subdir, operation, status = '',
676 optional_fields=None):
mbligh09f288a2007-09-18 21:34:57 +0000677 """
678 Record job-level status
apw7db8d0b2006-10-09 08:10:25 +0000679
mbligh09f288a2007-09-18 21:34:57 +0000680 The intent is to make this file both machine parseable and
681 human readable. That involves a little more complexity, but
682 really isn't all that bad ;-)
683
684 Format is <status code>\t<subdir>\t<operation>\t<status>
685
686 status code: (GOOD|WARN|FAIL|ABORT)
687 or START
688 or END (GOOD|WARN|FAIL|ABORT)
689
690 subdir: MUST be a relevant subdirectory in the results,
691 or None, which will be represented as '----'
692
693 operation: description of what you ran (e.g. "dbench", or
694 "mkfs -t foobar /dev/sda9")
695
696 status: error message or "completed sucessfully"
697
698 ------------------------------------------------------------
699
700 Initial tabs indicate indent levels for grouping, and is
mbligh7dd510c2007-11-13 17:11:22 +0000701 governed by self.group_level
mbligh09f288a2007-09-18 21:34:57 +0000702
703 multiline messages have secondary lines prefaced by a double
704 space (' ')
705 """
706
mblighb0570ad2007-09-19 18:18:11 +0000707 if subdir:
708 if re.match(r'[\n\t]', subdir):
jadmanskia9c75c42008-05-01 22:05:31 +0000709 raise ValueError("Invalid character in "
710 "subdir string")
mblighb0570ad2007-09-19 18:18:11 +0000711 substr = subdir
712 else:
713 substr = '----'
mbligh09f288a2007-09-18 21:34:57 +0000714
mbligh302482e2008-05-01 20:06:16 +0000715 if not logging.is_valid_status(status_code):
jadmanskia9c75c42008-05-01 22:05:31 +0000716 raise ValueError("Invalid status code supplied: %s" %
717 status_code)
mbligh9c5ac322007-10-31 18:01:59 +0000718 if not operation:
719 operation = '----'
jadmanskia9c75c42008-05-01 22:05:31 +0000720
mbligh09f288a2007-09-18 21:34:57 +0000721 if re.match(r'[\n\t]', operation):
jadmanskia9c75c42008-05-01 22:05:31 +0000722 raise ValueError("Invalid character in "
723 "operation string")
mbligh09f288a2007-09-18 21:34:57 +0000724 operation = operation.rstrip()
jadmanskia9c75c42008-05-01 22:05:31 +0000725
726 if not optional_fields:
727 optional_fields = {}
728
mbligh09f288a2007-09-18 21:34:57 +0000729 status = status.rstrip()
730 status = re.sub(r"\t", " ", status)
apw7db8d0b2006-10-09 08:10:25 +0000731 # Ensure any continuation lines are marked so we can
732 # detect them in the status file to ensure it is parsable.
jadmanskia9c75c42008-05-01 22:05:31 +0000733 status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
734 status)
mbligh09f288a2007-09-18 21:34:57 +0000735
mbligh30270302007-11-05 20:33:52 +0000736 # Generate timestamps for inclusion in the logs
737 epoch_time = int(time.time()) # seconds since epoch, in UTC
738 local_time = time.localtime(epoch_time)
jadmanskia9c75c42008-05-01 22:05:31 +0000739 optional_fields["timestamp"] = str(epoch_time)
740 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
741 local_time)
mbligh30270302007-11-05 20:33:52 +0000742
jadmanskia9c75c42008-05-01 22:05:31 +0000743 fields = [status_code, substr, operation]
744 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
745 fields.append(status)
746
747 msg = '\t'.join(str(x) for x in fields)
mbligh7dd510c2007-11-13 17:11:22 +0000748 msg = '\t' * self.group_level + msg
apw7db8d0b2006-10-09 08:10:25 +0000749
mblighd528d302007-12-19 16:19:05 +0000750 msg_tag = ""
751 if "." in self.log_filename:
752 msg_tag = self.log_filename.split(".", 1)[1]
753
jadmanskia9c75c42008-05-01 22:05:31 +0000754 self.harness.test_status_detail(status_code, substr,
755 operation, status, msg_tag)
mblighd528d302007-12-19 16:19:05 +0000756 self.harness.test_status(msg, msg_tag)
757
758 # log to stdout (if enabled)
759 #if self.log_filename == self.DEFAULT_LOG_FILENAME:
apwf1a81162006-04-25 10:10:29 +0000760 print msg
mblighd528d302007-12-19 16:19:05 +0000761
762 # log to the "root" status log
763 status_file = os.path.join(self.resultdir, self.log_filename)
mbligh7dd510c2007-11-13 17:11:22 +0000764 open(status_file, "a").write(msg + "\n")
mblighd528d302007-12-19 16:19:05 +0000765
766 # log to the subdir status log (if subdir is set)
mblighb0570ad2007-09-19 18:18:11 +0000767 if subdir:
mblighadff6ca2008-01-22 16:38:25 +0000768 dir = os.path.join(self.resultdir, subdir)
769 if not os.path.exists(dir):
770 os.mkdir(dir)
771
772 status_file = os.path.join(dir,
mblighd528d302007-12-19 16:19:05 +0000773 self.DEFAULT_LOG_FILENAME)
mblighb0570ad2007-09-19 18:18:11 +0000774 open(status_file, "a").write(msg + "\n")
apwce9abe92006-04-27 14:14:04 +0000775
776
class disk_usage_monitor:
    """Watches free space on a device across a function call and logs a
    warning (via logging_func) if space was consumed faster than
    max_mb_per_hour."""

    def __init__(self, logging_func, device, max_mb_per_hour):
        # logging_func: one-argument callable used to emit the warning
        # device: device/path handed to autotest_utils.freespace()
        # max_mb_per_hour: threshold rate; falsy disables the check
        self.func = logging_func
        self.device = device
        self.max_mb_per_hour = max_mb_per_hour


    def start(self):
        """Snapshot free space and the clock before the watched work."""
        self.initial_space = autotest_utils.freespace(self.device)
        self.start_time = time.time()


    def stop(self):
        """Compare free space against the snapshot taken by start() and
        warn if the usage rate exceeded max_mb_per_hour."""
        # if no maximum usage rate was set, we don't need to
        # generate any warnings
        if not self.max_mb_per_hour:
            return

        final_space = autotest_utils.freespace(self.device)
        used_space = self.initial_space - final_space
        stop_time = time.time()
        total_time = stop_time - self.start_time
        # round up the time to one minute, to keep extremely short
        # tests from generating false positives due to short, badly
        # timed bursts of activity
        total_time = max(total_time, 60.0)

        # determine the usage rate
        bytes_per_sec = used_space / total_time
        mb_per_sec = bytes_per_sec / 1024**2
        mb_per_hour = mb_per_sec * 60 * 60

        if mb_per_hour > self.max_mb_per_hour:
            msg = ("disk space on %s was consumed at a rate of "
                   "%.2f MB/hour")
            msg %= (self.device, mb_per_hour)
            self.func(msg)


    @classmethod
    def watch(cls, *monitor_args, **monitor_dargs):
        """ Generic decorator to wrap a function call with the
        standard create-monitor -> start -> call -> stop idiom."""
        def decorator(func):
            def watched_func(*args, **dargs):
                monitor = cls(*monitor_args, **monitor_dargs)
                monitor.start()
                try:
                    # Return the wrapped function's result; the old
                    # code dropped it, making every decorated function
                    # silently return None.
                    return func(*args, **dargs)
                finally:
                    monitor.stop()
            return watched_func
        return decorator
830
831
mblighcaa62c22008-04-07 21:51:17 +0000832def runjob(control, cont = False, tag = "default", harness_type = '',
833 use_external_logging = False):
mblighc86b0b42006-07-28 17:35:28 +0000834 """The main interface to this module
835
mbligh72b88fc2006-12-16 18:41:35 +0000836 control
mblighc86b0b42006-07-28 17:35:28 +0000837 The control file to use for this job.
838 cont
839 Whether this is the continuation of a previously started job
840 """
mblighb4eef242007-07-23 18:22:49 +0000841 control = os.path.abspath(control)
apwce9abe92006-04-27 14:14:04 +0000842 state = control + '.state'
843
844 # instantiate the job object ready for the control file.
845 myjob = None
846 try:
847 # Check that the control file is valid
848 if not os.path.exists(control):
mbligh302482e2008-05-01 20:06:16 +0000849 raise error.JobError(control +
850 ": control file not found")
apwce9abe92006-04-27 14:14:04 +0000851
852 # When continuing, the job is complete when there is no
853 # state file, ensure we don't try and continue.
mblighf3fef462006-09-13 16:05:05 +0000854 if cont and not os.path.exists(state):
mbligh302482e2008-05-01 20:06:16 +0000855 raise error.JobComplete("all done")
mblighf3fef462006-09-13 16:05:05 +0000856 if cont == False and os.path.exists(state):
apwce9abe92006-04-27 14:14:04 +0000857 os.unlink(state)
858
mblighcaa62c22008-04-07 21:51:17 +0000859 myjob = job(control, tag, cont, harness_type,
860 use_external_logging)
apwce9abe92006-04-27 14:14:04 +0000861
862 # Load in the users control file, may do any one of:
863 # 1) execute in toto
864 # 2) define steps, and select the first via next_step()
865 myjob.step_engine()
866
mbligh302482e2008-05-01 20:06:16 +0000867 except error.JobContinue:
apwce9abe92006-04-27 14:14:04 +0000868 sys.exit(5)
869
mbligh302482e2008-05-01 20:06:16 +0000870 except error.JobComplete:
apwb832e1b2007-11-24 20:24:38 +0000871 sys.exit(1)
872
mbligh302482e2008-05-01 20:06:16 +0000873 except error.JobError, instance:
apwce9abe92006-04-27 14:14:04 +0000874 print "JOB ERROR: " + instance.args[0]
mbligh9c5ac322007-10-31 18:01:59 +0000875 if myjob:
mbligh30270302007-11-05 20:33:52 +0000876 command = None
877 if len(instance.args) > 1:
878 command = instance.args[1]
879 myjob.record('ABORT', None, command, instance.args[0])
jadmanskia9c75c42008-05-01 22:05:31 +0000880 myjob._decrement_group_level()
mblighc3430162007-11-14 23:57:19 +0000881 myjob.record('END ABORT', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000882 assert(myjob.group_level == 0)
apwce9abe92006-04-27 14:14:04 +0000883 myjob.complete(1)
apwb832e1b2007-11-24 20:24:38 +0000884 else:
885 sys.exit(1)
apwce9abe92006-04-27 14:14:04 +0000886
mblighc3430162007-11-14 23:57:19 +0000887 except Exception, e:
mbligh302482e2008-05-01 20:06:16 +0000888 msg = str(e) + '\n' + traceback.format_exc()
mblighc3430162007-11-14 23:57:19 +0000889 print "JOB ERROR: " + msg
mblighfbfb77d2007-02-15 18:54:03 +0000890 if myjob:
mblighc3430162007-11-14 23:57:19 +0000891 myjob.record('ABORT', None, None, msg)
jadmanskia9c75c42008-05-01 22:05:31 +0000892 myjob._decrement_group_level()
mblighc3430162007-11-14 23:57:19 +0000893 myjob.record('END ABORT', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000894 assert(myjob.group_level == 0)
mbligh9c5ac322007-10-31 18:01:59 +0000895 myjob.complete(1)
apwb832e1b2007-11-24 20:24:38 +0000896 else:
897 sys.exit(1)
mbligh892d37f2007-03-01 17:03:25 +0000898
mbligh0144e5a2008-03-07 18:17:53 +0000899 # If we get here, then we assume the job is complete and good.
jadmanskia9c75c42008-05-01 22:05:31 +0000900 myjob._decrement_group_level()
mbligh0144e5a2008-03-07 18:17:53 +0000901 myjob.record('END GOOD', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000902 assert(myjob.group_level == 0)
mbligh0144e5a2008-03-07 18:17:53 +0000903
mbligh892d37f2007-03-01 17:03:25 +0000904 myjob.complete(0)
mblighcaa62c22008-04-07 21:51:17 +0000905
906
# site_job.py may be non-existant or empty, make sure that an appropriate
# site_job class is created nevertheless
try:
    from site_job import site_job
except ImportError:
    # No site-specific customizations installed: fall back to an empty
    # subclass so the inheritance chain below always resolves.
    class site_job(base_job):
        pass

# The concrete job class used by runjob(): base_job plus any site hooks.
class job(site_job):
    pass
jadmanski87cbc7f2008-05-13 18:17:10 +0000917