blob: 02465d32d2c12e37d042a3b7544a6c1061aa489a [file] [log] [blame]
mblighc86b0b42006-07-28 17:35:28 +00001"""The main job wrapper
mbligha2508052006-05-28 21:29:53 +00002
mblighc86b0b42006-07-28 17:35:28 +00003This is the core infrastructure.
4"""
5
6__author__ = """Copyright Andy Whitcroft, Martin J. Bligh 2006"""
mbligha2508052006-05-28 21:29:53 +00007
mbligh8f243ec2006-10-10 05:55:49 +00008# standard stuff
mbligh366ff1b2008-04-25 16:07:56 +00009import os, sys, re, pickle, shutil, time, traceback, types, copy
mbligh302482e2008-05-01 20:06:16 +000010
mbligh8f243ec2006-10-10 05:55:49 +000011# autotest stuff
mbligh302482e2008-05-01 20:06:16 +000012from autotest_lib.client.bin import autotest_utils
13from autotest_lib.client.common_lib import error, barrier, logging
14
15import parallel, kernel, xen, test, profilers, filesystem, fd_stack, boottool
16import harness, config, sysinfo, cpuset
17
mblighf4c35322006-03-13 01:01:10 +000018
mbligh12a04cb2008-04-25 16:07:20 +000019
# Code prepended to every control file before it is exec'd (see
# step_engine), so control files get the error classes and the
# autotest_utils helpers without importing them explicitly.
JOB_PREAMBLE = """
from common.error import *
from autotest_utils import *
"""
24
25
class StepError(error.AutotestError):
    """Raised by the step engine for an invalid next-step target
    (see __create_step_tuple)."""
    pass
28
29
mblighcaa62c22008-04-07 21:51:17 +000030class base_job:
mblighc86b0b42006-07-28 17:35:28 +000031 """The actual job against which we do everything.
32
33 Properties:
mbligh72b88fc2006-12-16 18:41:35 +000034 autodir
mblighc86b0b42006-07-28 17:35:28 +000035 The top level autotest directory (/usr/local/autotest).
36 Comes from os.environ['AUTODIR'].
mbligh72b88fc2006-12-16 18:41:35 +000037 bindir
mblighc86b0b42006-07-28 17:35:28 +000038 <autodir>/bin/
mblighd5a38832008-01-25 18:15:39 +000039 libdir
40 <autodir>/lib/
mbligh72b88fc2006-12-16 18:41:35 +000041 testdir
mblighc86b0b42006-07-28 17:35:28 +000042 <autodir>/tests/
mbligh84bafdb2008-01-26 19:43:34 +000043 site_testdir
44 <autodir>/site_tests/
mblighc86b0b42006-07-28 17:35:28 +000045 profdir
46 <autodir>/profilers/
47 tmpdir
48 <autodir>/tmp/
49 resultdir
50 <autodir>/results/<jobtag>
51 stdout
52 fd_stack object for stdout
53 stderr
54 fd_stack object for stderr
55 profilers
56 the profilers object for this job
apw504a7dd2006-10-12 17:18:37 +000057 harness
58 the server harness object for this job
apw059e1b12006-10-12 17:18:26 +000059 config
60 the job configuration for this job
mblighc86b0b42006-07-28 17:35:28 +000061 """
62
mblighd528d302007-12-19 16:19:05 +000063 DEFAULT_LOG_FILENAME = "status"
64
    def __init__(self, control, jobtag, cont, harness_type=None,
                 use_external_logging = False):
        """
        control
                The control file (pathname of)
        jobtag
                The job tag string (eg "default")
        cont
                If this is the continuation of this job
        harness_type
                An alternative server harness
        use_external_logging
                If true, the enable_external_logging() hook is invoked
        """
        # Directory layout is fixed relative to $AUTODIR.
        self.autodir = os.environ['AUTODIR']
        self.bindir = os.path.join(self.autodir, 'bin')
        self.libdir = os.path.join(self.autodir, 'lib')
        self.testdir = os.path.join(self.autodir, 'tests')
        self.site_testdir = os.path.join(self.autodir, 'site_tests')
        self.profdir = os.path.join(self.autodir, 'profilers')
        self.tmpdir = os.path.join(self.autodir, 'tmp')
        self.resultdir = os.path.join(self.autodir, 'results', jobtag)
        self.sysinfodir = os.path.join(self.resultdir, 'sysinfo')
        self.control = os.path.abspath(control)
        self.state_file = self.control + '.state'
        # Restore (or initialize) the persistent job state; must run
        # before anything below consults get_state().
        self.__load_state()

        if not cont:
            """
            Don't cleanup the tmp dir (which contains the lockfile)
            in the constructor, this would be a problem for multiple
            jobs starting at the same time on the same client. Instead
            do the delete at the server side. We simply create the tmp
            directory here if it does not already exist.
            """
            if not os.path.exists(self.tmpdir):
                os.mkdir(self.tmpdir)

            results = os.path.join(self.autodir, 'results')
            if not os.path.exists(results):
                os.mkdir(results)

            download = os.path.join(self.testdir, 'download')
            if not os.path.exists(download):
                os.mkdir(download)

            # A fresh (non-continuation) job starts from an empty
            # results directory.
            if os.path.exists(self.resultdir):
                autotest_utils.system('rm -rf '
                                      + self.resultdir)
            os.mkdir(self.resultdir)
            os.mkdir(self.sysinfodir)

            os.mkdir(os.path.join(self.resultdir, 'debug'))
            os.mkdir(os.path.join(self.resultdir, 'analysis'))

            # Keep a copy of the control file with the results.
            shutil.copyfile(self.control,
                            os.path.join(self.resultdir, 'control'))


        # NOTE(review): self.control is reset to the path as passed in
        # (possibly relative), after the absolute form was used above.
        self.control = control
        self.jobtag = jobtag
        self.log_filename = self.DEFAULT_LOG_FILENAME
        self.container = None

        self.stdout = fd_stack.fd_stack(1, sys.stdout)
        self.stderr = fd_stack.fd_stack(2, sys.stderr)

        # Status-log indent level persists across job continuations.
        self._init_group_level()

        self.config = config.config(self)

        self.harness = harness.select(harness_type, self)

        self.profilers = profilers.profilers(self)

        # boottool is optional; any failure locating it is ignored.
        try:
            tool = self.config_get('boottool.executable')
            self.bootloader = boottool.boottool(tool)
        except:
            pass

        sysinfo.log_per_reboot_data(self.sysinfodir)

        if not cont:
            self.record('START', None, None)
            self._increment_group_level()

        self.harness.run_start()

        if use_external_logging:
            self.enable_external_logging()

        # load the max disk usage rate - default to no monitoring
        self.max_disk_usage_rate = self.get_state('__monitor_disk',
                                                  default=0.0)
158
159
160 def monitor_disk_usage(self, max_rate):
161 """\
162 Signal that the job should monitor disk space usage on /
163 and generate a warning if a test uses up disk space at a
164 rate exceeding 'max_rate'.
165
166 Parameters:
167 max_rate - the maximium allowed rate of disk consumption
168 during a test, in MB/hour, or 0 to indicate
169 no limit.
170 """
171 self.set_state('__monitor_disk', max_rate)
172 self.max_disk_usage_rate = max_rate
173
mbligh0692e472007-08-30 16:07:53 +0000174
175 def relative_path(self, path):
176 """\
177 Return a patch relative to the job results directory
178 """
mbligh1c250ca2007-08-30 16:31:38 +0000179 head = len(self.resultdir) + 1 # remove the / inbetween
180 return path[head:]
mbligh0692e472007-08-30 16:07:53 +0000181
182
mbligh362ab3d2007-08-30 11:24:04 +0000183 def control_get(self):
184 return self.control
185
mblighcaa605c2006-10-02 00:37:35 +0000186
mbligh8d83cdc2007-12-03 18:09:18 +0000187 def control_set(self, control):
188 self.control = os.path.abspath(control)
189
190
apwde1503a2006-10-10 08:34:21 +0000191 def harness_select(self, which):
192 self.harness = harness.select(which, self)
193
194
apw059e1b12006-10-12 17:18:26 +0000195 def config_set(self, name, value):
196 self.config.set(name, value)
197
198
199 def config_get(self, name):
200 return self.config.get(name)
201
    def setup_dirs(self, results_dir, tmp_dir):
        """
        Prepare (creating if needed) a build results dir and a build
        tmp dir and return them as (results_dir, tmp_dir). Falsy
        arguments select defaults under the job directories.
        """
        if not tmp_dir:
            tmp_dir = os.path.join(self.tmpdir, 'build')
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        if not os.path.isdir(tmp_dir):
            e_msg = "Temp dir (%s) is not a dir - args backwards?" % self.tmpdir
            raise ValueError(e_msg)

        # We label the first build "build" and then subsequent ones
        # as "build.2", "build.3", etc. Whilst this is a little bit
        # inconsistent, 99.9% of jobs will only have one build
        # (that's not done as kernbench, sparse, or buildtest),
        # so it works out much cleaner. One of life's compromises.
        if not results_dir:
            results_dir = os.path.join(self.resultdir, 'build')
            i = 2
            while os.path.exists(results_dir):
                results_dir = os.path.join(self.resultdir, 'build.%d' % i)
                i += 1
        if not os.path.exists(results_dir):
            os.mkdir(results_dir)

        return (results_dir, tmp_dir)
226
227
228 def xen(self, base_tree, results_dir = '', tmp_dir = '', leave = False, \
229 kjob = None ):
230 """Summon a xen object"""
231 (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
232 build_dir = 'xen'
233 return xen.xen(self, base_tree, results_dir, tmp_dir, build_dir, leave, kjob)
234
235
236 def kernel(self, base_tree, results_dir = '', tmp_dir = '', leave = False):
237 """Summon a kernel object"""
mbligh669caa12007-11-05 18:32:13 +0000238 (results_dir, tmp_dir) = self.setup_dirs(results_dir, tmp_dir)
mbligh8baa2ea2006-12-17 23:01:24 +0000239 build_dir = 'linux'
mbligh6ee7ee02007-11-13 23:49:05 +0000240 return kernel.auto_kernel(self, base_tree, results_dir,
241 tmp_dir, build_dir, leave)
mblighf4c35322006-03-13 01:01:10 +0000242
mblighcaa605c2006-10-02 00:37:35 +0000243
mbligh6b504ff2007-12-12 21:03:49 +0000244 def barrier(self, *args, **kwds):
mblighfadca202006-09-23 04:40:01 +0000245 """Create a barrier object"""
mbligh6b504ff2007-12-12 21:03:49 +0000246 return barrier.barrier(*args, **kwds)
mblighfadca202006-09-23 04:40:01 +0000247
mblighcaa605c2006-10-02 00:37:35 +0000248
mbligh4b089662006-06-14 22:34:58 +0000249 def setup_dep(self, deps):
mblighc86b0b42006-07-28 17:35:28 +0000250 """Set up the dependencies for this test.
251
252 deps is a list of libraries required for this test.
253 """
mbligh4b089662006-06-14 22:34:58 +0000254 for dep in deps:
255 try:
apw870988b2007-09-25 16:50:53 +0000256 os.chdir(os.path.join(self.autodir, 'deps', dep))
mbligh302482e2008-05-01 20:06:16 +0000257 autotest_utils.system('./' + dep + '.py')
mbligh4b089662006-06-14 22:34:58 +0000258 except:
mbligh302482e2008-05-01 20:06:16 +0000259 err = "setting up dependency " + dep + "\n"
260 raise error.UnhandledError(err)
mbligh4b089662006-06-14 22:34:58 +0000261
262
    def __runtest(self, url, tag, args, dargs):
        # Run the test in a forked subprocess (logging under the job
        # resultdir) so a misbehaving test cannot take down the job.
        try:
            l = lambda : test.runtest(self, url, tag, args, dargs)
            pid = parallel.fork_start(self.resultdir, l)
            parallel.fork_waitfor(self.resultdir, pid)
        except error.AutotestError:
            # Autotest's own error types pass through untouched.
            raise
        except:
            # Anything else is wrapped so callers see a uniform type.
            raise error.UnhandledError('running test ' + \
                    self.__class__.__name__ + "\n")
apwf1a81162006-04-25 10:10:29 +0000273
mblighcaa605c2006-10-02 00:37:35 +0000274
    def run_test(self, url, *args, **dargs):
        """Summon a test object and run it.

        tag
                tag to add to testname
        url
                url of the test to run

        The 'tag' and 'container' keyword args are consumed here;
        everything else is passed through to the test. Returns True
        on success, False if the test raised a TestError; any other
        exception is re-raised.
        """

        if not url:
            raise TypeError("Test name is invalid. "
                            "Switched arguments?")
        (group, testname) = test.testname(url)
        tag = dargs.pop('tag', None)
        container = dargs.pop('container', None)
        # Results subdir is "<testname>" or "<testname>.<tag>".
        subdir = testname
        if tag:
            subdir += '.' + tag

        if container:
            # container is a dict; both the current and legacy key
            # names are accepted for each setting.
            cname = container.get('name', None)
            if not cname:   # get old name
                cname = container.get('container_name', None)
            mbytes = container.get('mbytes', None)
            if not mbytes:  # get old name
                mbytes = container.get('mem', None)
            cpus = container.get('cpus', None)
            if not cpus:    # get old name
                cpus = container.get('cpu', None)
            root = container.get('root', None)
            self.new_container(mbytes=mbytes, cpus=cpus,
                               root=root, name=cname)
            # We are running in a container now...

        def log_warning(reason):
            self.record("WARN", subdir, testname, reason)
        # disk_usage_monitor is defined elsewhere in this file;
        # it wraps group_func with disk-usage-rate checking.
        @disk_usage_monitor.watch(log_warning, "/",
                                  self.max_disk_usage_rate)
        def group_func():
            try:
                self.__runtest(url, tag, args, dargs)
            except error.TestNAError, detail:
                self.record('TEST_NA', subdir, testname,
                            str(detail))
                raise
            except Exception, detail:
                self.record('FAIL', subdir, testname,
                            str(detail))
                raise
            else:
                self.record('GOOD', subdir, testname,
                            'completed successfully')

        result, exc_info = self.__rungroup(subdir, group_func)
        if container:
            self.release_container()
        if exc_info and isinstance(exc_info[1], error.TestError):
            return False
        elif exc_info:
            # Re-raise with the original traceback (py2 3-arg raise).
            raise exc_info[0], exc_info[1], exc_info[2]
        else:
            return True
337
338
    def __rungroup(self, name, function, *args, **dargs):
        """\
        name:
                name of the group
        function:
                subroutine to run
        *args:
                arguments for the function

        Returns a 2-tuple (result, exc_info) where result
        is the return value of function, and exc_info is
        the sys.exc_info() of the exception thrown by the
        function (which may be None).
        """

        result, exc_info = None, None
        try:
            self.record('START', None, name)
            self._increment_group_level()
            result = function(*args, **dargs)
            self._decrement_group_level()
            self.record('END GOOD', None, name)
        except error.TestNAError, e:
            # Test not applicable: recorded, but not treated as a
            # failure and not propagated via exc_info.
            self._decrement_group_level()
            self.record('END TEST_NA', None, name, str(e))
        except Exception, e:
            # Capture exc_info for the caller to inspect/re-raise;
            # the indent level must be restored before END is logged.
            exc_info = sys.exc_info()
            self._decrement_group_level()
            err_msg = str(e) + '\n' + traceback.format_exc()
            self.record('END FAIL', None, name, err_msg)

        return result, exc_info
apw0865f482006-03-30 18:50:19 +0000371
mblighd7fb4a62006-10-01 00:57:53 +0000372
apw1da244b2007-09-27 17:18:01 +0000373 def run_group(self, function, *args, **dargs):
mbligh88ab90f2007-08-29 15:52:49 +0000374 """\
375 function:
376 subroutine to run
377 *args:
378 arguments for the function
379 """
380
mbligh7dd510c2007-11-13 17:11:22 +0000381 # Allow the tag for the group to be specified
mbligh88ab90f2007-08-29 15:52:49 +0000382 name = function.__name__
mbligh7dd510c2007-11-13 17:11:22 +0000383 tag = dargs.pop('tag', None)
384 if tag:
385 name = tag
apw1da244b2007-09-27 17:18:01 +0000386
mbligh7dd510c2007-11-13 17:11:22 +0000387 result, exc_info = self.__rungroup(name, function,
388 *args, **dargs)
apw1da244b2007-09-27 17:18:01 +0000389
mbligh7dd510c2007-11-13 17:11:22 +0000390 # if there was a non-TestError exception, raise it
mbligh302482e2008-05-01 20:06:16 +0000391 if exc_info and not isinstance(exc_info[1], error.TestError):
mbligh7dd510c2007-11-13 17:11:22 +0000392 err = ''.join(traceback.format_exception(*exc_info))
mbligh302482e2008-05-01 20:06:16 +0000393 raise error.TestError(name + ' failed\n' + err)
mbligh88ab90f2007-08-29 15:52:49 +0000394
mbligh7dd510c2007-11-13 17:11:22 +0000395 # pass back the actual return value from the function
apw08403ca2007-09-27 17:17:22 +0000396 return result
397
mbligh88ab90f2007-08-29 15:52:49 +0000398
    def new_container(self, mbytes=None, cpus=None, root=None, name=None):
        """
        Confine this job (and all subsequently forked tests) to a
        new cpuset container. All parameters are optional; name
        defaults to a pid-derived unique name.
        """
        if not autotest_utils.grep('cpuset', '/proc/filesystems'):
            print "Containers not enabled by latest reboot"
            return # containers weren't enabled in this kernel boot
        pid = os.getpid()
        if not name:
            name = 'test%d' % pid # make arbitrary unique name
        self.container = cpuset.cpuset(name, job_size=mbytes,
                                       job_pid=pid, cpus=cpus, root=root)
        # This job's python shell is now running in the new container
        # and all forked test processes will inherit that container
411
412 def release_container(self):
413 if self.container:
mbligh337bb762008-04-16 21:23:10 +0000414 self.container.release()
mbligh68119582008-01-25 18:16:41 +0000415 self.container = None
416
417
418 def cpu_count(self):
419 if self.container:
420 return len(self.container.cpus)
jadmanskia9c75c42008-05-01 22:05:31 +0000421 return autotest_utils.count_cpus() # use total system count
mbligh68119582008-01-25 18:16:41 +0000422
423
    # Check the passed kernel identifier against the command line
    # and the running kernel, abort the job on mismatch.
    # NOTE(review): patches=[] is a mutable default argument; it is
    # only read here, but confirm no caller mutates it.
    def kernel_check_ident(self, expected_when, expected_id, subdir,
                           type = 'src', patches=[]):
        print (("POST BOOT: checking booted kernel " +
                "mark=%d identity='%s' type='%s'") %
               (expected_when, expected_id, type))

        running_id = autotest_utils.running_os_ident()

        cmdline = autotest_utils.read_one_line("/proc/cmdline")

        # The boot entry embeds IDENT=<mark> in the kernel command
        # line; recover it (-1 if absent).
        find_sum = re.compile(r'.*IDENT=(\d+)')
        m = find_sum.match(cmdline)
        cmdline_when = -1
        if m:
            cmdline_when = int(m.groups()[0])

        # We have all the facts, see if they indicate we
        # booted the requested kernel or not.
        bad = False
        if (type == 'src' and expected_id != running_id or
            type == 'rpm' and
            not running_id.startswith(expected_id + '::')):
            print "check_kernel_ident: kernel identifier mismatch"
            bad = True
        if expected_when != cmdline_when:
            print "check_kernel_ident: kernel command line mismatch"
            bad = True

        if bad:
            print " Expected Ident: " + expected_id
            print " Running Ident: " + running_id
            print " Expected Mark: %d" % (expected_when)
            print "Command Line Mark: %d" % (cmdline_when)
            print " Command Line: " + cmdline

            raise error.JobError("boot failure", "reboot.verify")

        # Success: log the verified kernel (and any patches) and
        # close the 'reboot' status group opened by reboot().
        kernel_info = {'kernel': expected_id}
        for i, patch in enumerate(patches):
            kernel_info["patch%d" % i] = patch
        self.record('GOOD', subdir, 'reboot.verify', expected_id)
        self._decrement_group_level()
        self.record('END GOOD', subdir, 'reboot',
                    optional_fields=kernel_info)
apwce73d892007-09-25 16:53:05 +0000470
471
mblighc2359852007-08-28 18:11:48 +0000472 def filesystem(self, device, mountpoint = None, loop_size = 0):
mblighd7fb4a62006-10-01 00:57:53 +0000473 if not mountpoint:
474 mountpoint = self.tmpdir
mblighc2359852007-08-28 18:11:48 +0000475 return filesystem.filesystem(self, device, mountpoint,loop_size)
mblighd7fb4a62006-10-01 00:57:53 +0000476
mblighcaa62c22008-04-07 21:51:17 +0000477
    def enable_external_logging(self):
        """Hook for subclasses to start external logging; no-op here."""
        pass
480
481
    def disable_external_logging(self):
        """Hook for subclasses to stop external logging; no-op here."""
        pass
484
485
    def reboot_setup(self):
        """Hook for subclasses to prepare for a reboot; no-op here."""
        pass
488
mblighcaa605c2006-10-02 00:37:35 +0000489
    def reboot(self, tag='autotest'):
        """Reboot the machine into the boot entry named by tag.

        Opens a 'reboot' status group (closed after boot — see
        kernel_check_ident), selects the bootloader entry, kicks off
        an asynchronous reboot, and exits the job via quit() so it
        can continue after restart.
        """
        self.reboot_setup()
        self.record('START', None, 'reboot')
        self._increment_group_level()
        self.record('GOOD', None, 'reboot.start')
        self.harness.run_reboot()
        default = self.config_get('boot.set_default')
        if default:
            self.bootloader.set_default(tag)
        else:
            self.bootloader.boot_once(tag)
        # Detach the actual reboot so this process has time to exit
        # cleanly via quit() before the machine goes down.
        cmd = "(sleep 5; reboot) </dev/null >/dev/null 2>&1 &"
        autotest_utils.system(cmd)
        self.quit()
mblighcaa605c2006-10-02 00:37:35 +0000504
505
apw0865f482006-03-30 18:50:19 +0000506 def noop(self, text):
507 print "job: noop: " + text
508
mblighcaa605c2006-10-02 00:37:35 +0000509
    def parallel(self, *tasklist):
        """Run tasks in parallel.

        Each task is a sequence whose first element is a callable and
        the rest its arguments. Every task runs in its own forked
        subprocess with a per-task status log; the logs are merged
        back into the main log as the tasks are reaped. If any task
        raised, a JobError carrying all the exceptions is raised at
        the end.
        """

        pids = []
        old_log_filename = self.log_filename
        for i, task in enumerate(tasklist):
            # Give each child a distinct status log ("status.<i>").
            self.log_filename = old_log_filename + (".%d" % i)
            # The lambda is only evaluated in the child forked in the
            # same iteration, so binding 'task' here is safe.
            task_func = lambda: task[0](*task[1:])
            pids.append(parallel.fork_start(self.resultdir,
                                            task_func))

        old_log_path = os.path.join(self.resultdir, old_log_filename)
        old_log = open(old_log_path, "a")
        exceptions = []
        for i, pid in enumerate(pids):
            # wait for the task to finish
            try:
                parallel.fork_waitfor(self.resultdir, pid)
            except Exception, e:
                # Collect failures so the remaining tasks are still
                # reaped and their logs merged.
                exceptions.append(e)
            # copy the logs from the subtask into the main log
            new_log_path = old_log_path + (".%d" % i)
            if os.path.exists(new_log_path):
                new_log = open(new_log_path)
                old_log.write(new_log.read())
                new_log.close()
                old_log.flush()
                os.remove(new_log_path)
        old_log.close()

        self.log_filename = old_log_filename

        # handle any exceptions raised by the parallel tasks
        if exceptions:
            msg = "%d task(s) failed" % len(exceptions)
            raise error.JobError(msg, str(exceptions), exceptions)
mblighd509b712008-01-14 17:41:25 +0000546
mblighcaa605c2006-10-02 00:37:35 +0000547
    def quit(self):
        # XXX: should have a better name.
        # Pause the harness, then unwind via JobContinue; presumably
        # caught by the outer job runner so the job (and its state
        # file) survive to be resumed later -- confirm at call site.
        self.harness.run_pause()
        raise error.JobContinue("more to come")
apw0865f482006-03-30 18:50:19 +0000552
mblighcaa605c2006-10-02 00:37:35 +0000553
    def complete(self, status):
        """Clean up and exit"""
        # We are about to exit 'complete' so clean up the control file.
        # Best-effort: a missing state file is not an error.
        try:
            os.unlink(self.state_file)
        except:
            pass

        self.harness.run_complete()
        self.disable_external_logging()
        sys.exit(status)
apw0865f482006-03-30 18:50:19 +0000565
mblighcaa605c2006-10-02 00:37:35 +0000566
mbligh366ff1b2008-04-25 16:07:56 +0000567 def set_state(self, var, val):
568 # Deep copies make sure that the state can't be altered
569 # without it being re-written. Perf wise, deep copies
570 # are overshadowed by pickling/loading.
571 self.state[var] = copy.deepcopy(val)
572 pickle.dump(self.state, open(self.state_file, 'w'))
573
574
    def __load_state(self):
        # One-shot initialization (from __init__) of the persistent
        # state dict from the pickled state file. state_existed
        # records whether a previous run left state behind.
        assert not hasattr(self, "state")
        try:
            self.state = pickle.load(open(self.state_file, 'r'))
            self.state_existed = True
        except Exception:
            # Missing or unreadable state file: start fresh.
            print "Initializing the state engine."
            self.state = {}
            self.set_state('__steps', []) # writes pickle file
            self.state_existed = False
mbligh366ff1b2008-04-25 16:07:56 +0000585
586
587 def get_state(self, var, default=None):
588 if var in self.state or default == None:
589 val = self.state[var]
590 else:
591 val = default
592 return copy.deepcopy(val)
593
594
    def __create_step_tuple(self, fn, args, dargs):
        # Normalize a next-step spec into a picklable (name, args,
        # dargs) tuple for the persistent step queue.
        # Legacy code passes in an array where the first arg is
        # the function or its name.
        if isinstance(fn, list):
            assert(len(args) == 0)
            assert(len(dargs) == 0)
            args = fn[1:]
            fn = fn[0]
        # Pickling actual functions is hairy, thus we have to call
        # them by name. Unfortunately, this means only functions
        # defined globally can be used as a next step.
        if isinstance(fn, types.FunctionType):
            fn = fn.__name__
        if not isinstance(fn, types.StringTypes):
            raise StepError("Next steps must be functions or "
                            "strings containing the function name")
        return (fn, args, dargs)
612
613
mbligh12a04cb2008-04-25 16:07:20 +0000614 def next_step(self, fn, *args, **dargs):
mblighc86b0b42006-07-28 17:35:28 +0000615 """Define the next step"""
mblighf1ae0a42008-04-25 16:09:20 +0000616 steps = self.get_state('__steps')
mbligh366ff1b2008-04-25 16:07:56 +0000617 steps.append(self.__create_step_tuple(fn, args, dargs))
mblighf1ae0a42008-04-25 16:09:20 +0000618 self.set_state('__steps', steps)
apw0865f482006-03-30 18:50:19 +0000619
mblighcaa605c2006-10-02 00:37:35 +0000620
mbligh12a04cb2008-04-25 16:07:20 +0000621 def next_step_prepend(self, fn, *args, **dargs):
mbligh237bed32007-09-05 13:05:57 +0000622 """Insert a new step, executing first"""
mblighf1ae0a42008-04-25 16:09:20 +0000623 steps = self.get_state('__steps')
mbligh366ff1b2008-04-25 16:07:56 +0000624 steps.insert(0, self.__create_step_tuple(fn, args, dargs))
mblighf1ae0a42008-04-25 16:09:20 +0000625 self.set_state('__steps', steps)
mbligh237bed32007-09-05 13:05:57 +0000626
627
    def step_engine(self):
        """the stepping engine -- if the control file defines
        step_init we will be using this engine to drive multiple runs.
        """
        """Do the next step"""

        # Set up the environment and then interpret the control file.
        # Some control files will have code outside of functions,
        # which means we need to have our state engine initialized
        # before reading in the file.
        lcl = {'job': self}
        exec(JOB_PREAMBLE, lcl, lcl)
        execfile(self.control, lcl, lcl)

        # If we loaded in a mid-job state file, then we presumably
        # know what steps we have yet to run.
        if not self.state_existed:
            if lcl.has_key('step_init'):
                self.next_step([lcl['step_init']])

        # Iterate through the steps. If we reboot, we'll simply
        # continue iterating on the next step.
        while len(self.get_state('__steps')) > 0:
            # Pop and persist BEFORE running, so a step that reboots
            # the machine is not re-run on continuation.
            steps = self.get_state('__steps')
            (fn, args, dargs) = steps.pop(0)
            self.set_state('__steps', steps)

            # Steps are stored by name; look them up and call them
            # in the control file's namespace.
            lcl['__args'] = args
            lcl['__dargs'] = dargs
            exec(fn + "(*__args, **__dargs)", lcl, lcl)
apw0865f482006-03-30 18:50:19 +0000658
mblighcaa605c2006-10-02 00:37:35 +0000659
jadmanskia9c75c42008-05-01 22:05:31 +0000660 def _init_group_level(self):
661 self.group_level = self.get_state("__group_level", default=0)
662
663
664 def _increment_group_level(self):
665 self.group_level += 1
666 self.set_state("__group_level", self.group_level)
667
668
669 def _decrement_group_level(self):
670 self.group_level -= 1
671 self.set_state("__group_level", self.group_level)
672
673
674 def record(self, status_code, subdir, operation, status = '',
675 optional_fields=None):
mbligh09f288a2007-09-18 21:34:57 +0000676 """
677 Record job-level status
apw7db8d0b2006-10-09 08:10:25 +0000678
mbligh09f288a2007-09-18 21:34:57 +0000679 The intent is to make this file both machine parseable and
680 human readable. That involves a little more complexity, but
681 really isn't all that bad ;-)
682
683 Format is <status code>\t<subdir>\t<operation>\t<status>
684
685 status code: (GOOD|WARN|FAIL|ABORT)
686 or START
687 or END (GOOD|WARN|FAIL|ABORT)
688
689 subdir: MUST be a relevant subdirectory in the results,
690 or None, which will be represented as '----'
691
692 operation: description of what you ran (e.g. "dbench", or
693 "mkfs -t foobar /dev/sda9")
694
695 status: error message or "completed sucessfully"
696
697 ------------------------------------------------------------
698
699 Initial tabs indicate indent levels for grouping, and is
mbligh7dd510c2007-11-13 17:11:22 +0000700 governed by self.group_level
mbligh09f288a2007-09-18 21:34:57 +0000701
702 multiline messages have secondary lines prefaced by a double
703 space (' ')
704 """
705
mblighb0570ad2007-09-19 18:18:11 +0000706 if subdir:
707 if re.match(r'[\n\t]', subdir):
jadmanskia9c75c42008-05-01 22:05:31 +0000708 raise ValueError("Invalid character in "
709 "subdir string")
mblighb0570ad2007-09-19 18:18:11 +0000710 substr = subdir
711 else:
712 substr = '----'
mbligh09f288a2007-09-18 21:34:57 +0000713
mbligh302482e2008-05-01 20:06:16 +0000714 if not logging.is_valid_status(status_code):
jadmanskia9c75c42008-05-01 22:05:31 +0000715 raise ValueError("Invalid status code supplied: %s" %
716 status_code)
mbligh9c5ac322007-10-31 18:01:59 +0000717 if not operation:
718 operation = '----'
jadmanskia9c75c42008-05-01 22:05:31 +0000719
mbligh09f288a2007-09-18 21:34:57 +0000720 if re.match(r'[\n\t]', operation):
jadmanskia9c75c42008-05-01 22:05:31 +0000721 raise ValueError("Invalid character in "
722 "operation string")
mbligh09f288a2007-09-18 21:34:57 +0000723 operation = operation.rstrip()
jadmanskia9c75c42008-05-01 22:05:31 +0000724
725 if not optional_fields:
726 optional_fields = {}
727
mbligh09f288a2007-09-18 21:34:57 +0000728 status = status.rstrip()
729 status = re.sub(r"\t", " ", status)
apw7db8d0b2006-10-09 08:10:25 +0000730 # Ensure any continuation lines are marked so we can
731 # detect them in the status file to ensure it is parsable.
jadmanskia9c75c42008-05-01 22:05:31 +0000732 status = re.sub(r"\n", "\n" + "\t" * self.group_level + " ",
733 status)
mbligh09f288a2007-09-18 21:34:57 +0000734
mbligh30270302007-11-05 20:33:52 +0000735 # Generate timestamps for inclusion in the logs
736 epoch_time = int(time.time()) # seconds since epoch, in UTC
737 local_time = time.localtime(epoch_time)
jadmanskia9c75c42008-05-01 22:05:31 +0000738 optional_fields["timestamp"] = str(epoch_time)
739 optional_fields["localtime"] = time.strftime("%b %d %H:%M:%S",
740 local_time)
mbligh30270302007-11-05 20:33:52 +0000741
jadmanskia9c75c42008-05-01 22:05:31 +0000742 fields = [status_code, substr, operation]
743 fields += ["%s=%s" % x for x in optional_fields.iteritems()]
744 fields.append(status)
745
746 msg = '\t'.join(str(x) for x in fields)
mbligh7dd510c2007-11-13 17:11:22 +0000747 msg = '\t' * self.group_level + msg
apw7db8d0b2006-10-09 08:10:25 +0000748
mblighd528d302007-12-19 16:19:05 +0000749 msg_tag = ""
750 if "." in self.log_filename:
751 msg_tag = self.log_filename.split(".", 1)[1]
752
jadmanskia9c75c42008-05-01 22:05:31 +0000753 self.harness.test_status_detail(status_code, substr,
754 operation, status, msg_tag)
mblighd528d302007-12-19 16:19:05 +0000755 self.harness.test_status(msg, msg_tag)
756
757 # log to stdout (if enabled)
758 #if self.log_filename == self.DEFAULT_LOG_FILENAME:
apwf1a81162006-04-25 10:10:29 +0000759 print msg
mblighd528d302007-12-19 16:19:05 +0000760
761 # log to the "root" status log
762 status_file = os.path.join(self.resultdir, self.log_filename)
mbligh7dd510c2007-11-13 17:11:22 +0000763 open(status_file, "a").write(msg + "\n")
mblighd528d302007-12-19 16:19:05 +0000764
765 # log to the subdir status log (if subdir is set)
mblighb0570ad2007-09-19 18:18:11 +0000766 if subdir:
mblighadff6ca2008-01-22 16:38:25 +0000767 dir = os.path.join(self.resultdir, subdir)
768 if not os.path.exists(dir):
769 os.mkdir(dir)
770
771 status_file = os.path.join(dir,
mblighd528d302007-12-19 16:19:05 +0000772 self.DEFAULT_LOG_FILENAME)
mblighb0570ad2007-09-19 18:18:11 +0000773 open(status_file, "a").write(msg + "\n")
apwce9abe92006-04-27 14:14:04 +0000774
775
class disk_usage_monitor:
    """Watches free space on a device over an interval and calls a
    logging function with a warning if space was consumed faster than
    a given MB/hour rate."""

    def __init__(self, logging_func, device, max_mb_per_hour):
        # logging_func: callable taking a single warning-message string
        # device: device/path accepted by autotest_utils.freespace
        # max_mb_per_hour: maximum allowed consumption rate; a falsy
        #     value disables the check entirely
        self.func = logging_func
        self.device = device
        self.max_mb_per_hour = max_mb_per_hour


    def start(self):
        """Snapshot the device's free space and the current time."""
        self.initial_space = autotest_utils.freespace(self.device)
        self.start_time = time.time()


    def stop(self):
        """Re-measure free space and warn (via the logging function)
        if the usage rate since start() exceeded max_mb_per_hour."""
        # if no maximum usage rate was set, we don't need to
        # generate any warnings
        if not self.max_mb_per_hour:
            return

        final_space = autotest_utils.freespace(self.device)
        used_space = self.initial_space - final_space
        stop_time = time.time()
        total_time = stop_time - self.start_time
        # round up the time to one minute, to keep extremely short
        # tests from generating false positives due to short, badly
        # timed bursts of activity
        total_time = max(total_time, 60.0)

        # determine the usage rate
        bytes_per_sec = used_space / total_time
        mb_per_sec = bytes_per_sec / 1024**2
        mb_per_hour = mb_per_sec * 60 * 60

        if mb_per_hour > self.max_mb_per_hour:
            msg = ("disk space on %s was consumed at a rate of "
                   "%.2f MB/hour")
            msg %= (self.device, mb_per_hour)
            self.func(msg)


    @classmethod
    def watch(cls, *monitor_args, **monitor_dargs):
        """ Generic decorator to wrap a function call with the
        standard create-monitor -> start -> call -> stop idiom."""
        def decorator(func):
            def watched_func(*args, **dargs):
                monitor = cls(*monitor_args, **monitor_dargs)
                monitor.start()
                try:
                    # propagate the wrapped function's return value;
                    # previously it was discarded, silently breaking
                    # any caller that relied on the result
                    return func(*args, **dargs)
                finally:
                    monitor.stop()
            return watched_func
        return decorator
829
830
mblighcaa62c22008-04-07 21:51:17 +0000831def runjob(control, cont = False, tag = "default", harness_type = '',
832 use_external_logging = False):
mblighc86b0b42006-07-28 17:35:28 +0000833 """The main interface to this module
834
mbligh72b88fc2006-12-16 18:41:35 +0000835 control
mblighc86b0b42006-07-28 17:35:28 +0000836 The control file to use for this job.
837 cont
838 Whether this is the continuation of a previously started job
839 """
mblighb4eef242007-07-23 18:22:49 +0000840 control = os.path.abspath(control)
apwce9abe92006-04-27 14:14:04 +0000841 state = control + '.state'
842
843 # instantiate the job object ready for the control file.
844 myjob = None
845 try:
846 # Check that the control file is valid
847 if not os.path.exists(control):
mbligh302482e2008-05-01 20:06:16 +0000848 raise error.JobError(control +
849 ": control file not found")
apwce9abe92006-04-27 14:14:04 +0000850
851 # When continuing, the job is complete when there is no
852 # state file, ensure we don't try and continue.
mblighf3fef462006-09-13 16:05:05 +0000853 if cont and not os.path.exists(state):
mbligh302482e2008-05-01 20:06:16 +0000854 raise error.JobComplete("all done")
mblighf3fef462006-09-13 16:05:05 +0000855 if cont == False and os.path.exists(state):
apwce9abe92006-04-27 14:14:04 +0000856 os.unlink(state)
857
mblighcaa62c22008-04-07 21:51:17 +0000858 myjob = job(control, tag, cont, harness_type,
859 use_external_logging)
apwce9abe92006-04-27 14:14:04 +0000860
861 # Load in the users control file, may do any one of:
862 # 1) execute in toto
863 # 2) define steps, and select the first via next_step()
864 myjob.step_engine()
865
mbligh302482e2008-05-01 20:06:16 +0000866 except error.JobContinue:
apwce9abe92006-04-27 14:14:04 +0000867 sys.exit(5)
868
mbligh302482e2008-05-01 20:06:16 +0000869 except error.JobComplete:
apwb832e1b2007-11-24 20:24:38 +0000870 sys.exit(1)
871
mbligh302482e2008-05-01 20:06:16 +0000872 except error.JobError, instance:
apwce9abe92006-04-27 14:14:04 +0000873 print "JOB ERROR: " + instance.args[0]
mbligh9c5ac322007-10-31 18:01:59 +0000874 if myjob:
mbligh30270302007-11-05 20:33:52 +0000875 command = None
876 if len(instance.args) > 1:
877 command = instance.args[1]
878 myjob.record('ABORT', None, command, instance.args[0])
jadmanskia9c75c42008-05-01 22:05:31 +0000879 myjob._decrement_group_level()
mblighc3430162007-11-14 23:57:19 +0000880 myjob.record('END ABORT', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000881 assert(myjob.group_level == 0)
apwce9abe92006-04-27 14:14:04 +0000882 myjob.complete(1)
apwb832e1b2007-11-24 20:24:38 +0000883 else:
884 sys.exit(1)
apwce9abe92006-04-27 14:14:04 +0000885
mblighc3430162007-11-14 23:57:19 +0000886 except Exception, e:
mbligh302482e2008-05-01 20:06:16 +0000887 msg = str(e) + '\n' + traceback.format_exc()
mblighc3430162007-11-14 23:57:19 +0000888 print "JOB ERROR: " + msg
mblighfbfb77d2007-02-15 18:54:03 +0000889 if myjob:
mblighc3430162007-11-14 23:57:19 +0000890 myjob.record('ABORT', None, None, msg)
jadmanskia9c75c42008-05-01 22:05:31 +0000891 myjob._decrement_group_level()
mblighc3430162007-11-14 23:57:19 +0000892 myjob.record('END ABORT', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000893 assert(myjob.group_level == 0)
mbligh9c5ac322007-10-31 18:01:59 +0000894 myjob.complete(1)
apwb832e1b2007-11-24 20:24:38 +0000895 else:
896 sys.exit(1)
mbligh892d37f2007-03-01 17:03:25 +0000897
mbligh0144e5a2008-03-07 18:17:53 +0000898 # If we get here, then we assume the job is complete and good.
jadmanskia9c75c42008-05-01 22:05:31 +0000899 myjob._decrement_group_level()
mbligh0144e5a2008-03-07 18:17:53 +0000900 myjob.record('END GOOD', None, None)
jadmanskia9c75c42008-05-01 22:05:31 +0000901 assert(myjob.group_level == 0)
mbligh0144e5a2008-03-07 18:17:53 +0000902
mbligh892d37f2007-03-01 17:03:25 +0000903 myjob.complete(0)
mblighcaa62c22008-04-07 21:51:17 +0000904
905
# site_job.py may be nonexistent or empty; make sure that an appropriate
# site_job class is created nevertheless
try:
    from site_job import site_job
except ImportError:
    # no site-specific customizations available: fall back to a no-op
    # subclass so the concrete 'job' class below always has a valid base
    class site_job(base_job):
        pass

# The concrete job class instantiated by runjob(); site_job provides the
# hook point for site-specific behavior between base_job and job.
class job(site_job):
    pass
jadmanski87cbc7f2008-05-13 18:17:10 +0000916