blob: 73c33fa37e068073fa803f4120fec3437c968b96 [file] [log] [blame]
mbligh6231cd62008-02-02 19:18:33 +00001# Shell class for a test, inherited by all individual tests
2#
3# Methods:
jadmanski0afbb632008-06-06 21:10:57 +00004# __init__ initialise
5# initialize run once for each job
6# setup run once for each new version of the test installed
7# run run the test (wrapped by job.run_test())
mbligh6231cd62008-02-02 19:18:33 +00008#
9# Data:
jadmanski0afbb632008-06-06 21:10:57 +000010# job backreference to the job this test instance is part of
11# outputdir eg. results/<job>/<testname.tag>
12# resultsdir eg. results/<job>/<testname.tag>/results
13# profdir eg. results/<job>/<testname.tag>/profiling
14# debugdir eg. results/<job>/<testname.tag>/debug
15# bindir eg. tests/<test>
16# src eg. tests/<test>/src
jadmanski825e24c2008-08-27 20:54:31 +000017# tmpdir eg. tmp/<tempname>_<testname.tag>
mbligh6231cd62008-02-02 19:18:33 +000018
Keith Haddow1e5c7012016-03-09 16:05:37 -080019#pylint: disable=C0111
Aviv Keshet39164ca2013-03-27 15:08:33 -070020
Xixuan Wu5019d692017-11-10 15:58:33 -080021import fcntl
22import json
Scott Zawalski91493c82013-01-25 16:15:20 -050023import logging
Xixuan Wu5019d692017-11-10 15:58:33 -080024import os
25import re
26import shutil
27import stat
28import sys
29import tempfile
30import time
31import traceback
mbligh6231cd62008-02-02 19:18:33 +000032
mbligh53da18e2009-01-05 21:13:26 +000033from autotest_lib.client.bin import utils
Ilja H. Friedel8753d832014-10-07 22:30:06 -070034from autotest_lib.client.common_lib import error
Xixuan Wue8ce6542017-11-07 17:22:10 -080035from autotest_lib.client.common_lib import utils as client_utils
36
37try:
38 from chromite.lib import metrics
39except ImportError:
40 metrics = client_utils.metrics_mock
mbligh6231cd62008-02-02 19:18:33 +000041
42
class base_test(object):
    # When True, keep the contents of srcdir across version updates instead
    # of re-extracting the test source (passed to utils.update_version()
    # during the setup phase in _exec()).
    preserve_srcdir = False
mbligh6231cd62008-02-02 19:18:33 +000045
jadmanski0afbb632008-06-06 21:10:57 +000046 def __init__(self, job, bindir, outputdir):
47 self.job = job
mbligh21e33582009-02-04 18:18:31 +000048 self.pkgmgr = job.pkgmgr
jadmanski0afbb632008-06-06 21:10:57 +000049 self.autodir = job.autodir
jadmanski0afbb632008-06-06 21:10:57 +000050 self.outputdir = outputdir
showardb18134f2009-03-20 20:52:18 +000051 self.tagged_testname = os.path.basename(self.outputdir)
jadmanski0afbb632008-06-06 21:10:57 +000052 self.resultsdir = os.path.join(self.outputdir, 'results')
53 os.mkdir(self.resultsdir)
54 self.profdir = os.path.join(self.outputdir, 'profiling')
55 os.mkdir(self.profdir)
56 self.debugdir = os.path.join(self.outputdir, 'debug')
57 os.mkdir(self.debugdir)
Eric Li8b2954a2010-07-12 21:42:34 -070058 # TODO(ericli): figure out how autotest crash handler work with cros
Scott Zawalski91493c82013-01-25 16:15:20 -050059 # Once this is re-enabled import getpass. crosbug.com/31232
Eric Li8b2954a2010-07-12 21:42:34 -070060 # crash handler, we should restore it in near term.
Scott Zawalski91493c82013-01-25 16:15:20 -050061 # if getpass.getuser() == 'root':
62 # self.configure_crash_handler()
63 # else:
Eric Li8b2954a2010-07-12 21:42:34 -070064 self.crash_handling_enabled = False
jadmanski0afbb632008-06-06 21:10:57 +000065 self.bindir = bindir
jadmanski0afbb632008-06-06 21:10:57 +000066 self.srcdir = os.path.join(self.bindir, 'src')
mbligh5c1bb252009-03-25 22:06:49 +000067 self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
showardb18134f2009-03-20 20:52:18 +000068 dir=job.tmpdir)
mbligh7af09972009-04-17 22:17:08 +000069 self._keyvals = []
70 self._new_keyval = False
mbligh32cb5b42009-05-01 23:05:09 +000071 self.failed_constraints = []
mbligh5e703a22009-06-15 22:00:12 +000072 self.iteration = 0
mbligh742ae422009-05-13 20:46:41 +000073 self.before_iteration_hooks = []
74 self.after_iteration_hooks = []
mbligh6231cd62008-02-02 19:18:33 +000075
Dan Shi2ca97772013-10-21 17:17:27 -070076 # Flag to indicate if the test has succeeded or failed.
77 self.success = False
78
mbligh6231cd62008-02-02 19:18:33 +000079
    def configure_crash_handler(self):
        """Hook to set up crash handling; intentionally a no-op here.

        Subclasses may override.  Currently unused: __init__ forces
        crash_handling_enabled = False (see the TODO there).
        """
        pass
mbligh6894ce22009-09-18 19:56:30 +000082
83
    def crash_handler_report(self):
        """Hook to report collected crashes after a run; no-op by default.

        Called from _exec() after cleanup on both success and failure paths.
        """
        pass
mbligh6894ce22009-09-18 19:56:30 +000086
87
jadmanski0afbb632008-06-06 21:10:57 +000088 def assert_(self, expr, msg='Assertion failed.'):
89 if not expr:
90 raise error.TestError(msg)
mbligh6231cd62008-02-02 19:18:33 +000091
92
    def write_test_keyval(self, attr_dict):
        """Write test-level attribute keyvals into outputdir's keyval file.

        @param attr_dict: Dict of attribute keyvals to record for this test.
        """
        utils.write_keyval(self.outputdir, attr_dict)
jadmanskicc549172008-05-21 18:11:51 +000095
Ilja H. Friedel21bf5622014-09-08 18:59:04 -070096
jadmanski0afbb632008-06-06 21:10:57 +000097 @staticmethod
98 def _append_type_to_keys(dictionary, typename):
99 new_dict = {}
100 for key, value in dictionary.iteritems():
101 new_key = "%s{%s}" % (key, typename)
102 new_dict[new_key] = value
103 return new_dict
jadmanskicc549172008-05-21 18:11:51 +0000104
105
Fang Denge689e712013-11-13 18:27:06 -0800106 def output_perf_value(self, description, value, units=None,
Po-Hsien Wang3c4fc6f2017-05-20 12:11:58 -0700107 higher_is_better=None, graph=None,
108 replacement='_', replace_existing_values=False):
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700109 """
110 Records a measured performance value in an output file.
111
112 The output file will subsequently be parsed by the TKO parser to have
113 the information inserted into the results database.
114
115 @param description: A string describing the measured perf value. Must
116 be maximum length 256, and may only contain letters, numbers,
117 periods, dashes, and underscores. For example:
118 "page_load_time", "scrolling-frame-rate".
119 @param value: A number representing the measured perf value, or a list
120 of measured values if a test takes multiple measurements.
121 Measured perf values can be either ints or floats.
122 @param units: A string describing the units associated with the
123 measured perf value. Must be maximum length 32, and may only
124 contain letters, numbers, periods, dashes, and underscores.
125 For example: "msec", "fps", "score", "runs_per_second".
126 @param higher_is_better: A boolean indicating whether or not a "higher"
127 measured perf value is considered to be better. If False, it is
128 assumed that a "lower" measured value is considered to be
Ilja H. Friedeld06a7a52014-11-06 21:34:20 -0800129 better. This impacts dashboard plotting and email notification.
130 Pure autotests are expected to specify either True or False!
131 This value can be set to "None" to indicate that the perf
132 dashboard should apply the rules encoded via Chromium
133 unit-info.json. This is only used for tracking Chromium based
134 tests (in particular telemetry).
Fang Deng7f24f0b2013-11-12 11:22:16 -0800135 @param graph: A string indicating the name of the graph on which
Ilja H. Friedel21bf5622014-09-08 18:59:04 -0700136 the perf value will be subsequently displayed on the chrome perf
137 dashboard. This allows multiple metrics be grouped together on
138 the same graphs. Defaults to None, indicating that the perf
139 value should be displayed individually on a separate graph.
140 @param replacement: string to replace illegal characters in
141 |description| and |units| with.
Po-Hsien Wang3c4fc6f2017-05-20 12:11:58 -0700142 @param replace_existing_values: A boolean indicating whether or not a
143 new added perf value should replace existing perf.
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700144 """
145 if len(description) > 256:
146 raise ValueError('The description must be at most 256 characters.')
David Sharp78a16a52015-12-16 11:58:31 -0800147 if units and len(units) > 32:
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700148 raise ValueError('The units must be at most 32 characters.')
Ilja H. Friedel21bf5622014-09-08 18:59:04 -0700149
150 # If |replacement| is legal replace illegal characters with it.
151 string_regex = re.compile(r'[^-\.\w]')
152 if replacement is None or re.search(string_regex, replacement):
153 raise ValueError('Invalid replacement string to mask illegal '
154 'characters. May only contain letters, numbers, '
155 'periods, dashes, and underscores. '
156 'replacement: %s' % replacement)
157 description = re.sub(string_regex, replacement, description)
158 units = re.sub(string_regex, replacement, units) if units else None
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700159
Keith Haddow1e5c7012016-03-09 16:05:37 -0800160 charts = {}
161 output_file = os.path.join(self.resultsdir, 'results-chart.json')
162 if os.path.isfile(output_file):
163 with open(output_file, 'r') as fp:
164 contents = fp.read()
165 if contents:
166 charts = json.loads(contents)
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700167
Keith Haddow1e5c7012016-03-09 16:05:37 -0800168 if graph:
Keith Haddowdf2730b2016-03-28 21:31:18 -0700169 first_level = graph
Keith Haddow1e5c7012016-03-09 16:05:37 -0800170 second_level = description
171 else:
172 first_level = description
173 second_level = 'summary'
174
175 direction = 'up' if higher_is_better else 'down'
176
Keith Haddow4fc24652016-03-16 10:08:17 -0700177 # All input should be a number - but at times there are strings
178 # representing numbers logged, attempt to convert them to numbers.
179 # If a non number string is logged an exception will be thrown.
180 if isinstance(value, list):
181 value = map(float, value)
182 else:
183 value = float(value)
184
Keith Haddow1e5c7012016-03-09 16:05:37 -0800185 result_type = 'scalar'
186 value_key = 'value'
187 result_value = value
188
189 # The chart json spec go/telemetry-json differenciates between a single
190 # value vs a list of values. Lists of values get extra processing in
191 # the chromeperf dashboard ( mean, standard deviation etc)
192 # Tests can log one or more values for the same metric, to adhere stricly
193 # to the specification the first value logged is a scalar but if another
194 # value is logged the results become a list of scalar.
195 # TODO Figure out if there would be any difference of always using list
196 # of scalar even if there is just one item in the list.
Keith Haddowd804bc82016-03-15 17:16:03 -0700197 if isinstance(value, list):
198 result_type = 'list_of_scalar_values'
199 value_key = 'values'
200 if first_level in charts and second_level in charts[first_level]:
201 if 'values' in charts[first_level][second_level]:
202 result_value = charts[first_level][second_level]['values']
Keith Haddowd804bc82016-03-15 17:16:03 -0700203 elif 'value' in charts[first_level][second_level]:
204 result_value = [charts[first_level][second_level]['value']]
Po-Hsien Wang3c4fc6f2017-05-20 12:11:58 -0700205 if replace_existing_values:
206 result_value = value
207 else:
Keith Haddowd804bc82016-03-15 17:16:03 -0700208 result_value.extend(value)
209 else:
210 result_value = value
Po-Hsien Wang3c4fc6f2017-05-20 12:11:58 -0700211 elif (first_level in charts and second_level in charts[first_level] and
212 not replace_existing_values):
Keith Haddow1e5c7012016-03-09 16:05:37 -0800213 result_type = 'list_of_scalar_values'
214 value_key = 'values'
215 if 'values' in charts[first_level][second_level]:
216 result_value = charts[first_level][second_level]['values']
217 result_value.append(value)
218 else:
219 result_value = [charts[first_level][second_level]['value'], value]
220
Keith Haddowdf2730b2016-03-28 21:31:18 -0700221 test_data = {
222 second_level: {
223 'type': result_type,
224 'units': units,
225 value_key: result_value,
226 'improvement_direction': direction
227 }
228 }
229
230 if first_level in charts:
231 charts[first_level].update(test_data)
232 else:
233 charts.update({first_level: test_data})
234
Keith Haddow1e5c7012016-03-09 16:05:37 -0800235 with open(output_file, 'w') as fp:
236 fp.write(json.dumps(charts, indent=2))
Dennis Jeffrey918863f2013-06-19 16:49:48 -0700237
238
    def write_perf_keyval(self, perf_dict):
        """Record perf keyvals for the current iteration (no attr keyvals).

        @param perf_dict: Dict of performance keyvals to record.
        """
        self.write_iteration_keyval({}, perf_dict)
jadmanskicc549172008-05-21 18:11:51 +0000241
mbligh0b3dd5f2008-07-16 20:37:13 +0000242
    def write_attr_keyval(self, attr_dict):
        """Record attribute keyvals for the current iteration (no perf keyvals).

        @param attr_dict: Dict of attribute keyvals to record.
        """
        self.write_iteration_keyval(attr_dict, {})
mbligh0b3dd5f2008-07-16 20:37:13 +0000245
246
Aviv Keshetf0c82242017-05-18 22:06:40 -0700247 def write_iteration_keyval(self, attr_dict, perf_dict):
mbligh7af09972009-04-17 22:17:08 +0000248 # append the dictionaries before they have the {perf} and {attr} added
249 self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
250 self._new_keyval = True
251
mbligh0b3dd5f2008-07-16 20:37:13 +0000252 if attr_dict:
253 attr_dict = self._append_type_to_keys(attr_dict, "attr")
Aviv Keshetf0c82242017-05-18 22:06:40 -0700254 utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")
mbligh0b3dd5f2008-07-16 20:37:13 +0000255
256 if perf_dict:
257 perf_dict = self._append_type_to_keys(perf_dict, "perf")
Aviv Keshetf0c82242017-05-18 22:06:40 -0700258 utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")
jadmanskicc549172008-05-21 18:11:51 +0000259
jadmanski0afbb632008-06-06 21:10:57 +0000260 keyval_path = os.path.join(self.resultsdir, "keyval")
261 print >> open(keyval_path, "a"), ""
jadmanskicc549172008-05-21 18:11:51 +0000262
263
mbligh7af09972009-04-17 22:17:08 +0000264 def analyze_perf_constraints(self, constraints):
265 if not self._new_keyval:
266 return
267
jadmanski0d9ea772009-10-08 22:30:56 +0000268 # create a dict from the keyvals suitable as an environment for eval
269 keyval_env = self._keyvals[-1]['perf'].copy()
270 keyval_env['__builtins__'] = None
mbligh7af09972009-04-17 22:17:08 +0000271 self._new_keyval = False
mbligh32cb5b42009-05-01 23:05:09 +0000272 failures = []
jadmanski0d9ea772009-10-08 22:30:56 +0000273
274 # evaluate each constraint using the current keyvals
mbligh7af09972009-04-17 22:17:08 +0000275 for constraint in constraints:
jadmanski0d9ea772009-10-08 22:30:56 +0000276 logging.info('___________________ constraint = %s', constraint)
277 logging.info('___________________ keyvals = %s', keyval_env)
278
mbligh7af09972009-04-17 22:17:08 +0000279 try:
jadmanski0d9ea772009-10-08 22:30:56 +0000280 if not eval(constraint, keyval_env):
mbligh8beabca2009-05-21 01:33:15 +0000281 failures.append('%s: constraint was not met' % constraint)
mbligh7af09972009-04-17 22:17:08 +0000282 except:
mbligh32cb5b42009-05-01 23:05:09 +0000283 failures.append('could not evaluate constraint: %s'
284 % constraint)
mbligh7af09972009-04-17 22:17:08 +0000285
mbligh32cb5b42009-05-01 23:05:09 +0000286 # keep track of the errors for each iteration
287 self.failed_constraints.append(failures)
288
289
290 def process_failed_constraints(self):
291 msg = ''
292 for i, failures in enumerate(self.failed_constraints):
293 if failures:
294 msg += 'iteration %d:%s ' % (i, ','.join(failures))
295
296 if msg:
297 raise error.TestFail(msg)
mbligh7af09972009-04-17 22:17:08 +0000298
299
mbligh742ae422009-05-13 20:46:41 +0000300 def register_before_iteration_hook(self, iteration_hook):
301 """
302 This is how we expect test writers to register a before_iteration_hook.
303 This adds the method to the list of hooks which are executed
304 before each iteration.
305
306 @param iteration_hook: Method to run before each iteration. A valid
307 hook accepts a single argument which is the
308 test object.
309 """
310 self.before_iteration_hooks.append(iteration_hook)
311
312
313 def register_after_iteration_hook(self, iteration_hook):
314 """
315 This is how we expect test writers to register an after_iteration_hook.
316 This adds the method to the list of hooks which are executed
Christopher Grantb16491a2015-06-11 15:46:19 -0400317 after each iteration. Hooks are executed starting with the most-
318 recently registered, in stack fashion.
mbligh742ae422009-05-13 20:46:41 +0000319
320 @param iteration_hook: Method to run after each iteration. A valid
321 hook accepts a single argument which is the
322 test object.
323 """
324 self.after_iteration_hooks.append(iteration_hook)
325
326
    def initialize(self):
        """Per-job initialization hook (run once for each job); override in subclasses."""
        pass
mbligh6231cd62008-02-02 19:18:33 +0000329
330
    def setup(self):
        """Build/install hook, run once per new test version; override in subclasses."""
        pass
mbligh6231cd62008-02-02 19:18:33 +0000333
334
    def warmup(self, *args, **dargs):
        """Optional warm-up hook invoked by _exec() before execute(); no-op by default."""
        pass
mblighb53a3472008-07-11 21:27:58 +0000337
338
mblighb5dac432008-11-27 00:38:44 +0000339 def drop_caches_between_iterations(self):
340 if self.job.drop_caches_between_iterations:
mbligh53da18e2009-01-05 21:13:26 +0000341 utils.drop_caches()
mblighb5dac432008-11-27 00:38:44 +0000342
343
Scott Zawalski91493c82013-01-25 16:15:20 -0500344 def _call_run_once_with_retry(self, constraints, profile_only,
345 postprocess_profiled_run, args, dargs):
346 """Thin wrapper around _call_run_once that retries unsuccessful tests.
347
348 If the job object's attribute test_retry is > 0 retry any tests that
349 ran unsuccessfully X times.
350 *Note this does not competely re-initialize the test, it only
351 re-executes code once all the initial job set up (packages,
352 sysinfo, etc) is complete.
353 """
354 if self.job.test_retry != 0:
355 logging.info('Test will be retried a maximum of %d times',
356 self.job.test_retry)
357
358 max_runs = self.job.test_retry
359 for retry_run in xrange(0, max_runs+1):
360 try:
361 self._call_run_once(constraints, profile_only,
362 postprocess_profiled_run, args, dargs)
363 break
Aviv Keshet39164ca2013-03-27 15:08:33 -0700364 except error.TestFailRetry as err:
Scott Zawalski91493c82013-01-25 16:15:20 -0500365 if retry_run == max_runs:
366 raise
367 self.job.record('INFO', None, None, 'Run %s failed with %s' % (
368 retry_run, err))
369 if retry_run > 0:
370 self.write_test_keyval({'test_retries_before_success': retry_run})
371
372
    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        """Run a single iteration: hooks, run_once(), then post-processing.

        Runs before_iteration_hooks (skipped when job.fast), then either a
        profiled run or a normal run_once(), then postprocess_iteration()
        and constraint analysis.  after_iteration_hooks always run on
        failure; on success they are skipped when job.fast is set.

        @param constraints: Perf constraint expressions handed to
                analyze_perf_constraints().
        @param profile_only: If true, run only the profiled invocation.
        @param postprocess_profiled_run: Forwarded to run_once_profiling().
        @param args: Positional args forwarded to run_once().
        @param dargs: Keyword args forwarded to run_once().
        """
        self.drop_caches_between_iterations()
        # execute iteration hooks
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\n%r', args)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            # Mark success so the finally clause can tell a fast-path success
            # (hooks skippable) from a failure (hooks must run).
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    # LIFO order: most recently registered hook runs first.
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')
mbligh6b97f792009-03-23 21:23:12 +0000421
422
    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: A number of iterations that we'll run the run_once
            function. This parameter is incompatible with test_length and will
            be silently ignored if you specify both.

        @param profile_only: If true run X iterations with profilers enabled.
            If false run X iterations and one with profiling if profiles are
            enabled. If None, default to the value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.

        @param constraints: Iterable of perf constraint expressions checked
            after each iteration via analyze_perf_constraints().
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            # Timed mode: keep running iterations until at least test_length
            # seconds have elapsed.
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            # Counted mode: run a fixed number of iterations (default 1).
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            # Note: the loop variable is self.iteration so the test object
            # always knows which iteration is in flight (1-based).
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()
mblighd27604e2009-02-03 02:06:08 +0000504
505
    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        """Run one extra, profiled invocation of run_once() if profilers exist.

        Profilers are started around run_once() and always stopped/reported
        in the finally clause, even when run_once() raises.

        @param postprocess_profiled_run: If truthy (or None while the
                postprocess_profiled_run attribute is set on the test),
                postprocess_iteration() is also run for this profiled pass.
        @param args: Positional args forwarded to run_once().
        @param dargs: Keyword args forwarded to run_once().
        """
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()
535
mbligh60434712008-07-16 16:35:10 +0000536
537 def postprocess(self):
538 pass
539
540
    def postprocess_iteration(self):
        """Hook run after each iteration (and optionally after the profiled
        run); no-op by default."""
        pass
543
544
    def cleanup(self):
        """Cleanup hook invoked by _exec() after execution (on both success
        and failure paths when run_cleanup is set); no-op by default."""
        pass
mblighcd8a5162008-07-16 16:32:12 +0000547
548
    def before_run_once(self):
        """
        Override in tests that need it, will be called before any run_once()
        call including the profiling run (when it's called before starting
        the profilers).  No-op by default.
        """
        pass
556
557
    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).  No-op by default.
        """
        pass
564
565
Owen Lin9f852402014-04-15 16:35:05 +0800566 @staticmethod
567 def _make_writable_to_others(directory):
568 mode = os.stat(directory).st_mode
569 mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
570 os.chmod(directory, mode)
571
572
    def _exec(self, args, dargs):
        """Top-level driver for a test run.

        Redirects logging into the debug dir, records parameter keyvals,
        validates arguments, then runs the phases in order:
        initialize() -> setup() (serialized across tests via a flock on
        <job.tmpdir>/.testlock) -> warmup() -> execute()/run_once() ->
        cleanup().  Uncategorized exceptions are wrapped in
        error.UnhandledTestError; error.AutotestError subclasses pass
        through unchanged.

        @param args: Positional args forwarded to the phase methods.
        @param dargs: Keyword args; 'run_cleanup' and 'test_attributes'
                are popped out as control parameters.
        """
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                # Serialize the setup phase across concurrently-running tests
                # with an exclusive lock; released in the finally clause.
                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it to so actual time of error is known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                # Success path: optional cleanup, then crash report, always
                # restoring the job's logging afterwards.
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)
mbligh6231cd62008-02-02 19:18:33 +0000675
Dale Curtis74a314b2011-06-23 14:55:46 -0700676 def runsubtest(self, url, *args, **dargs):
677 """
678 Execute another autotest test from inside the current test's scope.
679
680 @param test: Parent test.
681 @param url: Url of new test.
682 @param tag: Tag added to test name.
683 @param args: Args for subtest.
684 @param dargs: Dictionary with args for subtest.
685 @iterations: Number of subtest iterations.
686 @profile_only: If true execute one profiled run.
687 """
688 dargs["profile_only"] = dargs.get("profile_only", False)
689 test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
690 return self.job.run_test(url, master_testpath=test_basepath,
691 *args, **dargs)
692
693
jadmanski886c81f2009-02-19 12:54:03 +0000694def _get_nonstar_args(func):
695 """Extract all the (normal) function parameter names.
696
697 Given a function, returns a tuple of parameter names, specifically
698 excluding the * and ** parameters, if the function accepts them.
699
700 @param func: A callable that we want to chose arguments for.
701
702 @return: A tuple of parameters accepted by the function.
703 """
704 return func.func_code.co_varnames[:func.func_code.co_argcount]
705
706
mblighcf238192008-07-17 01:18:44 +0000707def _cherry_pick_args(func, args, dargs):
mbligh234a84f2008-11-20 19:57:43 +0000708 """Sanitize positional and keyword arguments before calling a function.
709
710 Given a callable (func), an argument tuple and a dictionary of keyword
711 arguments, pick only those arguments which the function is prepared to
712 accept and return a new argument tuple and keyword argument dictionary.
713
714 Args:
715 func: A callable that we want to choose arguments for.
716 args: A tuple of positional arguments to consider passing to func.
717 dargs: A dictionary of keyword arguments to consider passing to func.
718 Returns:
719 A tuple of: (args tuple, keyword arguments dictionary)
720 """
mblighcf238192008-07-17 01:18:44 +0000721 # Cherry pick args:
722 if func.func_code.co_flags & 0x04:
723 # func accepts *args, so return the entire args.
724 p_args = args
725 else:
726 p_args = ()
727
728 # Cherry pick dargs:
729 if func.func_code.co_flags & 0x08:
730 # func accepts **dargs, so return the entire dargs.
731 p_dargs = dargs
732 else:
mbligh234a84f2008-11-20 19:57:43 +0000733 # Only return the keyword arguments that func accepts.
mblighcf238192008-07-17 01:18:44 +0000734 p_dargs = {}
jadmanski886c81f2009-02-19 12:54:03 +0000735 for param in _get_nonstar_args(func):
mblighcf238192008-07-17 01:18:44 +0000736 if param in dargs:
737 p_dargs[param] = dargs[param]
738
739 return p_args, p_dargs
740
741
mbligh5c1bb252009-03-25 22:06:49 +0000742def _cherry_pick_call(func, *args, **dargs):
743 """Cherry picks arguments from args/dargs based on what "func" accepts
744 and calls the function with the picked arguments."""
745 p_args, p_dargs = _cherry_pick_args(func, args, dargs)
746 return func(*p_args, **p_dargs)
747
748
mblighcf238192008-07-17 01:18:44 +0000749def _validate_args(args, dargs, *funcs):
mbligh234a84f2008-11-20 19:57:43 +0000750 """Verify that arguments are appropriate for at least one callable.
751
752 Given a list of callables as additional parameters, verify that
753 the proposed keyword arguments in dargs will each be accepted by at least
754 one of the callables.
755
756 NOTE: args is currently not supported and must be empty.
757
758 Args:
759 args: A tuple of proposed positional arguments.
760 dargs: A dictionary of proposed keyword arguments.
761 *funcs: Callables to be searched for acceptance of args and dargs.
762 Raises:
763 error.AutotestError: if an arg won't be accepted by any of *funcs.
764 """
mblighcf238192008-07-17 01:18:44 +0000765 all_co_flags = 0
766 all_varnames = ()
767 for func in funcs:
768 all_co_flags |= func.func_code.co_flags
769 all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]
770
771 # Check if given args belongs to at least one of the methods below.
772 if len(args) > 0:
773 # Current implementation doesn't allow the use of args.
mbligh234a84f2008-11-20 19:57:43 +0000774 raise error.TestError('Unnamed arguments not accepted. Please '
775 'call job.run_test with named args only')
mblighcf238192008-07-17 01:18:44 +0000776
777 # Check if given dargs belongs to at least one of the methods below.
778 if len(dargs) > 0:
779 if not all_co_flags & 0x08:
780 # no func accepts *dargs, so:
781 for param in dargs:
782 if not param in all_varnames:
783 raise error.AutotestError('Unknown parameter: %s' % param)
784
785
mbligh6231cd62008-02-02 19:18:33 +0000786def _installtest(job, url):
mblighc5ddfd12008-08-04 17:15:00 +0000787 (group, name) = job.pkgmgr.get_package_name(url, 'test')
mbligh6231cd62008-02-02 19:18:33 +0000788
jadmanski0afbb632008-06-06 21:10:57 +0000789 # Bail if the test is already installed
790 group_dir = os.path.join(job.testdir, "download", group)
791 if os.path.exists(os.path.join(group_dir, name)):
792 return (group, name)
mbligh6231cd62008-02-02 19:18:33 +0000793
jadmanski0afbb632008-06-06 21:10:57 +0000794 # If the group directory is missing create it and add
795 # an empty __init__.py so that sub-directories are
796 # considered for import.
797 if not os.path.exists(group_dir):
lmr23421722010-06-17 17:51:07 +0000798 os.makedirs(group_dir)
jadmanski0afbb632008-06-06 21:10:57 +0000799 f = file(os.path.join(group_dir, '__init__.py'), 'w+')
800 f.close()
mbligh6231cd62008-02-02 19:18:33 +0000801
Dale Curtis456d3c12011-07-19 11:42:51 -0700802 logging.debug("%s: installing test url=%s", name, url)
mblighc5ddfd12008-08-04 17:15:00 +0000803 tarball = os.path.basename(url)
804 tarball_path = os.path.join(group_dir, tarball)
805 test_dir = os.path.join(group_dir, name)
806 job.pkgmgr.fetch_pkg(tarball, tarball_path,
807 repo_url = os.path.dirname(url))
808
809 # Create the directory for the test
810 if not os.path.exists(test_dir):
811 os.mkdir(os.path.join(group_dir, name))
812
813 job.pkgmgr.untar_pkg(tarball_path, test_dir)
814
815 os.remove(tarball_path)
mbligh6231cd62008-02-02 19:18:33 +0000816
jadmanski0afbb632008-06-06 21:10:57 +0000817 # For this 'sub-object' to be importable via the name
818 # 'group.name' we need to provide an __init__.py,
819 # so link the main entry point to this.
820 os.symlink(name + '.py', os.path.join(group_dir, name,
821 '__init__.py'))
mbligh6231cd62008-02-02 19:18:33 +0000822
jadmanski0afbb632008-06-06 21:10:57 +0000823 # The test is now installed.
824 return (group, name)
mbligh6231cd62008-02-02 19:18:33 +0000825
826
mbligh5c1bb252009-03-25 22:06:49 +0000827def _call_test_function(func, *args, **dargs):
828 """Calls a test function and translates exceptions so that errors
829 inside test code are considered test failures."""
830 try:
831 return func(*args, **dargs)
832 except error.AutotestError:
mbligh5c1bb252009-03-25 22:06:49 +0000833 raise
834 except Exception, e:
835 # Other exceptions must be treated as a FAIL when
836 # raised during the test functions
837 raise error.UnhandledTestFail(e)
838
839
mbligh6231cd62008-02-02 19:18:33 +0000840def runtest(job, url, tag, args, dargs,
jadmanski30e9b592008-09-25 19:51:57 +0000841 local_namespace={}, global_namespace={},
mbligh4395bbd2009-03-25 19:34:17 +0000842 before_test_hook=None, after_test_hook=None,
843 before_iteration_hook=None, after_iteration_hook=None):
jadmanski0afbb632008-06-06 21:10:57 +0000844 local_namespace = local_namespace.copy()
845 global_namespace = global_namespace.copy()
jadmanski0afbb632008-06-06 21:10:57 +0000846 # if this is not a plain test name then download and install the
847 # specified test
mblighc5ddfd12008-08-04 17:15:00 +0000848 if url.endswith('.tar.bz2'):
mbligh620ccf02010-03-26 17:44:29 +0000849 (testgroup, testname) = _installtest(job, url)
850 bindir = os.path.join(job.testdir, 'download', testgroup, testname)
lmr23421722010-06-17 17:51:07 +0000851 importdir = os.path.join(job.testdir, 'download')
mbligh620ccf02010-03-26 17:44:29 +0000852 modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
853 classname = '%s.%s' % (modulename, testname)
854 path = testname
jadmanski0afbb632008-06-06 21:10:57 +0000855 else:
mbligh620ccf02010-03-26 17:44:29 +0000856 # If the test is local, it may be under either testdir or site_testdir.
857 # Tests in site_testdir override tests defined in testdir
858 testname = path = url
859 testgroup = ''
jadmanski06767042010-03-29 18:28:33 +0000860 path = re.sub(':', '/', testname)
mbligh620ccf02010-03-26 17:44:29 +0000861 modulename = os.path.basename(path)
862 classname = '%s.%s' % (modulename, modulename)
mbligh6231cd62008-02-02 19:18:33 +0000863
mbligh620ccf02010-03-26 17:44:29 +0000864 # Try installing the test package
865 # The job object may be either a server side job or a client side job.
866 # 'install_pkg' method will be present only if it's a client side job.
mblighc5ddfd12008-08-04 17:15:00 +0000867 if hasattr(job, 'install_pkg'):
868 try:
mbligh620ccf02010-03-26 17:44:29 +0000869 bindir = os.path.join(job.testdir, testname)
mblighc5ddfd12008-08-04 17:15:00 +0000870 job.install_pkg(testname, 'test', bindir)
Ilja H. Friedel21bf5622014-09-08 18:59:04 -0700871 except error.PackageInstallError:
mblighc5ddfd12008-08-04 17:15:00 +0000872 # continue as a fall back mechanism and see if the test code
873 # already exists on the machine
874 pass
875
Ilja H. Friedel21bf5622014-09-08 18:59:04 -0700876 bindir = None
mbligh620ccf02010-03-26 17:44:29 +0000877 for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
878 if dir is not None and os.path.exists(os.path.join(dir, path)):
mbligh620ccf02010-03-26 17:44:29 +0000879 importdir = bindir = os.path.join(dir, path)
880 if not bindir:
881 raise error.TestError(testname + ': test does not exist')
882
Dale Curtis74a314b2011-06-23 14:55:46 -0700883 subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
884 outputdir = os.path.join(job.resultdir, subdir)
jadmanski0afbb632008-06-06 21:10:57 +0000885 if tag:
886 outputdir += '.' + tag
mbligh6231cd62008-02-02 19:18:33 +0000887
mblighc5ddfd12008-08-04 17:15:00 +0000888 local_namespace['job'] = job
889 local_namespace['bindir'] = bindir
890 local_namespace['outputdir'] = outputdir
891
lmr23421722010-06-17 17:51:07 +0000892 sys.path.insert(0, importdir)
jadmanski0afbb632008-06-06 21:10:57 +0000893 try:
mbligh620ccf02010-03-26 17:44:29 +0000894 exec ('import %s' % modulename, local_namespace, global_namespace)
895 exec ("mytest = %s(job, bindir, outputdir)" % classname,
jadmanski0afbb632008-06-06 21:10:57 +0000896 local_namespace, global_namespace)
897 finally:
jadmanski0afbb632008-06-06 21:10:57 +0000898 sys.path.pop(0)
mbligh6231cd62008-02-02 19:18:33 +0000899
jadmanski0afbb632008-06-06 21:10:57 +0000900 pwd = os.getcwd()
901 os.chdir(outputdir)
mbligh4395bbd2009-03-25 19:34:17 +0000902
jadmanski0afbb632008-06-06 21:10:57 +0000903 try:
904 mytest = global_namespace['mytest']
Dan Shi2ca97772013-10-21 17:17:27 -0700905 mytest.success = False
Xixuan Wu5019d692017-11-10 15:58:33 -0800906 if not job.fast and before_test_hook:
Xixuan Wue8ce6542017-11-07 17:22:10 -0800907 logging.info('Starting before_hook for %s', mytest.tagged_testname)
908 with metrics.SecondsTimer(
909 'chromeos/autotest/job/before_hook_duration'):
910 before_test_hook(mytest)
911 logging.info('before_hook completed')
mbligh742ae422009-05-13 20:46:41 +0000912
913 # we use the register iteration hooks methods to register the passed
914 # in hooks
915 if before_iteration_hook:
916 mytest.register_before_iteration_hook(before_iteration_hook)
917 if after_iteration_hook:
918 mytest.register_after_iteration_hook(after_iteration_hook)
919 mytest._exec(args, dargs)
Dan Shi2ca97772013-10-21 17:17:27 -0700920 mytest.success = True
jadmanski0afbb632008-06-06 21:10:57 +0000921 finally:
jadmanski213b02b2008-08-26 20:51:58 +0000922 os.chdir(pwd)
Xixuan Wu5019d692017-11-10 15:58:33 -0800923 if after_test_hook and (not mytest.success or not job.fast):
Xixuan Wue8ce6542017-11-07 17:22:10 -0800924 logging.info('Starting after_hook for %s', mytest.tagged_testname)
925 with metrics.SecondsTimer(
926 'chromeos/autotest/job/after_hook_duration'):
927 after_test_hook(mytest)
928 logging.info('after_hook completed')
Keith Haddow1e5c7012016-03-09 16:05:37 -0800929
Xixuan Wu5019d692017-11-10 15:58:33 -0800930 shutil.rmtree(mytest.tmpdir, ignore_errors=True)