import os, copy, logging, errno, fcntl, time, re, weakref, traceback
import cPickle as pickle

from autotest_lib.client.common_lib import autotemp, error, log


class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            msg = 'Creation of directory %s failed with exception %s'
            msg %= (path, error)
            Exception.__init__(self, msg)


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            msg = 'Directory %s exists but is not writable' % path
            Exception.__init__(self, msg)


    def __init__(self, path, is_writable=False):
        """
        Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is None:
            if is_writable:
                self._tempdir = autotemp.tempdir(unique_id='autotest')
                self.path = self._tempdir.name
            else:
                raise self.MissingDirectoryException(path)
        else:
            self._tempdir = None
            self.path = path
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """
        Ensure that this is a valid directory.

        Will check that the directory at self.path exists, and can optionally
        also enforce that it be writable, creating it if necessary. Creation
        will still fail if the path is rooted in a non-writable directory, or
        if a file already exists at the given location.

        @param is_writable: A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raises UnwritableDirectoryException: raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        # ensure the directory exists
        if is_writable:
            try:
                os.makedirs(self.path)
            except OSError, e:
                if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                    raise self.UncreatableDirectoryException(self.path, e)
        elif not os.path.isdir(self.path):
            raise self.MissingDirectoryException(self.path)

        # if is_writable=True, also check that the directory is writable
        if is_writable and not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """
        Create a job.*dir -> job._*dir.path property accessor.

        @param attribute: A string with the name of the attribute this is
            exposed as. '_'+attribute must then be an attribute that holds
            either None or a job_directory-like object.

        @returns: A read-only property object that exposes a job_directory
            path.
        """
        @property
        def dir_property(self):
            underlying_attribute = getattr(self, '_' + attribute)
            if underlying_attribute is None:
                return None
            else:
                return underlying_attribute.path
        return dir_property

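# Illustrative sketch (not part of the original module): job_directory is
# normally consumed through property_factory, which is how base_job exposes
# its read-only *dir attributes further down in this file. A hypothetical
# holder class would look roughly like:
#
#     class some_job(object):                      # hypothetical name
#         resultdir = job_directory.property_factory('resultdir')
#         def __init__(self, path):
#             self._resultdir = job_directory(path, is_writable=True)
#
# Reading some_job(path).resultdir then returns the validated directory path,
# or None if no directory was configured.
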
# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically acquire the
    backing file lock before the call and release it again after the call,
    unless the lock is already held.
    """
    def wrapped_method(self, *args, **dargs):
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            if not already_have_lock:
                self._unlock_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method


# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a lock-and-read before the call followed by a
    write-and-unlock. Any operation that is reading or writing state
    should be decorated with this method to ensure that backing file
    state is consistently maintained.
    """
    @with_backing_lock
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            self._write_to_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method

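# Illustrative note (not part of the original module): job_state below applies
# these decorators to its public accessors, e.g.
#
#     @with_backing_file
#     def get(self, namespace, name, default=NO_DEFAULT):
#         ...
#
# so each decorated call runs as lock -> read backing file -> method -> write
# backing file -> unlock, re-using the lock if the caller already holds it.

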
class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced, it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the values associated with properties constructed using the
    property_factory method.
    """

    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        self._state = {}
        self._backing_file = None
        self._backing_file_initialized = False
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """

        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            on_disk_state = pickle.load(open(file_path))

        if merge:
            # merge the on-disk state with the in-memory state
            for namespace, namespace_dict in on_disk_state.iteritems():
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in namespace_dict.iteritems():
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overriding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        outfile = open(file_path, 'w')
        try:
            pickle.dump(self._state, outfile, self.PICKLE_PROTOCOL)
        finally:
            outfile.close()


    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_file
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        pass


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified, if a file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()

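    # Illustrative usage sketch (not part of the original module); the file
    # name below is hypothetical:
    #
    #     state = job_state()
    #     state.set_backing_file('/tmp/job_state.pickle')
    #     state.set('my_namespace', 'counter', 1)
    #     state.get('my_namespace', 'counter')                 # -> 1
    #     state.get('my_namespace', 'missing', default=None)   # -> None
    #
    # Each set/get call below re-reads and re-writes the backing file under
    # an exclusive flock, which is what keeps concurrent users of the same
    # file consistent.
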
    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)

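# Illustrative note (not part of the original module): property_factory is
# used by base_job further down (via its _job_state alias), for example:
#
#     tag = job_state.property_factory('_state', 'tag', '')
#
# Reading job.tag then calls job._state.get('global_properties', 'tag', '')
# and assigning job.tag calls the corresponding set(), so the value persists
# through the backing file whenever one is configured.

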
class status_log_entry(object):
    """Represents a single status log entry."""

    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.client.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing the event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid.
        """
        # non-space whitespace is forbidden in any fields
        bad_char_regex = r'[\t\n\r\v\f]'

        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and re.search(bad_char_regex, subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and re.search(bad_char_regex, operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        # break the message line into a single-line message that goes into the
        # database, and a block of additional lines that goes into the status
        # log but will never be parsed
        message_lines = message.split('\n')
        self.message = message_lines[0].replace('\t', ' ' * 8)
        self.extra_message_lines = message_lines[1:]
        if re.search(bad_char_regex, self.message):
            raise ValueError('Invalid character in message %r' % self.message)

        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
            for key, value in self.fields.iteritems():
                if re.search(bad_char_regex, key + value):
                    raise ValueError('Invalid character in %r=%r field'
                                     % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
            '%b %d %H:%M:%S', time.localtime(timestamp))

    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in self.fields.iteritems()]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)

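    # Illustrative note (not part of the original module): a rendered entry is
    # a single tab-separated line of roughly this shape, with made-up values
    # and '<tab>' standing in for a tab character:
    #
    #     GOOD<tab>----<tab>reboot.verify<tab>timestamp=1271711117<tab>localtime=Apr 19 14:25:17<tab>completed successfully
    #
    # '----' stands in for a None subdir or operation, and any extra message
    # lines follow underneath, indented by two spaces so parse() skips them.
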
    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)

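# Illustrative round-trip sketch (not part of the original module):
#
#     entry = status_log_entry('GOOD', None, 'reboot.verify',
#                              'completed successfully', None)
#     line = entry.render()
#     same = status_log_entry.parse(line)
#     # same.status_code == 'GOOD', same.subdir is None, and the timestamp
#     # field survives the round trip; parse() returns None for the
#     # two-space-indented extra message lines.

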
class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        raise NotImplementedError


    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError


    def decrement(self):
        """Decrease indentation by one level."""
        raise NotImplementedError

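# Illustrative sketch (not part of the original module): a minimal concrete
# indenter just wraps an integer counter, e.g.:
#
#     class simple_indenter(status_indenter):      # hypothetical helper
#         def __init__(self):
#             self._indent = 0
#         @property
#         def indent(self):
#             return self._indent
#         def increment(self):
#             self._indent += 1
#         def decrement(self):
#             self._indent = max(0, self._indent - 1)

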
class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        """
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
            status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write out the entry to the log files
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            fileobj = open(log_file, 'a')
            try:
                print >> fileobj, log_text
            finally:
                fileobj.close()

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()

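# Illustrative wiring sketch (not part of the original module); 'job' and
# 'simple_indenter' are hypothetical stand-ins:
#
#     logger = status_logger(job, simple_indenter())
#     logger.record_entry(status_log_entry('START', None, 'mytest', '', None))
#     logger.record_entry(status_log_entry('END GOOD', None, 'mytest', '', None))
#
# The START entry increases the indentation used for subsequent lines and the
# END entry decreases it again, mirroring the nested status log format.

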
class base_job(object):
    """An abstract base class for the various autotest job classes.

    @property autodir: The top level autotest directory.
    @property clientdir: The autotest client directory.
    @property serverdir: The autotest server directory. [OPTIONAL]
    @property resultdir: The directory where results should be written out.
        [WRITABLE]

    @property pkgdir: The job packages directory. [WRITABLE]
    @property tmpdir: The job temporary directory. [WRITABLE]
    @property testdir: The job test directory. [WRITABLE]
    @property site_testdir: The job site test directory. [WRITABLE]

    @property bindir: The client bin/ directory.
    @property configdir: The client config/ directory.
    @property profdir: The client profilers/ directory.
    @property toolsdir: The client tools/ directory.

    @property conmuxdir: The conmux directory. [OPTIONAL]

    @property control: A path to the control file to be executed. [OPTIONAL]
    @property hosts: A set of all live Host objects currently in use by the
        job. Code running in the context of a local client can safely assume
        that this set contains only a single entry.
    @property machines: A list of the machine names associated with the job.
    @property user: The user executing the job.
    @property tag: A tag identifying the job. Often used by the scheduler to
        give a name of the form NUMBER-USERNAME/HOSTNAME.
    @property args: A list of additional miscellaneous command-line arguments
        provided when starting the job.

    @property last_boot_tag: The label of the kernel from the last reboot.
        [OPTIONAL,PERSISTENT]
    @property automatic_test_tag: A string which, if set, will be automatically
        added to the test name when running tests.

    @property default_profile_only: A boolean indicating the default value of
        profile_only used by test.execute. [PERSISTENT]
    @property drop_caches: A boolean indicating if caches should be dropped
        before each test is executed.
    @property drop_caches_between_iterations: A boolean indicating if caches
        should be dropped before each test iteration is executed.
    @property run_test_cleanup: A boolean indicating if test.cleanup should be
        run by default after a test completes, if the run_cleanup argument is
        not specified. [PERSISTENT]

    @property num_tests_run: The number of tests run during the job. [OPTIONAL]
    @property num_tests_failed: The number of tests failed during the job.
        [OPTIONAL]

    @property bootloader: An instance of the boottool class. May not be
        available on job instances where access to the bootloader is not
        available (e.g. on the server running a server job). [OPTIONAL]
    @property harness: An instance of the client test harness. Only available
        in contexts where client test execution happens. [OPTIONAL]
    @property logging: An instance of the logging manager associated with the
        job.
    @property profilers: An instance of the profiler manager associated with
        the job.
    @property sysinfo: An instance of the sysinfo object. Only available in
        contexts where it's possible to collect sysinfo.
    @property warning_manager: A class for managing which types of WARN
        messages should be logged and which should be suppressed. [OPTIONAL]
    @property warning_loggers: A set of readable streams that will be monitored
        for WARN messages to be logged. [OPTIONAL]

    Abstract methods:
        _find_base_directories [CLASSMETHOD]
            Returns the location of autodir, clientdir and serverdir.

        _find_resultdir
            Returns the location of resultdir. Gets a copy of any parameters
            passed into base_job.__init__. Can return None to indicate that
            no resultdir is to be used.

        _get_status_logger
            Returns a status_logger instance for recording job status logs.
    """

    # capture the dependency on several helper classes with factories
    _job_directory = job_directory
    _job_state = job_state


    # all the job directory attributes
    autodir = _job_directory.property_factory('autodir')
    clientdir = _job_directory.property_factory('clientdir')
    serverdir = _job_directory.property_factory('serverdir')
    resultdir = _job_directory.property_factory('resultdir')
    pkgdir = _job_directory.property_factory('pkgdir')
    tmpdir = _job_directory.property_factory('tmpdir')
    testdir = _job_directory.property_factory('testdir')
    site_testdir = _job_directory.property_factory('site_testdir')
    bindir = _job_directory.property_factory('bindir')
    configdir = _job_directory.property_factory('configdir')
    profdir = _job_directory.property_factory('profdir')
    toolsdir = _job_directory.property_factory('toolsdir')
    conmuxdir = _job_directory.property_factory('conmuxdir')


    # all the generic persistent properties
    tag = _job_state.property_factory('_state', 'tag', '')
    default_profile_only = _job_state.property_factory(
        '_state', 'default_profile_only', False)
    run_test_cleanup = _job_state.property_factory(
        '_state', 'run_test_cleanup', True)
    last_boot_tag = _job_state.property_factory(
        '_state', 'last_boot_tag', None)
    automatic_test_tag = _job_state.property_factory(
        '_state', 'automatic_test_tag', None)

    # the use_sequence_number property
    _sequence_number = _job_state.property_factory(
        '_state', '_sequence_number', None)
    def _get_use_sequence_number(self):
        return bool(self._sequence_number)
    def _set_use_sequence_number(self, value):
        if value:
            self._sequence_number = 1
        else:
            self._sequence_number = None
    use_sequence_number = property(_get_use_sequence_number,
                                   _set_use_sequence_number)

    def __init__(self, *args, **dargs):
        # initialize the base directories, all others are relative to these
        autodir, clientdir, serverdir = self._find_base_directories()
        self._autodir = self._job_directory(autodir)
        self._clientdir = self._job_directory(clientdir)
        if serverdir:
            self._serverdir = self._job_directory(serverdir)
        else:
            self._serverdir = None

        # initialize all the other directories relative to the base ones
        self._initialize_dir_properties()
        self._resultdir = self._job_directory(
            self._find_resultdir(*args, **dargs), True)
        self._execution_contexts = []

        # initialize all the job state
        self._state = self._job_state()


    @classmethod
    def _find_base_directories(cls):
        raise NotImplementedError()


    def _initialize_dir_properties(self):
        """
        Initializes all the secondary self.*dir properties. Requires autodir,
        clientdir and serverdir to already be initialized.
        """
        # create some stubs for use as shortcuts
        def readonly_dir(*args):
            return self._job_directory(os.path.join(*args))
        def readwrite_dir(*args):
            return self._job_directory(os.path.join(*args), True)

        # various client-specific directories
        self._bindir = readonly_dir(self.clientdir, 'bin')
        self._configdir = readonly_dir(self.clientdir, 'config')
        self._profdir = readonly_dir(self.clientdir, 'profilers')
        self._pkgdir = readwrite_dir(self.clientdir, 'packages')
        self._toolsdir = readonly_dir(self.clientdir, 'tools')

        # directories which are in serverdir on a server, clientdir on a client
        if self.serverdir:
            root = self.serverdir
        else:
            root = self.clientdir
        self._tmpdir = readwrite_dir(root, 'tmp')
        self._testdir = readwrite_dir(root, 'tests')
        self._site_testdir = readwrite_dir(root, 'site_tests')

        # various server-specific directories
        if self.serverdir:
            self._conmuxdir = readonly_dir(self.autodir, 'conmux')
        else:
            self._conmuxdir = None


    def _find_resultdir(self, *args, **dargs):
        raise NotImplementedError()


    def push_execution_context(self, resultdir):
        """
        Save off the current context of the job and change to the given one.

        In practice this method just changes the resultdir, but it may become
        more extensive in the future. The expected use case is for when a
        child job needs to be executed in some sort of nested context (for
        example the way parallel_simple does). The original context can be
        restored with a pop_execution_context call.

        @param resultdir: The new resultdir, relative to the current one.
        """
        new_dir = self._job_directory(
            os.path.join(self.resultdir, resultdir), True)
        self._execution_contexts.append(self._resultdir)
        self._resultdir = new_dir


    def pop_execution_context(self):
        """
        Reverse the effects of the previous push_execution_context call.

        @raise IndexError: raised when the stack of contexts is empty.
        """
        if not self._execution_contexts:
            raise IndexError('No old execution context to restore')
        self._resultdir = self._execution_contexts.pop()

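    # Illustrative sketch (not part of the original module): a nested child
    # context is typically wrapped around some sub-job work, with the
    # directory name here being hypothetical:
    #
    #     job.push_execution_context('machine1')  # results -> resultdir/machine1
    #     try:
    #         ...                                  # run the nested work
    #     finally:
    #         job.pop_execution_context()          # restore the original resultdir
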
    def get_state(self, name, default=_job_state.NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        try:
            return self._state.get('public', name, default=default)
        except KeyError:
            raise KeyError(name)


    def set_state(self, name, value):
        """Saves the value given with the provided name.

        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        self._state.set('public', name, value)

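    # Illustrative sketch (not part of the original module); the key name is
    # made up:
    #
    #     job.set_state('my_checkpoint', {'phase': 2})
    #     job.get_state('my_checkpoint')            # -> {'phase': 2} (deep copy)
    #     job.get_state('not_there', default=None)  # -> None instead of KeyError
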
    def _build_tagged_test_name(self, testname, dargs):
        """Builds the fully tagged testname and subdirectory for job.run_test.

        @param testname: The base name of the test.
        @param dargs: The ** arguments passed to run_test. Any arguments
            consumed by this method will be removed from the dictionary.

        @return: A 3-tuple of the full name of the test, the subdirectory it
            should be stored in, and the full tag of the subdir.
        """
        tag_parts = []

        # build up the parts of the tag used for the test name
        base_tag = dargs.pop('tag', None)
        if base_tag:
            tag_parts.append(str(base_tag))
        if self.use_sequence_number:
            tag_parts.append('_%02d_' % self._sequence_number)
            self._sequence_number += 1
        if self.automatic_test_tag:
            tag_parts.append(self.automatic_test_tag)
        full_testname = '.'.join([testname] + tag_parts)

        # build up the subdir and tag as well
        subdir_tag = dargs.pop('subdir_tag', None)
        if subdir_tag:
            tag_parts.append(subdir_tag)
        subdir = '.'.join([testname] + tag_parts)
        tag = '.'.join(tag_parts)

        return full_testname, subdir, tag

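    # Illustrative worked example (not part of the original module): with
    # automatic_test_tag unset and use_sequence_number enabled on a fresh job,
    # a call such as
    #
    #     self._build_tagged_test_name('sleeptest', {'tag': 'quick'})
    #
    # would return roughly ('sleeptest.quick._01_', 'sleeptest.quick._01_',
    # 'quick._01_'), and a 'subdir_tag' entry in dargs would be appended to
    # the subdir and tag but not to the full test name.
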
    def _make_test_outputdir(self, subdir):
        """Creates an output directory for a test to run in.

        @param subdir: The subdirectory of the test. Generally computed by
            _build_tagged_test_name.

        @return: A job_directory instance corresponding to the outputdir of
            the test.
        @raise TestError: If the output directory is invalid.
        """
        # explicitly check that this subdirectory is new
        path = os.path.join(self.resultdir, subdir)
        if os.path.exists(path):
            msg = ('%s already exists; multiple tests cannot run with the '
                   'same subdirectory' % subdir)
            raise error.TestError(msg)

        # create the outputdir and raise a TestError if it isn't valid
        try:
            outputdir = self._job_directory(path, True)
            return outputdir
        except self._job_directory.JobDirectoryException, e:
            logging.exception('%s directory creation failed with %s',
                              subdir, e)
            raise error.TestError('%s directory creation failed' % subdir)

    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """Record a job-level status event.

        Logs an event noteworthy to the Autotest job as a whole. Messages will
        be written into a global status log file, as well as a subdir-local
        status log file (if subdir is specified).

        @param status_code: A string status code describing the type of status
            entry being recorded. It must pass log.is_valid_status to be
            considered valid.
        @param subdir: A specific results subdirectory this also applies to, or
            None. If not None the subdirectory must exist.
        @param operation: A string describing the operation that was run.
        @param status: An optional human-readable message describing the status
            entry, for example an error message or "completed successfully".
        @param optional_fields: An optional dictionary of additional named
            fields to be included with the status message. Timestamp and
            localtime entries are always generated with the current time and
            added to this dictionary.
        """
        entry = status_log_entry(status_code, subdir, operation, status,
                                 optional_fields)
        self.record_entry(entry)


    def record_entry(self, entry, log_in_subdir=True):
        """Record a job-level status event, using a status_log_entry.

        This is the same as self.record but using an existing status log
        entry object rather than constructing one for you.

        @param entry: A status_log_entry object
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        self._get_status_logger().record_entry(entry, log_in_subdir)
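

# Illustrative sketch (not part of the original module): a minimal concrete
# job class only has to provide the three abstract hooks; the class name,
# paths, and the simple_indenter helper sketched earlier are hypothetical:
#
#     class minimal_job(base_job):
#         @classmethod
#         def _find_base_directories(cls):
#             return ('/usr/local/autotest',
#                     '/usr/local/autotest/client', None)
#
#         def _find_resultdir(self, *args, **dargs):
#             return '/usr/local/autotest/results/default'
#
#         def _get_status_logger(self):
#             return status_logger(self, simple_indenter())
#
# after which job.record('GOOD', None, 'setup', 'completed successfully')
# renders and appends a status line to <resultdir>/status.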