import os, copy, logging, errno, fcntl, time, re, weakref, traceback
import cPickle as pickle

from autotest_lib.client.common_lib import autotemp, error, log


class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            msg = 'Creation of directory %s failed with exception %s'
            msg %= (path, error)
            Exception.__init__(self, msg)


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            msg = 'Directory %s exists but is not writable' % path
            Exception.__init__(self, msg)


    def __init__(self, path, is_writable=False):
        """
        Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is None:
            if is_writable:
                self._tempdir = autotemp.tempdir(unique_id='autotest')
                self.path = self._tempdir.name
            else:
                raise self.MissingDirectoryException(path)
        else:
            self._tempdir = None
            self.path = path
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """
        Ensure that this is a valid directory.

        Will check if a directory exists, can optionally also enforce that
        it be writable. It can optionally create it if necessary. Creation
        will still fail if the path is rooted in a non-writable directory, or
        if a file already exists at the given location.

        @param is_writable A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException raised if is_writable=False and the
            directory does not exist.
        @raises UnwritableDirectoryException raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException raised if is_writable=True, the
            directory does not exist and it cannot be created
        """
        # ensure the directory exists
        if is_writable:
            try:
                os.makedirs(self.path)
            except OSError, e:
                if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                    raise self.UncreatableDirectoryException(self.path, e)
        elif not os.path.isdir(self.path):
            raise self.MissingDirectoryException(self.path)

        # if is_writable=True, also check that the directory is writable
        if is_writable and not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """
        Create a job.*dir -> job._*dir.path property accessor.

        @param attribute A string with the name of the attribute this is
            exposed as. '_'+attribute must then be an attribute that holds
            either None or a job_directory-like object.

        @returns A read-only property object that exposes a job_directory path
        """
        @property
        def dir_property(self):
            underlying_attribute = getattr(self, '_' + attribute)
            if underlying_attribute is None:
                return None
            else:
                return underlying_attribute.path
        return dir_property

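# A minimal usage sketch (the paths below are hypothetical): job_directory
# validates or creates a directory up front so failures surface early, and
# property_factory is how job classes expose the path as a read-only
# attribute.
#
#   results = job_directory('/tmp/my_results', is_writable=True)
#   scratch = job_directory(None, is_writable=True)   # temporary directory
#
#   class some_job(object):
#       resultdir = job_directory.property_factory('resultdir')
#       def __init__(self):
#           self._resultdir = job_directory('/tmp/my_results', True)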

# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a backing file lock before the call, followed
    by a backing file unlock afterwards.
    """
    def wrapped_method(self, *args, **dargs):
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            if not already_have_lock:
                self._unlock_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method


# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a lock-and-read before the call followed by a
    write-and-unlock. Any operation that is reading or writing state
    should be decorated with this method to ensure that backing file
    state is consistently maintained.
    """
    @with_backing_lock
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            self._write_to_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method

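# Illustrative expansion (comments only, not executed): a job_state method
# decorated with with_backing_file behaves roughly like
#
#   self._lock_backing_file()
#   try:
#       self._read_from_backing_file()
#       try:
#           result = method(self, *args, **dargs)
#       finally:
#           self._write_to_backing_file()
#   finally:
#       self._unlock_backing_file()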


class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the values associated with properties constructed using the
    property_factory method.
    """

    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        self._state = {}
        self._backing_file = None
        self._backing_file_initialized = False
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """

        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            on_disk_state = pickle.load(open(file_path))

        if merge:
            # merge the on-disk state with the in-memory state
            for namespace, namespace_dict in on_disk_state.iteritems():
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in namespace_dict.iteritems():
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overriding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        outfile = open(file_path, 'w')
        try:
            pickle.dump(self._state, outfile, self.PICKLE_PROTOCOL)
        finally:
            outfile.close()


    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_file
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        pass


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified, if a file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()


    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)

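# A minimal usage sketch (the backing file path is hypothetical): job_state is
# a namespaced dictionary that can mirror itself to a pickle file. Each public
# call is wrapped by with_backing_file, so the file is re-read and re-written
# under an fcntl lock around every operation.
#
#   state = job_state()
#   state.set_backing_file('/tmp/job_state.pickle')
#   state.set('client', 'kernel_version', '2.6.32')
#   state.get('client', 'kernel_version')           # -> '2.6.32'
#   state.get('client', 'missing', default=None)    # -> None
#   state.discard('client', 'kernel_version')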

class status_log_entry(object):
    """Represents a single status log entry."""

    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    # non-space whitespace is forbidden in any fields
    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')

    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing the event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid
        """

        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and self.BAD_CHAR_REGEX.search(subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and self.BAD_CHAR_REGEX.search(operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        # break the message line into a single-line message that goes into the
        # database, and a block of additional lines that goes into the status
        # log but will never be parsed
        message_lines = message.split('\n')
        self.message = message_lines[0].replace('\t', ' ' * 8)
        self.extra_message_lines = message_lines[1:]
        if self.BAD_CHAR_REGEX.search(self.message):
            raise ValueError('Invalid character in message %r' % self.message)

        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
        for key, value in self.fields.iteritems():
            if self.BAD_CHAR_REGEX.search(key + value):
                raise ValueError('Invalid character in %r=%r field'
                                 % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
            '%b %d %H:%M:%S', time.localtime(timestamp))


    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in self.fields.iteritems()]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)


    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)

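# Illustrative round trip: render() produces a tab-delimited status line and
# parse() is its inverse.
#
#   entry = status_log_entry('GOOD', None, 'reboot.verify', 'completed', None)
#   line = entry.render()
#   # e.g. 'GOOD\t----\treboot.verify\ttimestamp=...\tlocaltime=...\tcompleted'
#   same = status_log_entry.parse(line)
#   assert same.status_code == 'GOOD' and same.operation == 'reboot.verify'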

class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        raise NotImplementedError


    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError


    def decrement(self):
        """Decrease indentation by one level."""
        raise NotImplementedError

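# A minimal concrete indenter sketch (hypothetical class, for illustration):
# the interface only needs an integer indent level that START/END entries
# push and pop.
#
#   class simple_indenter(status_indenter):
#       def __init__(self):
#           self._indent = 0
#       @property
#       def indent(self):
#           return self._indent
#       def increment(self):
#           self._indent += 1
#       def decrement(self):
#           self._indent = max(0, self._indent - 1)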

class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        """
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
            status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write out the entry to the log files
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            fileobj = open(log_file, 'a')
            try:
                print >> fileobj, log_text
            finally:
                fileobj.close()

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()

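# Illustrative sketch (the job object and indenter are assumed; see the
# simple_indenter sketch above): rendered entries go into <resultdir>/status
# and, for entries with a subdir, into <resultdir>/<subdir>/status as well.
#
#   logger = status_logger(job, simple_indenter())
#   logger.record_entry(status_log_entry('START', None, 'sleeptest', '', None))
#   logger.record_entry(status_log_entry('END GOOD', None, 'sleeptest', '', None))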

class base_job(object):
    """An abstract base class for the various autotest job classes.

    @property autodir: The top level autotest directory.
    @property clientdir: The autotest client directory.
    @property serverdir: The autotest server directory. [OPTIONAL]
    @property resultdir: The directory where results should be written out.
        [WRITABLE]

    @property pkgdir: The job packages directory. [WRITABLE]
    @property tmpdir: The job temporary directory. [WRITABLE]
    @property testdir: The job test directory. [WRITABLE]
    @property site_testdir: The job site test directory. [WRITABLE]

    @property bindir: The client bin/ directory.
    @property configdir: The client config/ directory.
    @property profdir: The client profilers/ directory.
    @property toolsdir: The client tools/ directory.

    @property conmuxdir: The conmux directory. [OPTIONAL]

    @property control: A path to the control file to be executed. [OPTIONAL]
    @property hosts: A set of all live Host objects currently in use by the
        job. Code running in the context of a local client can safely assume
        that this set contains only a single entry.
    @property machines: A list of the machine names associated with the job.
    @property user: The user executing the job.
    @property tag: A tag identifying the job. Often used by the scheduler to
        give a name of the form NUMBER-USERNAME/HOSTNAME.
    @property args: A list of additional miscellaneous command-line arguments
        provided when starting the job.

    @property last_boot_tag: The label of the kernel from the last reboot.
        [OPTIONAL,PERSISTENT]
    @property automatic_test_tag: A string which, if set, will be automatically
        added to the test name when running tests.

    @property default_profile_only: A boolean indicating the default value of
        profile_only used by test.execute. [PERSISTENT]
    @property drop_caches: A boolean indicating if caches should be dropped
        before each test is executed.
    @property drop_caches_between_iterations: A boolean indicating if caches
        should be dropped before each test iteration is executed.
    @property run_test_cleanup: A boolean indicating if test.cleanup should be
        run by default after a test completes, if the run_cleanup argument is
        not specified. [PERSISTENT]

    @property num_tests_run: The number of tests run during the job. [OPTIONAL]
    @property num_tests_failed: The number of tests failed during the job.
        [OPTIONAL]

    @property bootloader: An instance of the boottool class. May not be
        available on job instances where access to the bootloader is not
        available (e.g. on the server running a server job). [OPTIONAL]
    @property harness: An instance of the client test harness. Only available
        in contexts where client test execution happens. [OPTIONAL]
    @property logging: An instance of the logging manager associated with the
        job.
    @property profilers: An instance of the profiler manager associated with
        the job.
    @property sysinfo: An instance of the sysinfo object. Only available in
        contexts where it's possible to collect sysinfo.
    @property warning_manager: A class for managing which types of WARN
        messages should be logged and which should be suppressed. [OPTIONAL]
    @property warning_loggers: A set of readable streams that will be monitored
        for WARN messages to be logged. [OPTIONAL]

    Abstract methods:
        _find_base_directories [CLASSMETHOD]
            Returns the location of autodir, clientdir and serverdir

        _find_resultdir
            Returns the location of resultdir. Gets a copy of any parameters
            passed into base_job.__init__. Can return None to indicate that
            no resultdir is to be used.

        _get_status_logger
            Returns a status_logger instance for recording job status logs.
    """

    # capture the dependency on several helper classes with factories
    _job_directory = job_directory
    _job_state = job_state


    # all the job directory attributes
    autodir = _job_directory.property_factory('autodir')
    clientdir = _job_directory.property_factory('clientdir')
    serverdir = _job_directory.property_factory('serverdir')
    resultdir = _job_directory.property_factory('resultdir')
    pkgdir = _job_directory.property_factory('pkgdir')
    tmpdir = _job_directory.property_factory('tmpdir')
    testdir = _job_directory.property_factory('testdir')
    site_testdir = _job_directory.property_factory('site_testdir')
    bindir = _job_directory.property_factory('bindir')
    configdir = _job_directory.property_factory('configdir')
    profdir = _job_directory.property_factory('profdir')
    toolsdir = _job_directory.property_factory('toolsdir')
    conmuxdir = _job_directory.property_factory('conmuxdir')


    # all the generic persistent properties
    tag = _job_state.property_factory('_state', 'tag', '')
    default_profile_only = _job_state.property_factory(
        '_state', 'default_profile_only', False)
    run_test_cleanup = _job_state.property_factory(
        '_state', 'run_test_cleanup', True)
    last_boot_tag = _job_state.property_factory(
        '_state', 'last_boot_tag', None)
    automatic_test_tag = _job_state.property_factory(
        '_state', 'automatic_test_tag', None)

    # the use_sequence_number property
    _sequence_number = _job_state.property_factory(
        '_state', '_sequence_number', None)
    def _get_use_sequence_number(self):
        return bool(self._sequence_number)
    def _set_use_sequence_number(self, value):
        if value:
            self._sequence_number = 1
        else:
            self._sequence_number = None
    use_sequence_number = property(_get_use_sequence_number,
                                   _set_use_sequence_number)


    def __init__(self, *args, **dargs):
        # initialize the base directories, all others are relative to these
        autodir, clientdir, serverdir = self._find_base_directories()
        self._autodir = self._job_directory(autodir)
        self._clientdir = self._job_directory(clientdir)
        if serverdir:
            self._serverdir = self._job_directory(serverdir)
        else:
            self._serverdir = None

        # initialize all the other directories relative to the base ones
        self._initialize_dir_properties()
        self._resultdir = self._job_directory(
            self._find_resultdir(*args, **dargs), True)
        self._execution_contexts = []

        # initialize all the job state
        self._state = self._job_state()


    @classmethod
    def _find_base_directories(cls):
        raise NotImplementedError()


    def _initialize_dir_properties(self):
        """
        Initializes all the secondary self.*dir properties. Requires autodir,
        clientdir and serverdir to already be initialized.
        """
        # create some stubs for use as shortcuts
        def readonly_dir(*args):
            return self._job_directory(os.path.join(*args))
        def readwrite_dir(*args):
            return self._job_directory(os.path.join(*args), True)

        # various client-specific directories
        self._bindir = readonly_dir(self.clientdir, 'bin')
        self._configdir = readonly_dir(self.clientdir, 'config')
        self._profdir = readonly_dir(self.clientdir, 'profilers')
        self._pkgdir = readwrite_dir(self.clientdir, 'packages')
        self._toolsdir = readonly_dir(self.clientdir, 'tools')

        # directories which are in serverdir on a server, clientdir on a client
        if self.serverdir:
            root = self.serverdir
        else:
            root = self.clientdir
        self._tmpdir = readwrite_dir(root, 'tmp')
        self._testdir = readwrite_dir(root, 'tests')
        self._site_testdir = readwrite_dir(root, 'site_tests')

        # various server-specific directories
        if self.serverdir:
            self._conmuxdir = readonly_dir(self.autodir, 'conmux')
        else:
            self._conmuxdir = None


    def _find_resultdir(self, *args, **dargs):
        raise NotImplementedError()


    def push_execution_context(self, resultdir):
        """
        Save off the current context of the job and change to the given one.

        In practice this method just changes the resultdir, but it may become
        more extensive in the future. The expected use case is for when a child
        job needs to be executed in some sort of nested context (for example
        the way parallel_simple does). The original context can be restored
        with a pop_execution_context call.

        @param resultdir: The new resultdir, relative to the current one.
        """
        new_dir = self._job_directory(
            os.path.join(self.resultdir, resultdir), True)
        self._execution_contexts.append(self._resultdir)
        self._resultdir = new_dir


    def pop_execution_context(self):
        """
        Reverse the effects of the previous push_execution_context call.

        @raise IndexError: raised when the stack of contexts is empty.
        """
        if not self._execution_contexts:
            raise IndexError('No old execution context to restore')
        self._resultdir = self._execution_contexts.pop()

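    # Illustrative sketch: temporarily redirect results into a nested
    # directory (e.g. while running a child job), then restore the original
    # resultdir. The directory name is hypothetical.
    #
    #   job.push_execution_context('machine1')  # resultdir becomes .../machine1
    #   try:
    #       pass  # run the nested work here
    #   finally:
    #       job.pop_execution_context()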

    def get_state(self, name, default=_job_state.NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        try:
            return self._state.get('public', name, default=default)
        except KeyError:
            raise KeyError(name)


    def set_state(self, name, value):
        """Saves the value given with the provided name.

        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        self._state.set('public', name, value)


    def _build_tagged_test_name(self, testname, dargs):
        """Builds the fully tagged testname and subdirectory for job.run_test.

        @param testname: The base name of the test
        @param dargs: The ** arguments passed to run_test. Any arguments
            consumed by this method will be removed from the dictionary.

        @return: A 3-tuple of the full name of the test, the subdirectory it
            should be stored in, and the full tag of the subdir.
        """
        tag_parts = []

        # build up the parts of the tag used for the test name
        base_tag = dargs.pop('tag', None)
        if base_tag:
            tag_parts.append(str(base_tag))
        if self.use_sequence_number:
            tag_parts.append('_%02d_' % self._sequence_number)
            self._sequence_number += 1
        if self.automatic_test_tag:
            tag_parts.append(self.automatic_test_tag)
        full_testname = '.'.join([testname] + tag_parts)

        # build up the subdir and tag as well
        subdir_tag = dargs.pop('subdir_tag', None)
        if subdir_tag:
            tag_parts.append(subdir_tag)
        subdir = '.'.join([testname] + tag_parts)
        tag = '.'.join(tag_parts)

        return full_testname, subdir, tag

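    # Illustrative sketch of the naming scheme: with tag='smoke', an
    # automatic_test_tag of 'rerun' and subdir_tag='once', a 'sleeptest' run
    # would produce
    #
    #   full_testname = 'sleeptest.smoke.rerun'
    #   subdir        = 'sleeptest.smoke.rerun.once'
    #   tag           = 'smoke.rerun.once'
    #
    # (when use_sequence_number is enabled an extra '_NN_' part is inserted).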

    def _make_test_outputdir(self, subdir):
        """Creates an output directory for a test to run in.

        @param subdir: The subdirectory of the test. Generally computed by
            _build_tagged_test_name.

        @return: A job_directory instance corresponding to the outputdir of
            the test.
        @raise TestError: If the output directory is invalid.
        """
        # explicitly check that this subdirectory is new
        path = os.path.join(self.resultdir, subdir)
        if os.path.exists(path):
            msg = ('%s already exists; multiple tests cannot run with the '
                   'same subdirectory' % subdir)
            raise error.TestError(msg)

        # create the outputdir and raise a TestError if it isn't valid
        try:
            outputdir = self._job_directory(path, True)
            return outputdir
        except self._job_directory.JobDirectoryException, e:
            logging.exception('%s directory creation failed with %s',
                              subdir, e)
            raise error.TestError('%s directory creation failed' % subdir)


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """Record a job-level status event.

        Logs an event noteworthy to the Autotest job as a whole. Messages will
        be written into a global status log file, as well as a subdir-local
        status log file (if subdir is specified).

        @param status_code: A string status code describing the type of status
            entry being recorded. It must pass log.is_valid_status to be
            considered valid.
        @param subdir: A specific results subdirectory this also applies to, or
            None. If not None the subdirectory must exist.
        @param operation: A string describing the operation that was run.
        @param status: An optional human-readable message describing the status
            entry, for example an error message or "completed successfully".
        @param optional_fields: An optional dictionary of additional named
            fields to be included with the status message. A timestamp and a
            localtime entry are always generated with the current time and
            added to this dictionary.
        """
        entry = status_log_entry(status_code, subdir, operation, status,
                                 optional_fields)
        self.record_entry(entry)


    def record_entry(self, entry, log_in_subdir=True):
        """Record a job-level status event, using a status_log_entry.

        This is the same as self.record but using an existing status log
        entry object rather than constructing one for you.

        @param entry: A status_log_entry object
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        self._get_status_logger().record_entry(entry, log_in_subdir)
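
    # Illustrative sketch of job-level status recording (the operation name
    # is hypothetical); START/END entries adjust the log nesting level.
    #
    #   job.record('START', None, 'sleeptest')
    #   job.record('GOOD', None, 'sleeptest', 'completed successfully')
    #   job.record('END GOOD', None, 'sleeptest')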