# pylint: disable=missing-docstring

import os, copy, logging, errno, fcntl, time, re, weakref, traceback
import tarfile
import cPickle as pickle
import tempfile
from autotest_lib.client.common_lib import autotemp, error, log


class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            msg = 'Creation of directory %s failed with exception %s'
            msg %= (path, error)
            Exception.__init__(self, msg)


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            msg = 'Directory %s exists but is not writable' % path
            Exception.__init__(self, msg)


    def __init__(self, path, is_writable=False):
        """
        Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is None:
            if is_writable:
                self._tempdir = autotemp.tempdir(unique_id='autotest')
                self.path = self._tempdir.name
            else:
                raise self.MissingDirectoryException(path)
        else:
            self._tempdir = None
            self.path = path
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """
        Ensure that this is a valid directory.

        Will check if a directory exists, can optionally also enforce that
        it be writable. It can optionally create it if necessary. Creation
        will still fail if the path is rooted in a non-writable directory, or
        if a file already exists at the given location.

        @param is_writable: A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raises UnwritableDirectoryException: raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        # ensure the directory exists
        if is_writable:
            try:
                os.makedirs(self.path)
            except OSError, e:
                if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                    raise self.UncreatableDirectoryException(self.path, e)
        elif not os.path.isdir(self.path):
            raise self.MissingDirectoryException(self.path)

        # if is_writable=True, also check that the directory is writable
        if is_writable and not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """
        Create a job.*dir -> job._*dir.path property accessor.

        @param attribute: A string with the name of the attribute this is
            exposed as. '_'+attribute must then be an attribute that holds
            either None or a job_directory-like object.

        @returns: A read-only property object that exposes a job_directory
            path.
        """
        @property
        def dir_property(self):
            underlying_attribute = getattr(self, '_' + attribute)
            if underlying_attribute is None:
                return None
            else:
                return underlying_attribute.path
        return dir_property


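# Illustrative sketch (not part of the original module): job_directory wraps a
# filesystem path, and property_factory exposes that path on a job object the
# same way base_job builds its *dir properties further below. The class and
# path names here are hypothetical.
#
#   class example_job(object):
#       _job_directory = job_directory
#       resultdir = _job_directory.property_factory('resultdir')
#
#       def __init__(self, results_path):
#           # writable directory; created if it does not already exist
#           self._resultdir = self._job_directory(results_path, True)
#
#   job = example_job('/tmp/example_results')
#   print job.resultdir   # -> '/tmp/example_results'
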
# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a backing file lock before the call, followed
    by a backing file unlock.
    """
    def wrapped_method(self, *args, **dargs):
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            if not already_have_lock:
                self._unlock_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method


# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap
    calls to the method in a lock-and-read before the call followed by a
    write-and-unlock. Any operation that is reading or writing state
    should be decorated with this method to ensure that backing file
    state is consistently maintained.
    """
    @with_backing_lock
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            self._write_to_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method

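# Illustrative sketch (not part of the original module): a state method
# decorated with @with_backing_file behaves roughly like the hand-written
# sequence below, so every read or write sees, and leaves behind, a backing
# file that is consistent with the in-memory state.
#
#   def set(self, namespace, name, value):
#       self._lock_backing_file()            # flock(LOCK_EX) on the file
#       try:
#           self._read_from_backing_file()   # pull in any on-disk changes
#           try:
#               ...                          # the decorated method body
#           finally:
#               self._write_to_backing_file()  # flush merged state to disk
#       finally:
#           self._unlock_backing_file()      # flock(LOCK_UN) and close
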

class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the values associated with properties constructed using the
    property_factory method.
    """

    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        self._state = {}
        self._backing_file = None
        self._backing_file_initialized = False
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """

        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            on_disk_state = pickle.load(open(file_path))

        if merge:
            # merge the on-disk state with the in-memory state
            for namespace, namespace_dict in on_disk_state.iteritems():
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in namespace_dict.iteritems():
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overriding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        outfile = open(file_path, 'w')
        try:
            pickle.dump(self._state, outfile, self.PICKLE_PROTOCOL)
        finally:
            outfile.close()


    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_file
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        pass


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified, if the file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()

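    # Illustrative sketch (not part of the original module): typical use of
    # job_state with a persistent backing file. The path below is hypothetical.
    #
    #   state = job_state()
    #   state.set_backing_file('/tmp/job_state.pickle')
    #   state.set('global_properties', 'run_test_cleanup', False)
    #   state.has('global_properties', 'run_test_cleanup')        # -> True
    #   state.get('global_properties', 'missing', default=None)   # -> None
    #
    # A second job_state pointed at the same backing file would pick up the
    # stored values, since every get/set re-reads and re-writes the file
    # under an exclusive flock.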

    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)

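# Illustrative sketch (not part of the original module): how a job class can
# expose persistent state as plain attributes via job_state.property_factory,
# mirroring the tag/test_retry/... properties defined on base_job below. The
# class and attribute names here are hypothetical.
#
#   class example_job(object):
#       retries = job_state.property_factory('_state', 'retries', 0)
#
#       def __init__(self):
#           self._state = job_state()
#
#   job = example_job()
#   job.retries        # -> 0 (the default)
#   job.retries = 5    # stored via self._state.set('global_properties', ...)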

class status_log_entry(object):
    """Represents a single status log entry."""

    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    # non-space whitespace is forbidden in any fields
    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')

    def _init_message(self, message):
        """Handle the message which describes the event to be recorded.

        Break the message line into a single-line message that goes into the
        database, and a block of additional lines that goes into the status
        log but will never be parsed.
        If a bad character is detected in the message, it is replaced with a
        space instead of raising an exception, so the line can still be
        handled by the tko parser.

        @param message: the input message.

        @return: filtered message without bad characters.
        """
        message_lines = message.splitlines()
        if message_lines:
            self.message = message_lines[0]
            self.extra_message_lines = message_lines[1:]
        else:
            self.message = ''
            self.extra_message_lines = []

        self.message = self.message.replace('\t', ' ' * 8)
        self.message = self.BAD_CHAR_REGEX.sub(' ', self.message)


    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid
        """
        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and self.BAD_CHAR_REGEX.search(subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and self.BAD_CHAR_REGEX.search(operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        self._init_message(message)

        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
            for key, value in self.fields.iteritems():
                if type(value) is int:
                    value = str(value)
                if self.BAD_CHAR_REGEX.search(key + value):
                    raise ValueError('Invalid character in %r=%r field'
                                     % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
                '%b %d %H:%M:%S', time.localtime(timestamp))


    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in self.fields.iteritems()]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)


    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)

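# Illustrative sketch (not part of the original module): rendering a status
# log entry and parsing it back. The operation name, message, and timestamp
# shown are made up.
#
#   entry = status_log_entry('GOOD', None, 'example_test',
#                            'completed successfully', None,
#                            timestamp=1300000000)
#   line = entry.render()
#   # -> 'GOOD\t----\texample_test\ttimestamp=1300000000\t...\tcompleted successfully'
#   same = status_log_entry.parse(line)
#   assert same.status_code == 'GOOD' and same.operation == 'example_test'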

class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        raise NotImplementedError


    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError


    def decrement(self):
        """Decrease indentation by one level."""
        raise NotImplementedError


class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None,
                 tap_writer=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        @param tap_writer: An instance of the class TAPReport for additionally
            writing TAP files.
        """
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook
        if tap_writer is None:
            self._tap_writer = TAPReport(None)
        else:
            self._tap_writer = tap_writer


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
            status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write out the entry to the log files
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            fileobj = open(log_file, 'a')
            try:
                print >> fileobj, log_text
            finally:
                fileobj.close()

        # write to TAPRecord instance
        if log_entry.is_end() and self._tap_writer.do_tap_report:
            self._tap_writer.record(log_entry, self._indenter.indent, log_files)

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()

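# Illustrative sketch (not part of the original module): how a job-side caller
# typically drives status_logger. The concrete job and indenter objects come
# from the client/server job implementations; names below are hypothetical.
#
#   logger = status_logger(job, indenter)
#   logger.record_entry(status_log_entry('START', None, 'example_test',
#                                        '', None))
#   ...  # run the test, recording nested entries
#   logger.record_entry(status_log_entry('END GOOD', None, 'example_test',
#                                        'completed', None))
#
# START increments the indentation level and END decrements it, so nested
# blocks in the status file line up with the job's test structure.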

class TAPReport(object):
    """
    Deal with TAP reporting for the Autotest client.
    """

    job_statuses = {
        "TEST_NA": False,
        "ABORT": False,
        "ERROR": False,
        "FAIL": False,
        "WARN": False,
        "GOOD": True,
        "START": True,
        "END GOOD": True,
        "ALERT": False,
        "RUNNING": False,
        "NOSTATUS": False
    }


    def __init__(self, enable, resultdir=None, global_filename='status'):
        """
        @param enable: Set self.do_tap_report to trigger TAP reporting.
        @param resultdir: Path where the TAP report files will be written.
        @param global_filename: File name of the status files; the .tap
            extension will be appended.
        """
        self.do_tap_report = enable
        if resultdir is not None:
            self.resultdir = os.path.abspath(resultdir)
        self._reports_container = {}
        self._keyval_container = {}  # {'path1': [entries],}
        self.global_filename = global_filename


    @classmethod
    def tap_ok(self, success, counter, message):
        """
        Return a TAP message string.

        @param success: True for positive message string.
        @param counter: number of TAP line in plan.
        @param message: additional message to report in TAP line.
        """
        if success:
            message = "ok %s - %s" % (counter, message)
        else:
            message = "not ok %s - %s" % (counter, message)
        return message

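    # Illustrative sketch (not part of the original module): tap_ok builds the
    # per-test line of a TAP report; the '1..N' plan line is prepended later by
    # _write_reports once the total count is known.
    #
    #   TAPReport.tap_ok(True, 1, "job")      # -> 'ok 1 - job'
    #   TAPReport.tap_ok(False, 2, "client")  # -> 'not ok 2 - client'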

    def record(self, log_entry, indent, log_files):
        """
        Append a job-level status event to self._reports_container. All
        events will be written to TAP log files at the end of the test run.
        Otherwise, it's impossible to determine the TAP plan.

        @param log_entry: The status_log_entry being recorded; its status_code
            must pass log.is_valid_status to be considered valid.
        @param indent: Level of the log_entry to determine the operation if
            log_entry.operation is not given.
        @param log_files: List of full path of files the TAP report will be
            written to at the end of the test.
        """
        for log_file in log_files:
            log_file_path = os.path.dirname(log_file)
            key = log_file_path.split(self.resultdir, 1)[1].strip(os.sep)
            if not key:
                key = 'root'

            if not self._reports_container.has_key(key):
                self._reports_container[key] = []

            if log_entry.operation:
                operation = log_entry.operation
            elif indent == 1:
                operation = "job"
            else:
                operation = "unknown"
            entry = self.tap_ok(
                self.job_statuses.get(log_entry.status_code, False),
                len(self._reports_container[key]) + 1, operation + "\n"
            )
            self._reports_container[key].append(entry)


    def record_keyval(self, path, dictionary, type_tag=None):
        """
        Append the key-value pairs of dictionary to self._keyval_container in
        TAP format. Once finished write out the keyval.tap file to the file
        system.

        If type_tag is None, then the key must be composed of alphanumeric
        characters (or dashes + underscores). However, if type_tag is not
        None then the keys must also have "{type_tag}" as a suffix. At
        the moment the only valid values of type_tag are "attr" and "perf".

        @param path: The full path of the keyval.tap file to be created.
        @param dictionary: The keys and values.
        @param type_tag: The type of the values.
        """
        self._keyval_container.setdefault(path, [0, []])
        self._keyval_container[path][0] += 1

        if type_tag is None:
            key_regex = re.compile(r'^[-\.\w]+$')
        else:
            if type_tag not in ('attr', 'perf'):
                raise ValueError('Invalid type tag: %s' % type_tag)
            escaped_tag = re.escape(type_tag)
            key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)
        self._keyval_container[path][1].extend([
            self.tap_ok(True, self._keyval_container[path][0], "results"),
            "\n  ---\n",
        ])
        try:
            for key in sorted(dictionary.keys()):
                if not key_regex.search(key):
                    raise ValueError('Invalid key: %s' % key)
                self._keyval_container[path][1].append(
                    '  %s: %s\n' % (key.replace('{', '_').rstrip('}'),
                                    dictionary[key])
                )
        finally:
            self._keyval_container[path][1].append("  ...\n")
            self._write_keyval()


    def _write_reports(self):
        """
        Write TAP reports to file.
        """
        for key in self._reports_container.keys():
            if key == 'root':
                sub_dir = ''
            else:
                sub_dir = key
            tap_fh = open(os.sep.join(
                [self.resultdir, sub_dir, self.global_filename]
            ) + ".tap", 'w')
            tap_fh.write('1..' + str(len(self._reports_container[key])) + '\n')
            tap_fh.writelines(self._reports_container[key])
            tap_fh.close()


    def _write_keyval(self):
        """
        Write the self._keyval_container key values to a file.
        """
        for path in self._keyval_container.keys():
            tap_fh = open(path + ".tap", 'w')
            tap_fh.write('1..' + str(self._keyval_container[path][0]) + '\n')
            tap_fh.writelines(self._keyval_container[path][1])
            tap_fh.close()


    def write(self):
        """
        Write the TAP reports to files.
        """
        self._write_reports()


    def _write_tap_archive(self):
        """
        Write a tar archive containing all the TAP files and
        a meta.yml containing the file names.
        """
        os.chdir(self.resultdir)
        tap_files = []
        for rel_path, d, files in os.walk('.'):
            tap_files.extend(["/".join(
                [rel_path, f]) for f in files if f.endswith('.tap')])
        meta_yaml = open('meta.yml', 'w')
        meta_yaml.write('file_order:\n')
        tap_tar = tarfile.open(self.resultdir + '/tap.tar.gz', 'w:gz')
        for f in tap_files:
            meta_yaml.write("  - " + f.lstrip('./') + "\n")
            tap_tar.add(f)
        meta_yaml.close()
        tap_tar.add('meta.yml')
        tap_tar.close()


class base_job(object):
    """An abstract base class for the various autotest job classes.

    @property autodir: The top level autotest directory.
    @property clientdir: The autotest client directory.
    @property serverdir: The autotest server directory. [OPTIONAL]
    @property resultdir: The directory where results should be written out.
        [WRITABLE]

    @property pkgdir: The job packages directory. [WRITABLE]
    @property tmpdir: The job temporary directory. [WRITABLE]
    @property testdir: The job test directory. [WRITABLE]
    @property site_testdir: The job site test directory. [WRITABLE]

    @property bindir: The client bin/ directory.
    @property profdir: The client profilers/ directory.
    @property toolsdir: The client tools/ directory.

    @property control: A path to the control file to be executed. [OPTIONAL]
    @property hosts: A set of all live Host objects currently in use by the
        job. Code running in the context of a local client can safely assume
        that this set contains only a single entry.
    @property machines: A list of the machine names associated with the job.
    @property user: The user executing the job.
    @property tag: A tag identifying the job. Often used by the scheduler to
        give a name of the form NUMBER-USERNAME/HOSTNAME.
    @property test_retry: The number of times to retry a test if the test did
        not complete successfully.
    @property args: A list of additional miscellaneous command-line arguments
        provided when starting the job.

    @property automatic_test_tag: A string which, if set, will be automatically
        added to the test name when running tests.

    @property default_profile_only: A boolean indicating the default value of
        profile_only used by test.execute. [PERSISTENT]
    @property drop_caches: A boolean indicating if caches should be dropped
        before each test is executed.
    @property drop_caches_between_iterations: A boolean indicating if caches
        should be dropped before each test iteration is executed.
    @property run_test_cleanup: A boolean indicating if test.cleanup should be
        run by default after a test completes, if the run_cleanup argument is
        not specified. [PERSISTENT]

    @property num_tests_run: The number of tests run during the job. [OPTIONAL]
    @property num_tests_failed: The number of tests failed during the job.
        [OPTIONAL]

    @property harness: An instance of the client test harness. Only available
        in contexts where client test execution happens. [OPTIONAL]
    @property logging: An instance of the logging manager associated with the
        job.
    @property profilers: An instance of the profiler manager associated with
        the job.
    @property sysinfo: An instance of the sysinfo object. Only available in
        contexts where it's possible to collect sysinfo.
    @property warning_manager: A class for managing which types of WARN
        messages should be logged and which should be suppressed. [OPTIONAL]
    @property warning_loggers: A set of readable streams that will be monitored
        for WARN messages to be logged. [OPTIONAL]

    Abstract methods:
        _find_base_directories [CLASSMETHOD]
            Returns the location of autodir, clientdir and serverdir

        _find_resultdir
            Returns the location of resultdir. Gets a copy of any parameters
            passed into base_job.__init__. Can return None to indicate that
            no resultdir is to be used.

        _get_status_logger
            Returns a status_logger instance for recording job status logs.
    """

    # capture the dependency on several helper classes with factories
    _job_directory = job_directory
    _job_state = job_state


    # all the job directory attributes
    autodir = _job_directory.property_factory('autodir')
    clientdir = _job_directory.property_factory('clientdir')
    serverdir = _job_directory.property_factory('serverdir')
    resultdir = _job_directory.property_factory('resultdir')
    pkgdir = _job_directory.property_factory('pkgdir')
    tmpdir = _job_directory.property_factory('tmpdir')
    testdir = _job_directory.property_factory('testdir')
    site_testdir = _job_directory.property_factory('site_testdir')
    bindir = _job_directory.property_factory('bindir')
    profdir = _job_directory.property_factory('profdir')
    toolsdir = _job_directory.property_factory('toolsdir')


    # all the generic persistent properties
    tag = _job_state.property_factory('_state', 'tag', '')
    test_retry = _job_state.property_factory('_state', 'test_retry', 0)
    default_profile_only = _job_state.property_factory(
        '_state', 'default_profile_only', False)
    run_test_cleanup = _job_state.property_factory(
        '_state', 'run_test_cleanup', True)
    automatic_test_tag = _job_state.property_factory(
        '_state', 'automatic_test_tag', None)

    # the use_sequence_number property
    _sequence_number = _job_state.property_factory(
        '_state', '_sequence_number', None)
    def _get_use_sequence_number(self):
        return bool(self._sequence_number)
    def _set_use_sequence_number(self, value):
        if value:
            self._sequence_number = 1
        else:
            self._sequence_number = None
    use_sequence_number = property(_get_use_sequence_number,
                                   _set_use_sequence_number)

    # parent job id is passed in from autoserv command line. It's only used in
    # server job. The property is added here for unittest
    # (base_job_unittest.py) to be consistent on validating public properties
    # of a base_job object.
    parent_job_id = None

    def __init__(self, *args, **dargs):
        # initialize the base directories, all others are relative to these
        autodir, clientdir, serverdir = self._find_base_directories()
        self._autodir = self._job_directory(autodir)
        self._clientdir = self._job_directory(clientdir)
        # TODO(scottz): crosbug.com/38259, needed to pass unittests for now.
        self.label = None
        if serverdir:
            self._serverdir = self._job_directory(serverdir)
        else:
            self._serverdir = None

        # initialize all the other directories relative to the base ones
        self._initialize_dir_properties()
        self._resultdir = self._job_directory(
            self._find_resultdir(*args, **dargs), True)
        self._execution_contexts = []

        # initialize all the job state
        self._state = self._job_state()

        # initialize tap reporting
        if dargs.has_key('options'):
            self._tap = self._tap_init(dargs['options'].tap_report)
        else:
            self._tap = self._tap_init(False)

| 1025 | @classmethod |
| 1026 | def _find_base_directories(cls): |
| 1027 | raise NotImplementedError() |
| 1028 | |
| 1029 | |
| 1030 | def _initialize_dir_properties(self): |
| 1031 | """ |
| 1032 | Initializes all the secondary self.*dir properties. Requires autodir, |
| 1033 | clientdir and serverdir to already be initialized. |
| 1034 | """ |
| 1035 | # create some stubs for use as shortcuts |
| 1036 | def readonly_dir(*args): |
| 1037 | return self._job_directory(os.path.join(*args)) |
| 1038 | def readwrite_dir(*args): |
| 1039 | return self._job_directory(os.path.join(*args), True) |
| 1040 | |
| 1041 | # various client-specific directories |
| 1042 | self._bindir = readonly_dir(self.clientdir, 'bin') |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1043 | self._profdir = readonly_dir(self.clientdir, 'profilers') |
| 1044 | self._pkgdir = readwrite_dir(self.clientdir, 'packages') |
| 1045 | self._toolsdir = readonly_dir(self.clientdir, 'tools') |
| 1046 | |
| 1047 | # directories which are in serverdir on a server, clientdir on a client |
Fang Deng | d9a056f | 2013-10-29 11:31:27 -0700 | [diff] [blame] | 1048 | # tmp tests, and site_tests need to be read_write for client, but only |
| 1049 | # read for server. |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1050 | if self.serverdir: |
| 1051 | root = self.serverdir |
Aviv Keshet | 36bf74a | 2013-08-15 16:09:03 -0700 | [diff] [blame] | 1052 | r_or_rw_dir = readonly_dir |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1053 | else: |
| 1054 | root = self.clientdir |
Aviv Keshet | 36bf74a | 2013-08-15 16:09:03 -0700 | [diff] [blame] | 1055 | r_or_rw_dir = readwrite_dir |
Aviv Keshet | 36bf74a | 2013-08-15 16:09:03 -0700 | [diff] [blame] | 1056 | self._testdir = r_or_rw_dir(root, 'tests') |
| 1057 | self._site_testdir = r_or_rw_dir(root, 'site_tests') |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1058 | |
| 1059 | # various server-specific directories |
| 1060 | if self.serverdir: |
Fang Deng | d9a056f | 2013-10-29 11:31:27 -0700 | [diff] [blame] | 1061 | self._tmpdir = readwrite_dir(tempfile.gettempdir()) |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1062 | else: |
Fang Deng | d9a056f | 2013-10-29 11:31:27 -0700 | [diff] [blame] | 1063 | self._tmpdir = readwrite_dir(root, 'tmp') |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1064 | |
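| | # Illustrative summary (added comment, not in the original file): after |
| | # _initialize_dir_properties() runs, a client-side job with |
| | # clientdir=/usr/local/autotest/client ends up with roughly this layout: |
| | #   self._bindir       -> <clientdir>/bin        (read-only) |
| | #   self._profdir      -> <clientdir>/profilers  (read-only) |
| | #   self._pkgdir       -> <clientdir>/packages   (read-write) |
| | #   self._toolsdir     -> <clientdir>/tools      (read-only) |
| | #   self._testdir      -> <clientdir>/tests      (read-write; on a server it is <serverdir>/tests, read-only) |
| | #   self._site_testdir -> <clientdir>/site_tests (same rule as _testdir) |
| | #   self._tmpdir       -> <clientdir>/tmp        (on a server, tempfile.gettempdir() is used instead) |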
| 1065 | |
| 1066 | def _find_resultdir(self, *args, **dargs): |
| 1067 | raise NotImplementedError() |
| 1068 | |
| 1069 | |
| 1070 | def push_execution_context(self, resultdir): |
| 1071 | """ |
| 1072 | Save off the current context of the job and change to the given one. |
| 1073 | |
| 1074 | In practice this method only changes the resultdir, but it may become more |
| 1075 | extensive in the future. The expected use case is for when a child |
| 1076 | job needs to be executed in some sort of nested context (for example |
| 1077 | the way parallel_simple does). The original context can be restored |
| 1078 | with a pop_execution_context call. |
| 1079 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1080 | @param resultdir: The new resultdir, relative to the current one. |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1081 | """ |
| 1082 | new_dir = self._job_directory( |
| 1083 | os.path.join(self.resultdir, resultdir), True) |
| 1084 | self._execution_contexts.append(self._resultdir) |
| 1085 | self._resultdir = new_dir |
| 1086 | |
| 1087 | |
| 1088 | def pop_execution_context(self): |
| 1089 | """ |
| 1090 | Reverse the effects of the previous push_execution_context call. |
| 1091 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1092 | @raise IndexError: raised when the stack of contexts is empty. |
jadmanski | da2f143 | 2009-11-06 15:20:09 +0000 | [diff] [blame] | 1093 | """ |
| 1094 | if not self._execution_contexts: |
| 1095 | raise IndexError('No old execution context to restore') |
| 1096 | self._resultdir = self._execution_contexts.pop() |
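| | # Hedged usage sketch (comment only, not in the original source): the |
| | # expected pairing around a nested child job; 'job' and 'run_child_job' are |
| | # placeholder names for illustration. |
| | # |
| | #     job.push_execution_context('child.0')   # results now go to <resultdir>/child.0 |
| | #     try: |
| | #         run_child_job() |
| | #     finally: |
| | #         job.pop_execution_context()          # restore the previous resultdir |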
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1097 | |
| 1098 | |
| 1099 | def get_state(self, name, default=_job_state.NO_DEFAULT): |
| 1100 | """Returns the value associated with a particular name. |
| 1101 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1102 | @param name: The name the value was saved with. |
| 1103 | @param default: A default value to return if no state is currently |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1104 | associated with name. |
| 1105 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1106 | @return: A deep copy of the value associated with name. Note that this |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1107 | explicitly returns a deep copy to avoid problems with mutable |
| 1108 | values; mutations are not persisted or shared. |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1109 | @raise KeyError: raised when no state is associated with name and a |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1110 | default value is not provided. |
| 1111 | """ |
| 1112 | try: |
| 1113 | return self._state.get('public', name, default=default) |
| 1114 | except KeyError: |
| 1115 | raise KeyError(name) |
| 1116 | |
| 1117 | |
| 1118 | def set_state(self, name, value): |
| 1119 | """Saves the value given with the provided name. |
| 1120 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1121 | @param name: The name the value should be saved with. |
| 1122 | @param value: The value to save. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1123 | """ |
| 1124 | self._state.set('public', name, value) |
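| | # Hedged usage sketch (comment only, not in the original source): saving a |
| | # small value across job phases; the key 'reboot_count' is illustrative. |
| | # |
| | #     job.set_state('reboot_count', 3) |
| | #     count = job.get_state('reboot_count', default=0) |
| | # |
| | # get_state returns a deep copy, so mutating the returned value in place is |
| | # not persisted; call set_state again to save any changes. |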
| 1125 | |
| 1126 | |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1127 | def _build_tagged_test_name(self, testname, dargs): |
| 1128 | """Builds the fully tagged testname and subdirectory for job.run_test. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1129 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1130 | @param testname: The base name of the test |
| 1131 | @param dargs: The ** arguments passed to run_test. Any arguments |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1132 | consumed by this method will be removed from the dictionary. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1133 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1134 | @return: A 3-tuple of the full name of the test, the subdirectory it |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1135 | should be stored in, and the full tag of the subdir. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1136 | """ |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1137 | tag_parts = [] |
| 1138 | |
| 1139 | # build up the parts of the tag used for the test name |
Dale Curtis | 74a314b | 2011-06-23 14:55:46 -0700 | [diff] [blame] | 1140 | master_testpath = dargs.get('master_testpath', "") |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1141 | base_tag = dargs.pop('tag', None) |
| 1142 | if base_tag: |
| 1143 | tag_parts.append(str(base_tag)) |
| 1144 | if self.use_sequence_number: |
| 1145 | tag_parts.append('_%02d_' % self._sequence_number) |
| 1146 | self._sequence_number += 1 |
| 1147 | if self.automatic_test_tag: |
| 1148 | tag_parts.append(self.automatic_test_tag) |
| 1149 | full_testname = '.'.join([testname] + tag_parts) |
| 1150 | |
| 1151 | # build up the subdir and tag as well |
| 1152 | subdir_tag = dargs.pop('subdir_tag', None) |
| 1153 | if subdir_tag: |
| 1154 | tag_parts.append(subdir_tag) |
| 1155 | subdir = '.'.join([testname] + tag_parts) |
Dale Curtis | 74a314b | 2011-06-23 14:55:46 -0700 | [diff] [blame] | 1156 | subdir = os.path.join(master_testpath, subdir) |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1157 | tag = '.'.join(tag_parts) |
| 1158 | |
| 1159 | return full_testname, subdir, tag |
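| | # Worked example (illustrative comment, not in the original source): for a |
| | # call like run_test('sleeptest', tag='smoke', subdir_tag='iter1') with |
| | # use_sequence_number enabled, _sequence_number == 3, no automatic_test_tag |
| | # and no master_testpath, this method returns: |
| | #   full_testname -> 'sleeptest.smoke._03_' |
| | #   subdir        -> 'sleeptest.smoke._03_.iter1' |
| | #   tag           -> 'smoke._03_.iter1' |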
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1160 | |
| 1161 | |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1162 | def _make_test_outputdir(self, subdir): |
| 1163 | """Creates an output directory for a test to run in. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1164 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1165 | @param subdir: The subdirectory of the test. Generally computed by |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1166 | _build_tagged_test_name. |
| 1167 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1168 | @return: A job_directory instance corresponding to the outputdir of |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1169 | the test. |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1170 | @raise TestError: If the output directory is invalid. |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1171 | """ |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1172 | # explicitly check that this subdirectory is new |
| 1173 | path = os.path.join(self.resultdir, subdir) |
| 1174 | if os.path.exists(path): |
| 1175 | msg = ('%s already exists; multiple tests cannot run with the ' |
| 1176 | 'same subdirectory' % subdir) |
| 1177 | raise error.TestError(msg) |
mbligh | fbf73ae | 2009-12-19 05:22:42 +0000 | [diff] [blame] | 1178 | |
mbligh | fc3da5b | 2010-01-06 18:37:22 +0000 | [diff] [blame] | 1179 | # create the outputdir and raise a TestError if it isn't valid |
| 1180 | try: |
| 1181 | outputdir = self._job_directory(path, True) |
| 1182 | return outputdir |
| 1183 | except self._job_directory.JobDirectoryException, e: |
| 1184 | logging.exception('%s directory creation failed with %s', |
| 1185 | subdir, e) |
| 1186 | raise error.TestError('%s directory creation failed' % subdir) |
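| | # Hedged usage sketch (comment only, not in the original source): given the |
| | # subdir produced by _build_tagged_test_name above, |
| | #     outputdir = self._make_test_outputdir('sleeptest.smoke._03_') |
| | # creates a writable job_directory at <resultdir>/sleeptest.smoke._03_; a |
| | # second call with the same subdir raises error.TestError. |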
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1187 | |
Eric Li | 861b2d5 | 2011-02-04 14:50:35 -0800 | [diff] [blame] | 1188 | def _tap_init(self, enable): |
| 1189 | """Initialize TAP reporting. |
| | @param enable: Whether TAP report generation should be enabled. |
| 1190 | """ |
| 1191 | return TAPReport(enable, resultdir=self.resultdir) |
| 1192 | |
jadmanski | 4afc367 | 2010-04-30 21:22:54 +0000 | [diff] [blame] | 1193 | |
| 1194 | def record(self, status_code, subdir, operation, status='', |
| 1195 | optional_fields=None): |
| 1196 | """Record a job-level status event. |
| 1197 | |
| 1198 | Logs an event noteworthy to the Autotest job as a whole. Messages will |
| 1199 | be written into a global status log file, as well as a subdir-local |
| 1200 | status log file (if subdir is specified). |
| 1201 | |
| 1202 | @param status_code: A string status code describing the type of status |
| 1203 | entry being recorded. It must pass log.is_valid_status to be |
| 1204 | considered valid. |
| 1205 | @param subdir: A specific results subdirectory this also applies to, or |
| 1206 | None. If not None the subdirectory must exist. |
| 1207 | @param operation: A string describing the operation that was run. |
| 1208 | @param status: An optional human-readable message describing the status |
| 1209 | entry, for example an error message or "completed successfully". |
| 1210 | @param optional_fields: An optional dictionary of additional named fields |
| 1211 | to be included with the status message. Timestamp and localtime |
| 1212 | entries are always generated with the current time and added to |
| 1213 | this dictionary. |
| 1214 | """ |
| 1215 | entry = status_log_entry(status_code, subdir, operation, status, |
| 1216 | optional_fields) |
jadmanski | 2a89dac | 2010-06-11 14:32:58 +0000 | [diff] [blame] | 1217 | self.record_entry(entry) |
| 1218 | |
| 1219 | |
| 1220 | def record_entry(self, entry, log_in_subdir=True): |
| 1221 | """Record a job-level status event, using a status_log_entry. |
| 1222 | |
| 1223 | This is the same as self.record but using an existing status log |
| 1224 | entry object rather than constructing one for you. |
| 1225 | |
| 1226 | @param entry: A status_log_entry object |
| 1227 | @param log_in_subdir: A boolean that indicates (when true) that subdir |
| 1228 | logs should be written into the subdirectory status log file. |
| 1229 | """ |
| 1230 | self._get_status_logger().record_entry(entry, log_in_subdir) |
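| | # Hedged usage sketch (comment only, not in the original source): recording |
| | # a simple job-level event. 'GOOD' is assumed here to be a status code that |
| | # passes log.is_valid_status. |
| | # |
| | #     job.record('GOOD', None, 'reboot.verify', 'completed successfully') |
| | # |
| | # which is roughly equivalent to building the entry explicitly: |
| | # |
| | #     entry = status_log_entry('GOOD', None, 'reboot.verify', |
| | #                              'completed successfully', None) |
| | #     job.record_entry(entry) |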