import os, copy, logging, errno, fcntl, time, re, weakref, traceback
import cPickle as pickle

from autotest_lib.client.common_lib import autotemp, error, log


class job_directory(object):
    """Represents a job.*dir directory."""


    class JobDirectoryException(error.AutotestError):
        """Generic job_directory exception superclass."""


    class MissingDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job does not exist."""
        def __init__(self, path):
            Exception.__init__(self, 'Directory %s does not exist' % path)


    class UncreatableDirectoryException(JobDirectoryException):
        """Raised when a directory required by the job is missing and cannot
        be created."""
        def __init__(self, path, error):
            msg = 'Creation of directory %s failed with exception %s'
            msg %= (path, error)
            Exception.__init__(self, msg)


    class UnwritableDirectoryException(JobDirectoryException):
        """Raised when a writable directory required by the job exists
        but is not writable."""
        def __init__(self, path):
            msg = 'Directory %s exists but is not writable' % path
            Exception.__init__(self, msg)


    def __init__(self, path, is_writable=False):
        """
        Instantiate a job directory.

        @param path: The path of the directory. If None a temporary directory
            will be created instead.
        @param is_writable: If True, expect the directory to be writable.

        @raise MissingDirectoryException: raised if is_writable=False and the
            directory does not exist.
        @raise UnwritableDirectoryException: raised if is_writable=True and
            the directory exists but is not writable.
        @raise UncreatableDirectoryException: raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        if path is None:
            if is_writable:
                self._tempdir = autotemp.tempdir(unique_id='autotest')
                self.path = self._tempdir.name
            else:
                raise self.MissingDirectoryException(path)
        else:
            self._tempdir = None
            self.path = path
        self._ensure_valid(is_writable)


    def _ensure_valid(self, is_writable):
        """
        Ensure that this is a valid directory.

        Checks that the directory (self.path) exists and can optionally also
        enforce that it be writable. If the directory should be writable but
        does not exist, an attempt is made to create it. Creation will still
        fail if the path is rooted in a non-writable directory, or if a file
        already exists at the given location.

        @param is_writable A boolean indicating that the directory should
            not only exist, but also be writable.

        @raises MissingDirectoryException raised if is_writable=False and the
            directory does not exist.
        @raises UnwritableDirectoryException raised if is_writable=True and
            the directory is not writable.
        @raises UncreatableDirectoryException raised if is_writable=True, the
            directory does not exist and it cannot be created.
        """
        # ensure the directory exists
        if is_writable:
            try:
                os.makedirs(self.path)
            except OSError, e:
                if e.errno != errno.EEXIST or not os.path.isdir(self.path):
                    raise self.UncreatableDirectoryException(self.path, e)
        elif not os.path.isdir(self.path):
            raise self.MissingDirectoryException(self.path)

        # if is_writable=True, also check that the directory is writable
        if is_writable and not os.access(self.path, os.W_OK):
            raise self.UnwritableDirectoryException(self.path)


    @staticmethod
    def property_factory(attribute):
        """
        Create a job.*dir -> job._*dir.path property accessor.

        @param attribute A string with the name of the attribute this is
            exposed as. '_'+attribute must then be an attribute that holds
            either None or a job_directory-like object.

        @returns A read-only property object that exposes a job_directory path
        """
        @property
        def dir_property(self):
            underlying_attribute = getattr(self, '_' + attribute)
            if underlying_attribute is None:
                return None
            else:
                return underlying_attribute.path
        return dir_property

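# Example (illustrative sketch, not executed as part of this module): creating
# a writable job_directory backed by a temporary directory, and exposing a
# directory through property_factory on a made-up job-like class. 'fake_job'
# and '/tmp/results' are hypothetical names used only for illustration.
#
#     scratch = job_directory(None, is_writable=True)  # backed by a tempdir
#     print scratch.path
#
#     class fake_job(object):
#         resultdir = job_directory.property_factory('resultdir')
#         def __init__(self, path):
#             self._resultdir = job_directory(path, is_writable=True)
#
#     print fake_job('/tmp/results').resultdir          # -> '/tmp/results'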

# decorator for use with job_state methods
def with_backing_lock(method):
    """A decorator to perform a lock-*-unlock cycle.

    When applied to a method, this decorator will automatically acquire a
    lock on the backing file before the call and release it again afterwards,
    unless the lock is already held.
    """
    def wrapped_method(self, *args, **dargs):
        already_have_lock = self._backing_file_lock is not None
        if not already_have_lock:
            self._lock_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            if not already_have_lock:
                self._unlock_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method


# decorator for use with job_state methods
def with_backing_file(method):
    """A decorator to perform a lock-read-*-write-unlock cycle.

    When applied to a method, this decorator will automatically wrap calls
    to the method in a lock-and-read before the call, followed by a
    write-and-unlock afterwards. Any operation that is reading or writing
    state should be decorated in this way to ensure that backing file
    state is consistently maintained.
    """
    @with_backing_lock
    def wrapped_method(self, *args, **dargs):
        self._read_from_backing_file()
        try:
            return method(self, *args, **dargs)
        finally:
            self._write_to_backing_file()
    wrapped_method.__name__ = method.__name__
    wrapped_method.__doc__ = method.__doc__
    return wrapped_method


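# Illustrative sketch (not part of the module's behaviour): applying
# with_backing_file to a job_state method is roughly equivalent to writing
# the lock/read/write/unlock cycle out by hand:
#
#     def set(self, namespace, name, value):
#         self._lock_backing_file()
#         try:
#             self._read_from_backing_file()
#             try:
#                 ...  # body of the undecorated method
#             finally:
#                 self._write_to_backing_file()
#         finally:
#             self._unlock_backing_file()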

class job_state(object):
    """A class for managing explicit job and user state, optionally persistent.

    The class allows you to save state by name (like a dictionary). Any state
    stored in this class should be picklable and deep copyable. While this is
    not enforced it is recommended that only valid python identifiers be used
    as names. Additionally, the namespace 'stateful_property' is used for
    storing the values associated with properties constructed using the
    property_factory method.
    """

    NO_DEFAULT = object()
    PICKLE_PROTOCOL = 2  # highest protocol available in python 2.4


    def __init__(self):
        """Initialize the job state."""
        self._state = {}
        self._backing_file = None
        self._backing_file_initialized = False
        self._backing_file_lock = None


    def _lock_backing_file(self):
        """Acquire a lock on the backing file."""
        if self._backing_file:
            self._backing_file_lock = open(self._backing_file, 'a')
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_EX)


    def _unlock_backing_file(self):
        """Release a lock on the backing file."""
        if self._backing_file_lock:
            fcntl.flock(self._backing_file_lock, fcntl.LOCK_UN)
            self._backing_file_lock.close()
            self._backing_file_lock = None


    def read_from_file(self, file_path, merge=True):
        """Read in any state from the file at file_path.

        When merge=True, any state specified only in-memory will be preserved.
        Any state specified on-disk will be set in-memory, even if an in-memory
        setting already exists.

        @param file_path: The path where the state should be read from. It must
            exist but it can be empty.
        @param merge: If true, merge the on-disk state with the in-memory
            state. If false, replace the in-memory state with the on-disk
            state.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """

        # we can assume that the file exists
        if os.path.getsize(file_path) == 0:
            on_disk_state = {}
        else:
            on_disk_state = pickle.load(open(file_path))

        if merge:
            # merge the on-disk state with the in-memory state
            for namespace, namespace_dict in on_disk_state.iteritems():
                in_memory_namespace = self._state.setdefault(namespace, {})
                for name, value in namespace_dict.iteritems():
                    if name in in_memory_namespace:
                        if in_memory_namespace[name] != value:
                            logging.info('Persistent value of %s.%s from %s '
                                         'overriding existing in-memory '
                                         'value', namespace, name, file_path)
                            in_memory_namespace[name] = value
                        else:
                            logging.debug('Value of %s.%s is unchanged, '
                                          'skipping import', namespace, name)
                    else:
                        logging.debug('Importing %s.%s from state file %s',
                                      namespace, name, file_path)
                        in_memory_namespace[name] = value
        else:
            # just replace the in-memory state with the on-disk state
            self._state = on_disk_state

        # lock the backing file before we refresh it
        with_backing_lock(self.__class__._write_to_backing_file)(self)


    def write_to_file(self, file_path):
        """Write out the current state to the given path.

        @param file_path: The path where the state should be written out to.
            Must be writable.

        @warning: This method is intentionally concurrency-unsafe. It makes no
            attempt to control concurrent access to the file at file_path.
        """
        outfile = open(file_path, 'w')
        try:
            pickle.dump(self._state, outfile, self.PICKLE_PROTOCOL)
        finally:
            outfile.close()


    def _read_from_backing_file(self):
        """Refresh the current state from the backing file.

        If the backing file has never been read before (indicated by checking
        self._backing_file_initialized) it will merge the file with the
        in-memory state, rather than overwriting it.
        """
        if self._backing_file:
            merge_backing_file = not self._backing_file_initialized
            self.read_from_file(self._backing_file, merge=merge_backing_file)
            self._backing_file_initialized = True


    def _write_to_backing_file(self):
        """Flush the current state to the backing file."""
        if self._backing_file:
            self.write_to_file(self._backing_file)


    @with_backing_file
    def _synchronize_backing_file(self):
        """Synchronizes the contents of the in-memory and on-disk state."""
        # state is implicitly synchronized in _with_backing_file methods
        pass


    def set_backing_file(self, file_path):
        """Change the path used as the backing file for the persistent state.

        When a new backing file is specified, if a file already exists then
        its contents will be added into the current state, with conflicts
        between the file and memory being resolved in favor of the file
        contents. The file will then be kept in sync with the (combined)
        in-memory state. The syncing can be disabled by setting this to None.

        @param file_path: A path on the filesystem that can be read from and
            written to, or None to turn off the backing store.
        """
        self._synchronize_backing_file()
        self._backing_file = file_path
        self._backing_file_initialized = False
        self._synchronize_backing_file()


    @with_backing_file
    def get(self, namespace, name, default=NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        if self.has(namespace, name):
            return copy.deepcopy(self._state[namespace][name])
        elif default is self.NO_DEFAULT:
            raise KeyError('No key %s in namespace %s' % (name, namespace))
        else:
            return default


    @with_backing_file
    def set(self, namespace, name, value):
        """Saves the value given with the provided name.

        @param namespace: The namespace that the property should be stored in.
        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        namespace_dict = self._state.setdefault(namespace, {})
        namespace_dict[name] = copy.deepcopy(value)
        logging.debug('Persistent state %s.%s now set to %r', namespace,
                      name, value)


    @with_backing_file
    def has(self, namespace, name):
        """Return a boolean indicating if namespace.name is defined.

        @param namespace: The namespace to check for a definition.
        @param name: The name to check for a definition.

        @return: True if the given name is defined in the given namespace and
            False otherwise.
        """
        return namespace in self._state and name in self._state[namespace]


    @with_backing_file
    def discard(self, namespace, name):
        """If namespace.name is a defined value, deletes it.

        @param namespace: The namespace that the property is stored in.
        @param name: The name the value is saved with.
        """
        if self.has(namespace, name):
            del self._state[namespace][name]
            if len(self._state[namespace]) == 0:
                del self._state[namespace]
            logging.debug('Persistent state %s.%s deleted', namespace, name)
        else:
            logging.debug(
                'Persistent state %s.%s not defined so nothing is discarded',
                namespace, name)


    @with_backing_file
    def discard_namespace(self, namespace):
        """Delete all defined namespace.* names.

        @param namespace: The namespace to be cleared.
        """
        if namespace in self._state:
            del self._state[namespace]
        logging.debug('Persistent state %s.* deleted', namespace)


    @staticmethod
    def property_factory(state_attribute, property_attribute, default,
                         namespace='global_properties'):
        """
        Create a property object for an attribute using self.get and self.set.

        @param state_attribute: A string with the name of the attribute on
            job that contains the job_state instance.
        @param property_attribute: A string with the name of the attribute
            this property is exposed as.
        @param default: A default value that should be used for this property
            if it is not set.
        @param namespace: The namespace to store the attribute value in.

        @return: A read-write property object that performs self.get calls
            to read the value and self.set calls to set it.
        """
        def getter(job):
            state = getattr(job, state_attribute)
            return state.get(namespace, property_attribute, default)
        def setter(job, value):
            state = getattr(job, state_attribute)
            state.set(namespace, property_attribute, value)
        return property(getter, setter)

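# Example (illustrative sketch): typical use of job_state with a pickle
# backing file. The file path and state names below are made up for
# illustration; note that the backing file must already exist (it may be
# empty) before it is attached.
#
#     state = job_state()
#     open('/tmp/job_state.pickle', 'a').close()    # backing file must exist
#     state.set_backing_file('/tmp/job_state.pickle')
#     state.set('public', 'build', 'r1234')
#     state.get('public', 'build')                  # -> 'r1234'
#     state.get('public', 'missing', default=None)  # -> None
#     state.has('public', 'build')                  # -> True
#
#     # A fresh instance pointed at the same file sees the persisted value.
#     other = job_state()
#     other.set_backing_file('/tmp/job_state.pickle')
#     other.get('public', 'build')                  # -> 'r1234'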

class status_log_entry(object):
    """Represents a single status log entry."""

    RENDERED_NONE_VALUE = '----'
    TIMESTAMP_FIELD = 'timestamp'
    LOCALTIME_FIELD = 'localtime'

    # non-space whitespace is forbidden in any fields
    BAD_CHAR_REGEX = re.compile(r'[\t\n\r\v\f]')

    def __init__(self, status_code, subdir, operation, message, fields,
                 timestamp=None):
        """Construct a status.log entry.

        @param status_code: A message status code. Must match the codes
            accepted by autotest_lib.client.common_lib.log.is_valid_status.
        @param subdir: A valid job subdirectory, or None.
        @param operation: Description of the operation, or None.
        @param message: A printable string describing the event to be recorded.
        @param fields: A dictionary of arbitrary alphanumeric key=value pairs
            to be included in the log, or None.
        @param timestamp: An optional integer timestamp, in the same format
            as a time.time() timestamp. If unspecified, the current time is
            used.

        @raise ValueError: if any of the parameters are invalid
        """

        if not log.is_valid_status(status_code):
            raise ValueError('status code %r is not valid' % status_code)
        self.status_code = status_code

        if subdir and self.BAD_CHAR_REGEX.search(subdir):
            raise ValueError('Invalid character in subdir string')
        self.subdir = subdir

        if operation and self.BAD_CHAR_REGEX.search(operation):
            raise ValueError('Invalid character in operation string')
        self.operation = operation

        # break the message line into a single-line message that goes into the
        # database, and a block of additional lines that goes into the status
        # log but will never be parsed
        message_lines = message.split('\n')
        self.message = message_lines[0].replace('\t', ' ' * 8)
        self.extra_message_lines = message_lines[1:]
        if self.BAD_CHAR_REGEX.search(self.message):
            raise ValueError('Invalid character in message %r' % self.message)

        if not fields:
            self.fields = {}
        else:
            self.fields = fields.copy()
        for key, value in self.fields.iteritems():
            if self.BAD_CHAR_REGEX.search(key + value):
                raise ValueError('Invalid character in %r=%r field'
                                 % (key, value))

        # build up the timestamp
        if timestamp is None:
            timestamp = int(time.time())
        self.fields[self.TIMESTAMP_FIELD] = str(timestamp)
        self.fields[self.LOCALTIME_FIELD] = time.strftime(
            '%b %d %H:%M:%S', time.localtime(timestamp))


    def is_start(self):
        """Indicates if this status log is the start of a new nested block.

        @return: A boolean indicating if this entry starts a new nested block.
        """
        return self.status_code == 'START'


    def is_end(self):
        """Indicates if this status log is the end of a nested block.

        @return: A boolean indicating if this entry ends a nested block.
        """
        return self.status_code.startswith('END ')


    def render(self):
        """Render the status log entry into a text string.

        @return: A text string suitable for writing into a status log file.
        """
        # combine all the log line data into a tab-delimited string
        subdir = self.subdir or self.RENDERED_NONE_VALUE
        operation = self.operation or self.RENDERED_NONE_VALUE
        extra_fields = ['%s=%s' % field for field in self.fields.iteritems()]
        line_items = [self.status_code, subdir, operation]
        line_items += extra_fields + [self.message]
        first_line = '\t'.join(line_items)

        # append the extra unparsable lines, two-space indented
        all_lines = [first_line]
        all_lines += ['  ' + line for line in self.extra_message_lines]
        return '\n'.join(all_lines)


    @classmethod
    def parse(cls, line):
        """Parse a status log entry from a text string.

        This method is the inverse of render; it should always be true that
        parse(entry.render()) produces a new status_log_entry equivalent to
        entry.

        @return: A new status_log_entry instance with fields extracted from the
            given status line. If the line is an extra message line then None
            is returned.
        """
        # extra message lines are always prepended with two spaces
        if line.startswith('  '):
            return None

        line = line.lstrip('\t')  # ignore indentation
        entry_parts = line.split('\t')
        if len(entry_parts) < 4:
            raise ValueError('%r is not a valid status line' % line)
        status_code, subdir, operation = entry_parts[:3]
        if subdir == cls.RENDERED_NONE_VALUE:
            subdir = None
        if operation == cls.RENDERED_NONE_VALUE:
            operation = None
        message = entry_parts[-1]
        fields = dict(part.split('=', 1) for part in entry_parts[3:-1])
        if cls.TIMESTAMP_FIELD in fields:
            timestamp = int(fields[cls.TIMESTAMP_FIELD])
        else:
            timestamp = None
        return cls(status_code, subdir, operation, message, fields, timestamp)

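# Example (illustrative sketch): rendering an entry and parsing it back. The
# 'GOOD' status code is a standard Autotest status; the subdir, operation and
# field values are made up for illustration.
#
#     entry = status_log_entry('GOOD', 'sleeptest', 'sleeptest',
#                              'completed successfully', {'kernel': '2.6.32'})
#     text = entry.render()
#     # -> 'GOOD\tsleeptest\tsleeptest\tkernel=2.6.32\ttimestamp=...\t'
#     #    'localtime=...\tcompleted successfully'  (field order may vary)
#     roundtrip = status_log_entry.parse(text)
#     assert roundtrip.message == entry.message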

class status_indenter(object):
    """Abstract interface that a status log indenter should use."""

    @property
    def indent(self):
        raise NotImplementedError


    def increment(self):
        """Increase indentation by one level."""
        raise NotImplementedError


    def decrement(self):
        """Decrease indentation by one level."""
        raise NotImplementedError

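# A minimal concrete indenter, sketched here for illustration only; in
# practice concrete implementations are supplied by the job classes that use
# status_logger.
#
#     class simple_indenter(status_indenter):
#         def __init__(self):
#             self._indent = 0
#         @property
#         def indent(self):
#             return self._indent
#         def increment(self):
#             self._indent += 1
#         def decrement(self):
#             self._indent = max(0, self._indent - 1)
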
class status_logger(object):
    """Represents a status log file. Responsible for translating messages
    into on-disk status log lines.

    @property global_filename: The filename to write top-level logs to.
    @property subdir_filename: The filename to write subdir-level logs to.
    """
    def __init__(self, job, indenter, global_filename='status',
                 subdir_filename='status', record_hook=None):
        """Construct a logger instance.

        @param job: A reference to the job object this is logging for. Only a
            weak reference to the job is held, to avoid a
            status_logger <-> job circular reference.
        @param indenter: A status_indenter instance, for tracking the
            indentation level.
        @param global_filename: An optional filename to initialize the
            self.global_filename attribute.
        @param subdir_filename: An optional filename to initialize the
            self.subdir_filename attribute.
        @param record_hook: An optional function to be called before an entry
            is logged. The function should expect a single parameter, a
            copy of the status_log_entry object.
        """
        self._jobref = weakref.ref(job)
        self._indenter = indenter
        self.global_filename = global_filename
        self.subdir_filename = subdir_filename
        self._record_hook = record_hook


    def render_entry(self, log_entry):
        """Render a status_log_entry as it would be written to a log file.

        @param log_entry: A status_log_entry instance to be rendered.

        @return: The status log entry, rendered as it would be written to the
            logs (including indentation).
        """
        if log_entry.is_end():
            indent = self._indenter.indent - 1
        else:
            indent = self._indenter.indent
        return '\t' * indent + log_entry.render().rstrip('\n')


    def record_entry(self, log_entry, log_in_subdir=True):
        """Record a status_log_entry into the appropriate status log files.

        @param log_entry: A status_log_entry instance to be recorded into the
            status logs.
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        # acquire a strong reference for the duration of the method
        job = self._jobref()
        if job is None:
            logging.warning('Something attempted to write a status log entry '
                            'after its job terminated, ignoring the attempt.')
            logging.warning(traceback.format_stack())
            return

        # call the record hook if one was given
        if self._record_hook:
            self._record_hook(log_entry)

        # figure out where we need to log to
        log_files = [os.path.join(job.resultdir, self.global_filename)]
        if log_in_subdir and log_entry.subdir:
            log_files.append(os.path.join(job.resultdir, log_entry.subdir,
                                          self.subdir_filename))

        # write out the entry to the log files
        log_text = self.render_entry(log_entry)
        for log_file in log_files:
            fileobj = open(log_file, 'a')
            try:
                print >> fileobj, log_text
            finally:
                fileobj.close()

        # adjust the indentation if this was a START or END entry
        if log_entry.is_start():
            self._indenter.increment()
        elif log_entry.is_end():
            self._indenter.decrement()

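# Example (illustrative sketch): wiring a logger up by hand. Assumes 'job' is
# an object with a valid resultdir attribute and 'simple_indenter' is an
# indenter implementation such as the sketch above.
#
#     logger = status_logger(job, simple_indenter())
#     logger.record_entry(status_log_entry('START', None, None, '', None))
#     logger.record_entry(status_log_entry(
#         'GOOD', None, 'reboot', 'completed successfully', None))
#     logger.record_entry(status_log_entry('END GOOD', None, None, '', None))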

class base_job(object):
    """An abstract base class for the various autotest job classes.

    @property autodir: The top level autotest directory.
    @property clientdir: The autotest client directory.
    @property serverdir: The autotest server directory. [OPTIONAL]
    @property resultdir: The directory where results should be written out.
        [WRITABLE]

    @property pkgdir: The job packages directory. [WRITABLE]
    @property tmpdir: The job temporary directory. [WRITABLE]
    @property testdir: The job test directory. [WRITABLE]
    @property site_testdir: The job site test directory. [WRITABLE]

    @property bindir: The client bin/ directory.
    @property configdir: The client config/ directory.
    @property profdir: The client profilers/ directory.
    @property toolsdir: The client tools/ directory.

    @property conmuxdir: The conmux directory. [OPTIONAL]

    @property control: A path to the control file to be executed. [OPTIONAL]
    @property hosts: A set of all live Host objects currently in use by the
        job. Code running in the context of a local client can safely assume
        that this set contains only a single entry.
    @property machines: A list of the machine names associated with the job.
    @property user: The user executing the job.
    @property tag: A tag identifying the job. Often used by the scheduler to
        give a name of the form NUMBER-USERNAME/HOSTNAME.
    @property args: A list of additional miscellaneous command-line arguments
        provided when starting the job.

    @property last_boot_tag: The label of the kernel from the last reboot.
        [OPTIONAL,PERSISTENT]
    @property automatic_test_tag: A string which, if set, will be automatically
        added to the test name when running tests.

    @property default_profile_only: A boolean indicating the default value of
        profile_only used by test.execute. [PERSISTENT]
    @property drop_caches: A boolean indicating if caches should be dropped
        before each test is executed.
    @property drop_caches_between_iterations: A boolean indicating if caches
        should be dropped before each test iteration is executed.
    @property run_test_cleanup: A boolean indicating if test.cleanup should be
        run by default after a test completes, if the run_cleanup argument is
        not specified. [PERSISTENT]

    @property num_tests_run: The number of tests run during the job. [OPTIONAL]
    @property num_tests_failed: The number of tests failed during the job.
        [OPTIONAL]

    @property bootloader: An instance of the boottool class. May not be
        available on job instances where access to the bootloader is not
        available (e.g. on the server running a server job). [OPTIONAL]
    @property harness: An instance of the client test harness. Only available
        in contexts where client test execution happens. [OPTIONAL]
    @property logging: An instance of the logging manager associated with the
        job.
    @property profilers: An instance of the profiler manager associated with
        the job.
    @property sysinfo: An instance of the sysinfo object. Only available in
        contexts where it's possible to collect sysinfo.
    @property warning_manager: A class for managing which types of WARN
        messages should be logged and which should be suppressed. [OPTIONAL]
    @property warning_loggers: A set of readable streams that will be monitored
        for WARN messages to be logged. [OPTIONAL]

    Abstract methods:
        _find_base_directories [CLASSMETHOD]
            Returns the location of autodir, clientdir and serverdir

        _find_resultdir
            Returns the location of resultdir. Gets a copy of any parameters
            passed into base_job.__init__. Can return None to indicate that
            no resultdir is to be used.

        _get_status_logger
            Returns a status_logger instance for recording job status logs.
    """

    # capture the dependency on several helper classes with factories
    _job_directory = job_directory
    _job_state = job_state


    # all the job directory attributes
    autodir = _job_directory.property_factory('autodir')
    clientdir = _job_directory.property_factory('clientdir')
    serverdir = _job_directory.property_factory('serverdir')
    resultdir = _job_directory.property_factory('resultdir')
    pkgdir = _job_directory.property_factory('pkgdir')
    tmpdir = _job_directory.property_factory('tmpdir')
    testdir = _job_directory.property_factory('testdir')
    site_testdir = _job_directory.property_factory('site_testdir')
    bindir = _job_directory.property_factory('bindir')
    configdir = _job_directory.property_factory('configdir')
    profdir = _job_directory.property_factory('profdir')
    toolsdir = _job_directory.property_factory('toolsdir')
    conmuxdir = _job_directory.property_factory('conmuxdir')


    # all the generic persistent properties
    tag = _job_state.property_factory('_state', 'tag', '')
    default_profile_only = _job_state.property_factory(
        '_state', 'default_profile_only', False)
    run_test_cleanup = _job_state.property_factory(
        '_state', 'run_test_cleanup', True)
    last_boot_tag = _job_state.property_factory(
        '_state', 'last_boot_tag', None)
    automatic_test_tag = _job_state.property_factory(
        '_state', 'automatic_test_tag', None)

    # the use_sequence_number property
    _sequence_number = _job_state.property_factory(
        '_state', '_sequence_number', None)
    def _get_use_sequence_number(self):
        return bool(self._sequence_number)
    def _set_use_sequence_number(self, value):
        if value:
            self._sequence_number = 1
        else:
            self._sequence_number = None
    use_sequence_number = property(_get_use_sequence_number,
                                   _set_use_sequence_number)


    def __init__(self, *args, **dargs):
        # initialize the base directories, all others are relative to these
        autodir, clientdir, serverdir = self._find_base_directories()
        self._autodir = self._job_directory(autodir)
        self._clientdir = self._job_directory(clientdir)
        if serverdir:
            self._serverdir = self._job_directory(serverdir)
        else:
            self._serverdir = None

        # initialize all the other directories relative to the base ones
        self._initialize_dir_properties()
        self._resultdir = self._job_directory(
            self._find_resultdir(*args, **dargs), True)
        self._execution_contexts = []

        # initialize all the job state
        self._state = self._job_state()


    @classmethod
    def _find_base_directories(cls):
        raise NotImplementedError()


    def _initialize_dir_properties(self):
        """
        Initializes all the secondary self.*dir properties. Requires autodir,
        clientdir and serverdir to already be initialized.
        """
        # create some stubs for use as shortcuts
        def readonly_dir(*args):
            return self._job_directory(os.path.join(*args))
        def readwrite_dir(*args):
            return self._job_directory(os.path.join(*args), True)

        # various client-specific directories
        self._bindir = readonly_dir(self.clientdir, 'bin')
        self._configdir = readonly_dir(self.clientdir, 'config')
        self._profdir = readonly_dir(self.clientdir, 'profilers')
        self._pkgdir = readwrite_dir(self.clientdir, 'packages')
        self._toolsdir = readonly_dir(self.clientdir, 'tools')

        # directories which are in serverdir on a server, clientdir on a client
        if self.serverdir:
            root = self.serverdir
        else:
            root = self.clientdir
        self._tmpdir = readwrite_dir(root, 'tmp')
        self._testdir = readwrite_dir(root, 'tests')
        self._site_testdir = readwrite_dir(root, 'site_tests')

        # various server-specific directories
        if self.serverdir:
            self._conmuxdir = readonly_dir(self.autodir, 'conmux')
        else:
            self._conmuxdir = None


    def _find_resultdir(self, *args, **dargs):
        raise NotImplementedError()


    def push_execution_context(self, resultdir):
        """
        Save off the current context of the job and change to the given one.

        In practice this method just changes the resultdir, but it may become
        more extensive in the future. The expected use case is for when a
        child job needs to be executed in some sort of nested context (for
        example the way parallel_simple does). The original context can be
        restored with a pop_execution_context call.

        @param resultdir: The new resultdir, relative to the current one.
        """
        new_dir = self._job_directory(
            os.path.join(self.resultdir, resultdir), True)
        self._execution_contexts.append(self._resultdir)
        self._resultdir = new_dir


    def pop_execution_context(self):
        """
        Reverse the effects of the previous push_execution_context call.

        @raise IndexError: raised when the stack of contexts is empty.
        """
        if not self._execution_contexts:
            raise IndexError('No old execution context to restore')
        self._resultdir = self._execution_contexts.pop()

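    # Example (illustrative sketch): nesting a child job's results one level
    # deeper and then restoring the original context. 'job' stands in for any
    # concrete base_job subclass instance; 'machine1' is a made-up name.
    #
    #     job.push_execution_context('machine1')  # results go to .../machine1
    #     try:
    #         ...  # run the nested work
    #     finally:
    #         job.pop_execution_context()          # back to original resultdir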

    def get_state(self, name, default=_job_state.NO_DEFAULT):
        """Returns the value associated with a particular name.

        @param name: The name the value was saved with.
        @param default: A default value to return if no state is currently
            associated with name.

        @return: A deep copy of the value associated with name. Note that this
            explicitly returns a deep copy to avoid problems with mutable
            values; mutations are not persisted or shared.
        @raise KeyError: raised when no state is associated with name and a
            default value is not provided.
        """
        try:
            return self._state.get('public', name, default=default)
        except KeyError:
            raise KeyError(name)


    def set_state(self, name, value):
        """Saves the value given with the provided name.

        @param name: The name the value should be saved with.
        @param value: The value to save.
        """
        self._state.set('public', name, value)


    def _build_tagged_test_name(self, testname, dargs):
        """Builds the fully tagged testname and subdirectory for job.run_test.

        @param testname: The base name of the test.
        @param dargs: The ** arguments passed to run_test. Any arguments
            consumed by this method will be removed from the dictionary.

        @return: A 3-tuple of the full name of the test, the subdirectory it
            should be stored in, and the full tag of the subdir.
        """
        tag_parts = []

        # build up the parts of the tag used for the test name
        base_tag = dargs.pop('tag', None)
        if base_tag:
            tag_parts.append(str(base_tag))
        if self.use_sequence_number:
            tag_parts.append('_%02d_' % self._sequence_number)
            self._sequence_number += 1
        if self.automatic_test_tag:
            tag_parts.append(self.automatic_test_tag)
        full_testname = '.'.join([testname] + tag_parts)

        # build up the subdir and tag as well
        subdir_tag = dargs.pop('subdir_tag', None)
        if subdir_tag:
            tag_parts.append(subdir_tag)
        subdir = '.'.join([testname] + tag_parts)
        tag = '.'.join(tag_parts)

        return full_testname, subdir, tag

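    # Example (illustrative sketch): with a base tag of 'smoke', a subdir_tag
    # of 'run1', sequence numbering enabled and the sequence counter at 3,
    # _build_tagged_test_name('sleeptest', dargs) would return roughly:
    #
    #     full_testname == 'sleeptest.smoke._03_'
    #     subdir        == 'sleeptest.smoke._03_.run1'
    #     tag           == 'smoke._03_.run1'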

    def _make_test_outputdir(self, subdir):
        """Creates an output directory for a test to run in.

        @param subdir: The subdirectory of the test. Generally computed by
            _build_tagged_test_name.

        @return: A job_directory instance corresponding to the outputdir of
            the test.
        @raise TestError: If the output directory is invalid.
        """
        # explicitly check that this subdirectory is new
        path = os.path.join(self.resultdir, subdir)
        if os.path.exists(path):
            msg = ('%s already exists; multiple tests cannot run with the '
                   'same subdirectory' % subdir)
            raise error.TestError(msg)

        # create the outputdir and raise a TestError if it isn't valid
        try:
            outputdir = self._job_directory(path, True)
            return outputdir
        except self._job_directory.JobDirectoryException, e:
            logging.exception('%s directory creation failed with %s',
                              subdir, e)
            raise error.TestError('%s directory creation failed' % subdir)


    def record(self, status_code, subdir, operation, status='',
               optional_fields=None):
        """Record a job-level status event.

        Logs an event noteworthy to the Autotest job as a whole. Messages will
        be written into a global status log file, as well as a subdir-local
        status log file (if subdir is specified).

        @param status_code: A string status code describing the type of status
            entry being recorded. It must pass log.is_valid_status to be
            considered valid.
        @param subdir: A specific results subdirectory this also applies to, or
            None. If not None the subdirectory must exist.
        @param operation: A string describing the operation that was run.
        @param status: An optional human-readable message describing the status
            entry, for example an error message or "completed successfully".
        @param optional_fields: An optional dictionary of additional named
            fields to be included with the status message. Timestamp and
            localtime entries are always generated with the current time and
            added to this dictionary.
        """
        entry = status_log_entry(status_code, subdir, operation, status,
                                 optional_fields)
        self.record_entry(entry)


    def record_entry(self, entry, log_in_subdir=True):
        """Record a job-level status event, using a status_log_entry.

        This is the same as self.record but using an existing status log
        entry object rather than constructing one for you.

        @param entry: A status_log_entry object
        @param log_in_subdir: A boolean that indicates (when true) that subdir
            logs should be written into the subdirectory status log file.
        """
        self._get_status_logger().record_entry(entry, log_in_subdir)
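
    # Example (illustrative sketch): a typical pair of job-level records
    # around an operation, producing matching START/END lines in the status
    # logs (the operation name is made up for illustration).
    #
    #     job.record('START', None, 'reboot')
    #     ...  # perform the reboot
    #     job.record('END GOOD', None, 'reboot', 'completed successfully')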