showard | 89f9016 | 2009-06-30 01:59:08 +0000 | [diff] [blame] | 1 | import logging, os, signal, sys, warnings |
showard | e96f7a9 | 2009-06-10 00:17:33 +0000 | [diff] [blame] | 2 | |
| 3 | # primary public APIs |
| 4 | |
def configure_logging(logging_config, **kwargs):
    """
    Set up the logging module from a configuration object.

    @param logging_config: an instance of logging_config.LoggingConfig
            (usually of a subclass).  It is remembered on LoggingManager so
            managers created later can consult it.
    @param kwargs: passed verbatim to logging_config.configure_logging().

    Every entry point should invoke this once at application startup, before
    constructing any LoggingManager.
    """
    # remember the config for LoggingManager instances, then apply it
    LoggingManager.logging_config_object = logging_config
    logging_config.configure_logging(**kwargs)
| 16 | |
| 17 | |
def get_logging_manager(manage_stdout_and_stderr=False, redirect_fds=False):
    """
    Factory for LoggingManager objects.

    @param manage_stdout_and_stderr: when True, the returned manager is told
            to take over sys.stdout and sys.stderr.
    @param redirect_fds: when True, an FdRedirectionLoggingManager is built
            instead of a plain LoggingManager, so the underlying file
            descriptors get redirected as well.

    Every entry point that wants to capture stdout/stderr and/or use
    LoggingManager to manage a stack of destinations should call this method
    at application startup.
    """
    manager_class = FdRedirectionLoggingManager if redirect_fds else LoggingManager
    manager = manager_class()
    if manage_stdout_and_stderr:
        manager.manage_stdout()
        manager.manage_stderr()
    return manager
| 34 | |
| 35 | |
| 36 | # implementation follows |
| 37 | |
| 38 | logger = logging.getLogger() |
| 39 | |
showard | 89f9016 | 2009-06-30 01:59:08 +0000 | [diff] [blame] | 40 | |
showard | e96f7a9 | 2009-06-10 00:17:33 +0000 | [diff] [blame] | 41 | def _current_handlers(): |
| 42 | return set(logger.handlers) |
| 43 | |
| 44 | |
showard | 89f9016 | 2009-06-30 01:59:08 +0000 | [diff] [blame] | 45 | _caller_code_to_skip_in_logging_stack = set() |
| 46 | |
| 47 | |
showard | 48e6905 | 2009-09-24 22:20:30 +0000 | [diff] [blame] | 48 | def do_not_report_as_logging_caller(func): |
showard | 89f9016 | 2009-06-30 01:59:08 +0000 | [diff] [blame] | 49 | """Decorator to annotate functions we will tell logging not to log.""" |
| 50 | # These are not the droids you are looking for. |
| 51 | # You may go about your business. |
| 52 | _caller_code_to_skip_in_logging_stack.add(func.func_code) |
| 53 | return func |
| 54 | |
| 55 | |
# Copied from Python 2.4 logging/__init__.py Logger.findCaller and enhanced.
# The logging code remains the same and compatible with this monkey patching
# through at least Python version 2.6.2.
def _logging_manager_aware_logger__find_caller(unused):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.

    Installed as logging.Logger.findCaller (see the monkey patching below),
    so 'unused' receives the Logger instance; it is not needed here.

    Returns a (filename, lineno, function name) tuple for the first frame
    that belongs neither to the logging package itself nor to a function
    decorated with @do_not_report_as_logging_caller.
    """
    # NOTE(review): the starting depth here must match how the logging
    # package calls findCaller internally -- confirm if targeting a logging
    # version newer than the ones named above.
    f = sys._getframe(2).f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        # frames inside the logging package are never reported as the caller
        if filename == logging._srcfile:
            f = f.f_back
            continue
        ### START additional code.
        # frames of functions explicitly exempted via
        # do_not_report_as_logging_caller (e.g. LoggingFile methods) are
        # skipped as well
        if co in _caller_code_to_skip_in_logging_stack:
            f = f.f_back
            continue
        ### END additional code.
        rv = (filename, f.f_lineno, co.co_name)
        break
    return rv
| 80 | |
| 81 | |
# The findCaller replacement above was copied from and validated against the
# logging internals of the 2.x interpreters named in its header comment, so
# warn (rather than fail) when running on anything newer.
if sys.version_info[:2] > (2, 7):
    warnings.warn('This module has not been reviewed for Python %s' %
                  sys.version)


# Monkey patch our way around logging's design...
# Keep a reference to the stock implementation so it can be inspected or
# restored if ever needed.
_original_logger__find_caller = logging.Logger.findCaller
logging.Logger.findCaller = _logging_manager_aware_logger__find_caller
| 90 | |
| 91 | |
class LoggingFile(object):
    """
    A file-like object that forwards everything written to it into the
    logging infrastructure, one complete line at a time.
    """
    def __init__(self, prefix='', level=logging.DEBUG):
        """
        @param prefix - The prefix for each line logged by this object.
        @param level - Logging level at which lines are emitted.
        """
        self._prefix = prefix
        self._level = level
        self._buffer = []


    @do_not_report_as_logging_caller
    def write(self, data):
        """
        Log every complete line contained in the data; keep any trailing
        partial line buffered until a later write completes it.
        @param data - Raw data (a string) that will be processed.
        """
        # split() is used rather than splitlines() because splitlines()
        # would lose the information of whether the data ends mid-line.
        pieces = data.split('\n')
        if len(pieces) > 1:
            # the first piece completes whatever was buffered so far
            self._buffer.append(pieces[0])
            self._flush_buffer()
            # every piece between the first and last is a full line
            for complete_line in pieces[1:-1]:
                self._log_line(complete_line)
        # a non-empty final piece is an unfinished line: keep it buffered
        if pieces[-1]:
            self._buffer.append(pieces[-1])


    @do_not_report_as_logging_caller
    def _log_line(self, line):
        """
        Hand one line of output, with the configured prefix, to logging.
        """
        logging.log(self._level, self._prefix + line)


    @do_not_report_as_logging_caller
    def _flush_buffer(self):
        """Emit the buffered partial line, if any, and reset the buffer."""
        if self._buffer:
            self._log_line(''.join(self._buffer))
            self._buffer = []


    @do_not_report_as_logging_caller
    def flush(self):
        """Force any pending partial line out through the logging module."""
        self._flush_buffer()
| 142 | |
| 143 | |
| 144 | class _StreamManager(object): |
| 145 | """ |
| 146 | Redirects all output for some output stream (normally stdout or stderr) to |
| 147 | the logging module by replacing the file objects with a new LoggingFile |
| 148 | that calls logging.log(). |
| 149 | """ |
| 150 | def __init__(self, stream, level, stream_setter): |
| 151 | """ |
| 152 | @param stream: stream object to manage |
| 153 | @param level: level at which data written to the stream will be logged |
| 154 | @param stream_setter: function accepting a stream object that will |
| 155 | replace the given stream in its original location. |
| 156 | """ |
| 157 | self._stream = stream |
| 158 | self._level = level |
| 159 | self._stream_setter = stream_setter |
| 160 | self._logging_stream = None |
| 161 | |
| 162 | |
| 163 | def _replace_with_logger(self): |
| 164 | self._logging_stream = LoggingFile(level=self._level) |
| 165 | self._stream_setter(self._logging_stream) |
| 166 | |
| 167 | |
| 168 | def _restore_stream(self): |
| 169 | self._stream_setter(self._stream) |
| 170 | |
| 171 | |
| 172 | def flush(self): |
| 173 | self._logging_stream.flush() |
| 174 | |
| 175 | |
| 176 | def start_logging(self): |
| 177 | """Start directing the stream to the logging module.""" |
| 178 | self._replace_with_logger() |
| 179 | |
| 180 | |
| 181 | def stop_logging(self): |
| 182 | """Restore the stream to its original settings.""" |
| 183 | self._restore_stream() |
| 184 | |
| 185 | |
| 186 | def on_push_context(self, context): |
| 187 | """ |
| 188 | Called when the logging manager is about to push a new context onto the |
| 189 | stack and has changed logging settings. The StreamHandler can modify |
| 190 | the context to be saved before returning. |
| 191 | """ |
| 192 | pass |
| 193 | |
| 194 | |
| 195 | def on_restore_context(self, context): |
| 196 | """ |
| 197 | Called when the logging manager is restoring a previous context. |
| 198 | """ |
| 199 | pass |
| 200 | |
| 201 | |
| 202 | |
class LoggingManager(object):
    """
    Manages a stack of logging configurations, allowing clients to conveniently
    add and remove logging destinations.  Also keeps a list of StreamManagers
    to easily direct streams into the logging module.
    """

    # class used to wrap each managed stream; subclasses override this to
    # change how streams are captured (see FdRedirectionLoggingManager)
    STREAM_MANAGER_CLASS = _StreamManager

    # set module-wide by configure_logging(); supplies the handlers used by
    # the redirect methods below
    logging_config_object = None

    def __init__(self):
        """
        This class should not ordinarily be constructed directly (other than in
        tests). Use the module-global factory method get_logging_manager()
        instead.

        Raises RuntimeError if configure_logging() has not been called yet.
        """
        if self.logging_config_object is None:
            raise RuntimeError('You must call configure_logging() before this')

        # _context_stack holds a stack of context dicts. Each context dict
        # contains:
        # * old_handlers: list of registered logging Handlers
        # contexts may also be extended by _StreamHandlers
        self._context_stack = []
        self._streams = []
        self._started = False


    def manage_stream(self, stream, level, stream_setter):
        """
        Tells this manager to manage the given stream. All data written to the
        stream will be directed to the logging module instead. Must be called
        before start_logging().

        @param stream: stream to manage
        @param level: level to log data written to this stream
        @param stream_setter: function to set the stream to a new object
        """
        if self._started:
            raise RuntimeError('You must call this before start_logging()')
        self._streams.append(self.STREAM_MANAGER_CLASS(stream, level,
                                                       stream_setter))


    def _sys_stream_setter(self, stream_name):
        # Build a closure that rebinds sys.stdout or sys.stderr to a new
        # file object.
        assert stream_name in ('stdout', 'stderr'), stream_name
        def set_stream(file_object):
            setattr(sys, stream_name, file_object)
        return set_stream


    def manage_stdout(self):
        """Capture sys.stdout, logging everything written to it at INFO."""
        self.manage_stream(sys.stdout, logging.INFO,
                           self._sys_stream_setter('stdout'))


    def manage_stderr(self):
        """Capture sys.stderr at the level chosen by the logging config."""
        self.manage_stream(sys.stderr, self.logging_config_object.stderr_level,
                           self._sys_stream_setter('stderr'))


    def start_logging(self):
        """
        Begin capturing output to the logging module.
        """
        for stream_manager in self._streams:
            stream_manager.start_logging()
        self._started = True


    def stop_logging(self):
        """
        Restore output to its original state.
        """
        # unwind any outstanding redirections before releasing the streams
        while self._context_stack:
            self._pop_context()

        for stream_manager in self._streams:
            stream_manager.stop_logging()

        self._started = False


    def _clear_all_handlers(self):
        # detach every handler currently registered on the root logger
        for handler in _current_handlers():
            logger.removeHandler(handler)


    def _get_context(self):
        # snapshot of the current logging state; stream managers may add
        # their own entries via on_push_context()
        return {'old_handlers': _current_handlers()}


    def _push_context(self, context):
        # give each stream manager a chance to record per-stream state
        # before the context is saved
        for stream_manager in self._streams:
            stream_manager.on_push_context(context)
        self._context_stack.append(context)


    def _flush_all_streams(self):
        for stream_manager in self._streams:
            stream_manager.flush()


    def _add_log_handlers(self, add_handlers_fn):
        """
        Modify the logging module's registered handlers and push a new context
        onto the stack.
        @param add_handlers_fn: function to modify the registered logging
                handlers. Accepts a context dictionary which may be modified.
        """
        # flush first so buffered output is emitted under the old handlers
        self._flush_all_streams()
        context = self._get_context()

        add_handlers_fn(context)

        self._push_context(context)


    class _TaggingFormatter(logging.Formatter):
        """
        Delegates to a given formatter, but prefixes each line of output with a
        tag.
        """
        def __init__(self, base_formatter, tag):
            # NOTE(review): deliberately does not call Formatter.__init__;
            # it rewrites _fmt/datefmt directly, which relies on the
            # Python 2-era Formatter internals -- confirm before using on
            # newer Pythons where Formatter delegates to a _style object.
            self.base_formatter = base_formatter
            prefix = tag + ' : '
            self._fmt = base_formatter._fmt.replace('%(message)s',
                                                    prefix + '%(message)s')
            self.datefmt = base_formatter.datefmt


    def _add_tagging_formatter(self, tag):
        # wrap every current handler's formatter so each record carries the
        # tag; _restore_context() unwraps them via base_formatter
        for handler in _current_handlers():
            tagging_formatter = self._TaggingFormatter(handler.formatter, tag)
            handler.setFormatter(tagging_formatter)


    def _do_redirect(self, stream=None, filename=None, level=None,
                     clear_other_handlers=False):
        """
        Common implementation for the redirect/tee methods below.  Exactly
        one of stream and filename must be supplied.

        @param clear_other_handlers - if true, clear out all other logging
                handlers.
        """
        assert bool(stream) != bool(filename) # xor
        if not self._started:
            raise RuntimeError('You must call start_logging() before this')

        def add_handler(context):
            if clear_other_handlers:
                self._clear_all_handlers()

            if stream:
                handler = self.logging_config_object.add_stream_handler(stream)
            else:
                handler = self.logging_config_object.add_file_handler(filename)

            if level:
                handler.setLevel(level)

        self._add_log_handlers(add_handler)


    def redirect(self, filename):
        """Redirect output to the specified file"""
        self._do_redirect(filename=filename, clear_other_handlers=True)


    def redirect_to_stream(self, stream):
        """Redirect output to the given stream"""
        self._do_redirect(stream=stream, clear_other_handlers=True)


    def tee_redirect(self, filename, level=None):
        """Tee output to the specified file"""
        self._do_redirect(filename=filename, level=level)


    def tee_redirect_to_stream(self, stream):
        """Tee output to the given stream"""
        self._do_redirect(stream=stream)


    def tee_redirect_debug_dir(self, debug_dir, log_name=None, tag=None):
        """
        Tee output to a full new set of debug logs in the given directory.
        """
        def add_handlers(context):
            if tag:
                self._add_tagging_formatter(tag)
                # remember in the context that formatters must be unwrapped
                # on restore
                context['tag_added'] = True
            self.logging_config_object.add_debug_file_handlers(
                    debug_dir, log_name=log_name)
        self._add_log_handlers(add_handlers)


    def _restore_context(self, context):
        # let stream managers tear down per-stream state first (e.g. the
        # logging subprocesses used by the FD-redirection subclass)
        for stream_handler in self._streams:
            stream_handler.on_restore_context(context)

        # restore logging handlers
        old_handlers = context['old_handlers']
        # close only the handlers added since the context was pushed
        for handler in _current_handlers() - old_handlers:
            handler.close()
        self._clear_all_handlers()
        for handler in old_handlers:
            logger.addHandler(handler)

        if 'tag_added' in context:
            # unwrap the _TaggingFormatters installed for this context
            for handler in _current_handlers():
                tagging_formatter = handler.formatter
                handler.setFormatter(tagging_formatter.base_formatter)


    def _pop_context(self):
        # flush under the current settings before switching back
        self._flush_all_streams()
        context = self._context_stack.pop()
        self._restore_context(context)


    def undo_redirect(self):
        """
        Undo the last redirection (that hasn't yet been undone).

        If any subprocesses have been launched since the redirection was
        performed, they must have ended by the time this is called. Otherwise,
        this will hang waiting for the logging subprocess to end.
        """
        if not self._context_stack:
            raise RuntimeError('No redirects to undo')
        self._pop_context()


    def restore(self):
        """
        Same as undo_redirect(). For backwards compatibility with
        fd_stack.
        """
        self.undo_redirect()
| 442 | |
| 443 | |
class _FdRedirectionStreamManager(_StreamManager):
    """
    Like StreamManager, but also captures output from subprocesses by modifying
    the underlying file descriptors.

    For the underlying file descriptors, we spawn a subprocess that writes all
    input to the logging module, and we point the FD to that subprocess. As a
    result, every time we redirect output we need to spawn a new subprocess to
    pick up the new logging settings (without disturbing any existing processes
    using the old logging subprocess).

    If, one day, we could get all code using utils.run() and friends to launch
    subprocesses, we'd no longer need to handle raw FD output, and we could
    get rid of all this business with subprocesses. Another option would be
    to capture all stray output to a single, separate destination.
    """
    def __init__(self, stream, level, stream_setter):
        """
        @param stream: stream to manage; must be backed by a real file
                descriptor (i.e. have a working fileno()).
        @raises ValueError: if the stream has no fileno() method.
        """
        if not hasattr(stream, 'fileno'):
            # with fake, in-process file objects, subprocess output won't be
            # captured. this should never happen in normal use, since the
            # factory methods will only pass sys.stdout and sys.stderr.
            raise ValueError("FdRedirectionLoggingManager won't work with "
                             "streams that aren't backed by file "
                             "descriptors")

        super(_FdRedirectionStreamManager, self).__init__(stream, level,
                                                          stream_setter)
        # the OS-level descriptor behind the managed stream
        self._fd = stream.fileno()
        # dup of self._fd wrapped in a file object, used so StreamHandlers
        # keep writing to the real destination while the FD itself points at
        # the logging subprocess
        self._fd_copy_stream = None


    def _point_stream_handlers_to_copy(self):
        """
        point logging StreamHandlers that point to this stream to a safe
        copy of the underlying FD. otherwise, StreamHandler output will go
        to the logging subprocess, effectively getting doubly logged.
        """
        fd_copy = os.dup(self._fd)
        self._fd_copy_stream = os.fdopen(fd_copy, 'w')
        self._redirect_logging_stream_handlers(self._stream,
                                               self._fd_copy_stream)


    def _restore_stream_handlers(self):
        """ point logging StreamHandlers back to the original FD """
        self._redirect_logging_stream_handlers(self._fd_copy_stream,
                                               self._stream)
        self._fd_copy_stream.close()


    def _redirect_logging_stream_handlers(self, old_stream, new_stream):
        """
        Redirect all configured logging StreamHandlers pointing to
        old_stream to point to new_stream instead.
        """
        for handler in _current_handlers():
            points_to_stream = (isinstance(handler, logging.StreamHandler) and
                                hasattr(handler.stream, 'fileno') and
                                handler.stream.fileno() == old_stream.fileno())
            if points_to_stream:
                logger.removeHandler(handler)
                handler.close() # doesn't close the stream, just the handler

                # rebuild an equivalent handler on the new stream, carrying
                # over the level, formatter and filters
                new_handler = logging.StreamHandler(new_stream)
                new_handler.setLevel(handler.level)
                new_handler.setFormatter(handler.formatter)
                for log_filter in handler.filters:
                    new_handler.addFilter(log_filter)
                logger.addHandler(new_handler)


    def start_logging(self):
        # replace the stream object, then shield StreamHandlers from the
        # upcoming FD redirection
        super(_FdRedirectionStreamManager, self).start_logging()
        self._point_stream_handlers_to_copy()


    def stop_logging(self):
        super(_FdRedirectionStreamManager, self).stop_logging()
        self._restore_stream_handlers()


    def _spawn_logging_subprocess(self):
        """
        Spawn a subprocess to log all input to the logging module with the
        current settings, and direct output to it.

        Returns (in the parent) the PID of the logging subprocess.
        """
        read_end, write_end = os.pipe()
        pid = os.fork()
        if pid: # parent
            os.close(read_end)
            os.dup2(write_end, self._fd) # point FD to the subprocess
            os.close(write_end)
            return pid
        else: # child
            try:
                os.close(write_end)
                # ensure this subprocess doesn't hold any pipes to others
                os.close(1)
                os.close(2)
                self._run_logging_subprocess(read_end) # never returns
            except:
                # don't let exceptions in the child escape
                try:
                    logging.exception('Logging subprocess died:')
                finally:
                    os._exit(1)


    def _run_logging_subprocess(self, read_fd):
        """
        Always run from a subprocess. Read from read_fd and write to the
        logging module until EOF.
        """
        signal.signal(signal.SIGTERM, signal.SIG_DFL) # clear handler
        input_file = os.fdopen(read_fd, 'r')
        for line in iter(input_file.readline, ''):
            logging.log(self._level, line.rstrip('\n'))
        logging.debug('Logging subprocess finished')
        os._exit(0)


    def _context_id(self):
        # key under which this stream's per-context state is stored in the
        # shared context dict; unique per manager instance
        return '%s_context' % id(self)


    def on_push_context(self, context):
        # adds a context dict for this stream, $id_context, with the following:
        # * old_fd: FD holding a copy of the managed FD before launching a new
        #   subprocess.
        # * child_pid: PID of the logging subprocess launched
        fd_copy = os.dup(self._fd)
        child_pid = self._spawn_logging_subprocess()
        my_context = {'old_fd': fd_copy, 'child_pid': child_pid}
        context[self._context_id()] = my_context


    def on_restore_context(self, context):
        my_context = context[self._context_id()]

        # shut down subprocess
        child_pid = my_context['child_pid']
        try:
            # closing our FD lets the subprocess see EOF on its pipe (once
            # all other writers are gone -- see undo_redirect's docstring),
            # after which it can be reaped
            os.close(self._fd)
            os.waitpid(child_pid, 0)
        except OSError:
            logging.exception('Failed to cleanly shutdown logging subprocess:')

        # restore previous FD
        old_fd = my_context['old_fd']
        os.dup2(old_fd, self._fd)
        os.close(old_fd)
| 595 | |
| 596 | |
class FdRedirectionLoggingManager(LoggingManager):
    """
    LoggingManager variant whose stream managers are
    _FdRedirectionStreamManagers, so the raw file descriptors behind the
    managed streams are redirected as well.
    """

    STREAM_MANAGER_CLASS = _FdRedirectionStreamManager

    def start_logging(self):
        """Begin capturing output and launch the first logging subprocess."""
        super(FdRedirectionLoggingManager, self).start_logging()
        # push an initial context so a logging subprocess gets spawned
        # immediately
        self._push_context(self._get_context())


    def undo_redirect(self):
        """Undo the most recent redirection that has not yet been undone."""
        # A stack depth of one means only start_logging() has run (no
        # redirects have occurred since), so the caller has nothing to undo.
        if len(self._context_stack) < 2:
            raise RuntimeError('No redirects to undo')
        super(FdRedirectionLoggingManager, self).undo_redirect()