blob: 7c6592f535af13ecbf68314127bcd55dfceca373 [file] [log] [blame]
Alex Miller2a9a2bb2014-09-11 12:54:06 -07001"""
2Autotest has some surprisingly complicated logging behaviour.
3
4Managers allow you to define logging contexts, which define a set of rules on
5how to handle stdout/stderr. The niceness that you get by going through the
6logging_manager for this is that you can push/pop these contexts. So if you
7wish to temporarily change how logging works, this gives you that mechanism.
8
9Most of this file is actually setting up for a confusing
10fork-for-a-logging-subprocess ordeal that's better explained as
11
12 normal python logging
13 ^
14 |
15 +--------+ |
16 |AUTOSERV| +---+---+
17 | +------stdout---->+LOGGING|
18 | fork() | +---+---+
19 ++------++ ^
20 | | |
21 +--------+ +--------+ stdout
22 | | |
23 +---------+--------+ +--------+---------+ |
24 | AUTOSERV | | AUTOSERV | |
25 | | | | |
26 ++----------------++ ++----------------++ |
27 || test || || test || |
28 || || || ||---+
29 ||logging.info('')|| ||logging.info('')||
30 |------------------| |------------------|
31 +------------------+ +------------------+
32
Each top-level box is a process.  When autoserv starts up, it forks off a
logging subprocess and points its own stdout/stderr file descriptors at the
subprocess's stdin.
35When we fork to create the processes that will run the test code, they inherit
36this. As do any processes they themselves fork and exec (such as shelling out
37to some command). This isn't the nicest, as it involves closing and dup'ing
38over stdout/stderr, but it does make sure that the whole process tree logs to
39python logging in a very consistent way.
40"""
41
42
Ruben Rodriguez Buchillon8e197982019-08-05 15:20:50 -070043import fcntl, logging, os, signal, sys, time, warnings
showarde96f7a92009-06-10 00:17:33 +000044
45# primary public APIs
46
def configure_logging(logging_config, **kwargs):
    """
    Set up the logging module from a configuration object.

    The object should be an instance of logging_config.LoggingConfig (usually
    of a subclass).  It is remembered on LoggingManager so that managers
    constructed later can consult it; all keyword arguments are forwarded to
    the object's configure_logging() method.

    Every entry point should call this once at application startup.
    """
    manager_cls = LoggingManager
    manager_cls.logging_config_object = logging_config
    logging_config.configure_logging(**kwargs)
58
59
def get_logging_manager(manage_stdout_and_stderr=False, redirect_fds=False):
    """
    Build a LoggingManager, optionally one that manages sys.stdout/sys.stderr.

    Every entry point that wants to capture stdout/stderr and/or use
    LoggingManager to manage a stack of destinations should call this method
    at application startup.

    @param manage_stdout_and_stderr: if true, the returned manager takes over
            sys.stdout and sys.stderr.
    @param redirect_fds: if true, return an FdRedirectionLoggingManager, which
            also redirects the underlying file descriptors.
    """
    manager_cls = FdRedirectionLoggingManager if redirect_fds else LoggingManager
    manager = manager_cls()
    if manage_stdout_and_stderr:
        manager.manage_stdout()
        manager.manage_stderr()
    return manager
76
77
78# implementation follows
79
# Module-level handle on the root logger; all handler manipulation below
# (adding, removing, re-pointing handlers) operates on this object.
logger = logging.getLogger()


def _current_handlers():
    # Snapshot the root logger's registered handlers as a set, so callers can
    # diff "before" and "after" handler configurations.
    return set(logger.handlers)
85
86
# Code objects of functions that the patched findCaller() below should skip
# when attributing a log record to its source file/line/function.
_caller_code_to_skip_in_logging_stack = set()


def do_not_report_as_logging_caller(func):
    """Decorator to annotate functions we will tell logging not to log."""
    # These are not the droids you are looking for.
    # You may go about your business.
    # func.__code__ (rather than the Python-2-only func.func_code) works on
    # Python 2.6+ and Python 3 alike.
    _caller_code_to_skip_in_logging_stack.add(func.__code__)
    return func
96
97
# Copied from Python 2.4 logging/__init__.py Logger.findCaller and enhanced.
# The logging code remains the same and compatible with this monkey patching
# through at least Python version 2.6.2.
def _logging_manager_aware_logger__find_caller(unused):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.

    Installed as logging.Logger.findCaller (see the monkey patch below), so
    in addition to logging's own internal frames it skips any frame whose
    code object was registered via do_not_report_as_logging_caller().

    @param unused: the Logger instance (bound method's self); not used here.
    @return (filename, line number, function name) of the first frame that is
            neither logging-internal nor registered to be skipped.
    """
    # NOTE(review): the stock findCaller starts from currentframe(); the
    # _getframe(2).f_back here skips additional wrapper frames, per the
    # "Copied ... and enhanced" note above.
    f = sys._getframe(2).f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        # Skip frames that belong to the logging module itself.
        if filename == logging._srcfile:
            f = f.f_back
            continue
        ### START additional code.
        # Skip frames annotated with @do_not_report_as_logging_caller.
        if co in _caller_code_to_skip_in_logging_stack:
            f = f.f_back
            continue
        ### END additional code.
        rv = (filename, f.f_lineno, co.co_name)
        break
    return rv
122
123
# Warn on Python versions newer than 2.7: the monkey patching below relies on
# logging internals (e.g. logging._srcfile, Formatter attributes) that this
# module has only been reviewed against on 2.x.
if sys.version_info[:2] > (2, 7):
    warnings.warn('This module has not been reviewed for Python %s' %
                  sys.version)


# Monkey patch our way around logging's design...
# Keep a reference to the stock implementation (so it could be restored), then
# install the manager-aware findCaller defined above.
_original_logger__find_caller = logging.Logger.findCaller
logging.Logger.findCaller = _logging_manager_aware_logger__find_caller
132
133
class LoggingFile(object):
    """
    A file-like object that forwards everything written to it into the
    logging infrastructure, one complete line at a time.
    """

    # This object is used to replace stdout and stderr, but doesn't expose
    # the same interface as a file object.  To work around the most
    # troublesome part of the API, |fileno()|, we need to be able to provide
    # a fake fd that can pass basic checks.
    _fake_fds = os.pipe()

    def __init__(self, prefix='', level=logging.DEBUG):
        """
        @param prefix - The prefix for each line logged by this object.
        @param level - The logging level used for forwarded lines.
        """
        self._prefix = prefix
        self._level = level
        self._buffer = []


    @do_not_report_as_logging_caller
    def write(self, data):
        """
        Write data, emitting it to logging only in whole-line units.

        An incomplete trailing fragment is buffered until a later write
        supplies its terminating newline.
        @param data - Raw data (a string) that will be processed.
        """
        # split() (unlike splitlines()) keeps a final, possibly empty,
        # fragment after the last newline.
        pieces = data.split('\n')
        partial = pieces.pop()
        if pieces:
            # The first piece completes whatever was previously buffered.
            self._buffer.append(pieces[0])
            self._flush_buffer()
            for whole_line in pieces[1:]:
                self._log_line(whole_line)
        if partial:
            self._buffer.append(partial)


    @do_not_report_as_logging_caller
    def _log_line(self, line):
        """Hand a single line of output to the logging module."""
        logging.log(self._level, self._prefix + line)


    @do_not_report_as_logging_caller
    def _flush_buffer(self):
        # Emit any buffered partial-line data as one line, then reset.
        if self._buffer:
            self._log_line(''.join(self._buffer))
            self._buffer = []


    @do_not_report_as_logging_caller
    def flush(self):
        self._flush_buffer()


    @do_not_report_as_logging_caller
    def isatty(self):
        # Never a terminal; callers probing for interactive output get False.
        return False


    @do_not_report_as_logging_caller
    def fileno(self):
        # We return the read end of the pipe here because if we returned the
        # write end, one could reasonably assume that writing to the fd is
        # the same as stdout.write().  As we aren't reading from the other
        # end of the pipe, writing to this fd should be an error.
        return self._fake_fds[0]
205
206
class _StreamManager(object):
    """
    Routes all output for a given stream (normally stdout or stderr) into the
    logging module, by swapping the stream's file object for a LoggingFile
    that forwards writes to logging.log().
    """
    def __init__(self, stream, level, stream_setter):
        """
        @param stream: stream object to manage
        @param level: level at which data written to the stream will be logged
        @param stream_setter: function accepting a stream object that will
                replace the given stream in its original location.
        """
        self._stream = stream
        self._level = level
        self._stream_setter = stream_setter
        self._logging_stream = None


    def _replace_with_logger(self):
        # Swap in a LoggingFile so that writes are routed to logging.log().
        replacement = LoggingFile(level=self._level)
        self._logging_stream = replacement
        self._stream_setter(replacement)


    def _restore_stream(self):
        # Put the original stream object back in place.
        self._stream_setter(self._stream)


    def flush(self):
        self._logging_stream.flush()


    def start_logging(self):
        """Start directing the stream to the logging module."""
        self._replace_with_logger()


    def stop_logging(self):
        """Restore the stream to its original settings."""
        self._restore_stream()


    def on_push_context(self, context):
        """
        Called when the logging manager is about to push a new context onto
        the stack and has changed logging settings.  The StreamHandler can
        modify the context to be saved before returning.
        """
        pass


    def on_restore_context(self, context):
        """
        Called when the logging manager is restoring a previous context.
        """
        pass
264
265
class LoggingManager(object):
    """
    Manages a stack of logging configurations, allowing clients to conveniently
    add and remove logging destinations.  Also keeps a list of StreamManagers
    to easily direct streams into the logging module.
    """

    # Class used to wrap each managed stream; FdRedirectionLoggingManager
    # overrides this with an FD-redirecting variant.
    STREAM_MANAGER_CLASS = _StreamManager

    # Shared logging_config.LoggingConfig instance, installed by the
    # module-level configure_logging() before any manager is constructed.
    logging_config_object = None

    def __init__(self):
        """
        This class should not ordinarily be constructed directly (other than in
        tests).  Use the module-global factory method get_logging_manager()
        instead.

        @raise RuntimeError: if configure_logging() was never called.
        """
        if self.logging_config_object is None:
            raise RuntimeError('You must call configure_logging() before this')

        # _context_stack holds a stack of context dicts.  Each context dict
        # contains:
        # * old_handlers: list of registered logging Handlers
        # contexts may also be extended by _StreamHandlers
        self._context_stack = []
        self._streams = []
        self._started = False


    def manage_stream(self, stream, level, stream_setter):
        """
        Tells this manager to manage the given stream.  All data written to the
        stream will be directed to the logging module instead.  Must be called
        before start_logging().

        @param stream: stream to manage
        @param level: level to log data written to this stream
        @param stream_setter: function to set the stream to a new object
        @raise RuntimeError: if called after start_logging().
        """
        if self._started:
            raise RuntimeError('You must call this before start_logging()')
        self._streams.append(self.STREAM_MANAGER_CLASS(stream, level,
                                                       stream_setter))


    def _sys_stream_setter(self, stream_name):
        """
        Build a setter closure that installs a file object as sys.stdout or
        sys.stderr.
        """
        assert stream_name in ('stdout', 'stderr'), stream_name
        def set_stream(file_object):
            setattr(sys, stream_name, file_object)
        return set_stream


    def manage_stdout(self):
        """Manage sys.stdout, logging written data at INFO level."""
        self.manage_stream(sys.stdout, logging.INFO,
                           self._sys_stream_setter('stdout'))


    def manage_stderr(self):
        """Manage sys.stderr, logging at the configured stderr level."""
        self.manage_stream(sys.stderr, self.logging_config_object.stderr_level,
                           self._sys_stream_setter('stderr'))


    def start_logging(self):
        """
        Begin capturing output to the logging module.
        """
        for stream_manager in self._streams:
            stream_manager.start_logging()
        self._started = True


    def stop_logging(self):
        """
        Restore output to its original state.
        """
        # Unwind any contexts pushed by redirects before releasing streams.
        while self._context_stack:
            self._pop_context()

        for stream_manager in self._streams:
            stream_manager.stop_logging()

        self._started = False


    def _clear_all_handlers(self):
        # Detach every handler currently registered on the root logger.
        for handler in _current_handlers():
            logger.removeHandler(handler)


    def _get_context(self):
        # Base context dict; stream managers may add their own entries via
        # on_push_context().
        return {'old_handlers': _current_handlers()}


    def _push_context(self, context):
        # Give each stream manager a chance to record per-stream state in the
        # context before it is saved on the stack.
        for stream_manager in self._streams:
            stream_manager.on_push_context(context)
        self._context_stack.append(context)


    def _flush_all_streams(self):
        # Flush buffered partial lines so they are logged under the settings
        # in effect when they were written.
        for stream_manager in self._streams:
            stream_manager.flush()


    def _add_log_handlers(self, add_handlers_fn):
        """
        Modify the logging module's registered handlers and push a new context
        onto the stack.
        @param add_handlers_fn: function to modify the registered logging
                handlers. Accepts a context dictionary which may be modified.
        """
        self._flush_all_streams()
        context = self._get_context()

        add_handlers_fn(context)

        self._push_context(context)


    class _TaggingFormatter(logging.Formatter):
        """
        Delegates to a given formatter, but prefixes each line of output with a
        tag.

        NOTE(review): logging.Formatter.__init__ is never called; _fmt and
        datefmt are assigned directly, which matches the Python 2 Formatter
        internals this module targets.  Python >= 3.2 formats via _style, so
        this class would need updating there -- confirm before porting.
        """
        def __init__(self, base_formatter, tag):
            self.base_formatter = base_formatter
            prefix = tag + ' : '
            self._fmt = base_formatter._fmt.replace('%(message)s',
                                                    prefix + '%(message)s')
            self.datefmt = base_formatter.datefmt


    def _add_tagging_formatter(self, tag):
        # Wrap every current handler's formatter so each message gets a
        # "<tag> : " prefix.
        for handler in _current_handlers():
            tagging_formatter = self._TaggingFormatter(handler.formatter, tag)
            handler.setFormatter(tagging_formatter)


    def _do_redirect(self, stream=None, filename=None, level=None,
                     clear_other_handlers=False):
        """
        Common implementation behind the redirect*/tee_redirect* methods.

        Exactly one of stream or filename must be given.

        @param stream - stream object to add a handler for, if given.
        @param filename - path to add a file handler for, if given.
        @param level - if given, level to set on the newly added handler.
        @param clear_other_handlers - if true, clear out all other logging
                handlers.
        @raise RuntimeError: if start_logging() has not been called yet.
        """
        assert bool(stream) != bool(filename) # xor
        if not self._started:
            raise RuntimeError('You must call start_logging() before this')

        def add_handler(context):
            if clear_other_handlers:
                self._clear_all_handlers()

            if stream:
                handler = self.logging_config_object.add_stream_handler(stream)
            else:
                handler = self.logging_config_object.add_file_handler(filename)

            if level:
                handler.setLevel(level)

        self._add_log_handlers(add_handler)


    def redirect(self, filename):
        """Redirect output to the specified file"""
        self._do_redirect(filename=filename, clear_other_handlers=True)


    def redirect_to_stream(self, stream):
        """Redirect output to the given stream"""
        self._do_redirect(stream=stream, clear_other_handlers=True)


    def tee_redirect(self, filename, level=None):
        """Tee output to the specified file"""
        self._do_redirect(filename=filename, level=level)


    def tee_redirect_to_stream(self, stream):
        """Tee output to the given stream"""
        self._do_redirect(stream=stream)


    def tee_redirect_debug_dir(self, debug_dir, log_name=None, tag=None):
        """
        Tee output to a full new set of debug logs in the given directory.

        @param debug_dir - directory to write the debug logs in.
        @param log_name - optional name passed through to the logging config.
        @param tag - if given, prefix each message with "<tag> : " while this
                redirect is active.
        """
        def add_handlers(context):
            if tag:
                self._add_tagging_formatter(tag)
                # Remember that we tagged, so _restore_context() can unwrap
                # the formatters later.
                context['tag_added'] = True
            self.logging_config_object.add_debug_file_handlers(
                    debug_dir, log_name=log_name)
        self._add_log_handlers(add_handlers)


    def _restore_context(self, context):
        # Let each stream manager undo its own per-stream state first.
        for stream_handler in self._streams:
            stream_handler.on_restore_context(context)

        # restore logging handlers
        old_handlers = context['old_handlers']
        # Close handlers added since this context was pushed...
        for handler in _current_handlers() - old_handlers:
            handler.close()
        # ...then reinstall exactly the saved handler set.
        self._clear_all_handlers()
        for handler in old_handlers:
            logger.addHandler(handler)

        if 'tag_added' in context:
            # Undo _add_tagging_formatter() by restoring each handler's
            # wrapped base formatter.
            for handler in _current_handlers():
                tagging_formatter = handler.formatter
                handler.setFormatter(tagging_formatter.base_formatter)


    def _pop_context(self):
        # Flush under the outgoing settings before switching back.
        self._flush_all_streams()
        context = self._context_stack.pop()
        self._restore_context(context)


    def undo_redirect(self):
        """
        Undo the last redirection (that hasn't yet been undone).

        If any subprocesses have been launched since the redirection was
        performed, they must have ended by the time this is called.  Otherwise,
        this will hang waiting for the logging subprocess to end.

        @raise RuntimeError: if there is nothing to undo.
        """
        if not self._context_stack:
            raise RuntimeError('No redirects to undo')
        self._pop_context()


    def restore(self):
        """
        Same as undo_redirect().  For backwards compatibility with
        fd_stack.
        """
        self.undo_redirect()
505
506
class _FdRedirectionStreamManager(_StreamManager):
    """
    Like StreamManager, but also captures output from subprocesses by modifying
    the underlying file descriptors.

    For the underlying file descriptors, we spawn a subprocess that writes all
    input to the logging module, and we point the FD to that subprocess.  As a
    result, every time we redirect output we need to spawn a new subprocess to
    pick up the new logging settings (without disturbing any existing processes
    using the old logging subprocess).

    If, one day, we could get all code using utils.run() and friends to launch
    subprocesses, we'd no longer need to handle raw FD output, and we could
    get rid of all this business with subprocesses.  Another option would be
    to capture all stray output to a single, separate destination.
    """

    # How long to wait for the logging subprocess to exit, and how often to
    # poll waitpid() while waiting (see on_restore_context()).
    WAITPID_TIMEOUT_S = 5
    WAITPID_SAMPLE_TIME_S = 0.1

    def __init__(self, stream, level, stream_setter):
        """
        @param stream: stream to manage; must be backed by a real FD.
        @param level: level at which data written to the stream is logged.
        @param stream_setter: function that installs a replacement stream.
        @raise ValueError: if the stream has no fileno().
        """
        if not hasattr(stream, 'fileno'):
            # with fake, in-process file objects, subprocess output won't be
            # captured. this should never happen in normal use, since the
            # factory methods will only pass sys.stdout and sys.stderr.
            raise ValueError("FdRedirectionLoggingManager won't work with "
                             "streams that aren't backed by file "
                             "descriptors")

        super(_FdRedirectionStreamManager, self).__init__(stream, level,
                                                          stream_setter)
        # The raw FD being managed, and (once start_logging() runs) a file
        # object wrapping a safe duplicate of it.
        self._fd = stream.fileno()
        self._fd_copy_stream = None


    def _point_stream_handlers_to_copy(self):
        """
        point logging StreamHandlers that point to this stream to a safe
        copy of the underlying FD. otherwise, StreamHandler output will go
        to the logging subprocess, effectively getting doubly logged.
        """
        fd_copy = os.dup(self._fd)
        self._fd_copy_stream = os.fdopen(fd_copy, 'w')
        self._redirect_logging_stream_handlers(self._stream,
                                               self._fd_copy_stream)


    def _restore_stream_handlers(self):
        """ point logging StreamHandlers back to the original FD """
        self._redirect_logging_stream_handlers(self._fd_copy_stream,
                                               self._stream)
        self._fd_copy_stream.close()


    def _redirect_logging_stream_handlers(self, old_stream, new_stream):
        """
        Redirect all configured logging StreamHandlers pointing to
        old_stream to point to new_stream instead.
        """
        for handler in _current_handlers():
            # Match by underlying FD, not object identity, so handlers built
            # around a different file object on the same FD are caught too.
            points_to_stream = (isinstance(handler, logging.StreamHandler) and
                                hasattr(handler.stream, 'fileno') and
                                handler.stream.fileno() == old_stream.fileno())
            if points_to_stream:
                logger.removeHandler(handler)
                handler.close()  # doesn't close the stream, just the handler

                # Rebuild an equivalent handler (level, formatter, filters)
                # on the replacement stream.
                new_handler = logging.StreamHandler(new_stream)
                new_handler.setLevel(handler.level)
                new_handler.setFormatter(handler.formatter)
                for log_filter in handler.filters:
                    new_handler.addFilter(log_filter)
                logger.addHandler(new_handler)


    def start_logging(self):
        """Start managing the stream, and shield StreamHandlers via a copy."""
        super(_FdRedirectionStreamManager, self).start_logging()
        self._point_stream_handlers_to_copy()


    def stop_logging(self):
        """Stop managing the stream and re-point StreamHandlers at it."""
        super(_FdRedirectionStreamManager, self).stop_logging()
        self._restore_stream_handlers()


    def _spawn_logging_subprocess(self):
        """
        Spawn a subprocess to log all input to the logging module with the
        current settings, and direct output to it.

        @return the child's PID (in the parent; the child never returns).
        """
        read_end, write_end = os.pipe()
        pid = os.fork()
        if pid: # parent
            os.close(read_end)
            os.dup2(write_end, self._fd) # point FD to the subprocess
            os.close(write_end)
            # Mark the FD close-on-exec so exec'd children don't hold the
            # pipe open (which would keep the logging subprocess alive).
            fd_flags = fcntl.fcntl(self._fd, fcntl.F_GETFD)
            fcntl.fcntl(self._fd, fcntl.F_SETFD,
                        fd_flags | fcntl.FD_CLOEXEC)
            return pid
        else: # child
            try:
                os.close(write_end)
                # ensure this subprocess doesn't hold any pipes to others
                os.close(1)
                os.close(2)
                self._run_logging_subprocess(read_end) # never returns
            except:
                # don't let exceptions in the child escape
                try:
                    logging.exception('Logging subprocess died:')
                finally:
                    os._exit(1)


    def _run_logging_subprocess(self, read_fd):
        """
        Always run from a subprocess.  Read from read_fd and write to the
        logging module until EOF.
        """
        # A SIGTERM will be issued by the main process if it believes this
        # subprocess to be stuck. In that case, simply exit with status 1.
        # See below for pending TODO on how to remove this.
        signal.signal(signal.SIGTERM, lambda signum, stack: os._exit(1))
        input_file = os.fdopen(read_fd, 'r')
        # readline-until-'' loop: ends when the write end of the pipe closes.
        for line in iter(input_file.readline, ''):
            logging.log(self._level, line.rstrip('\n'))
        logging.debug('Logging subprocess finished')
        os._exit(0)


    def _context_id(self):
        # Key under which this stream stores its sub-dict in a shared context.
        return '%s_context' % id(self)


    def on_push_context(self, context):
        # adds a context dict for this stream, $id_context, with the following:
        # * old_fd: FD holding a copy of the managed FD before launching a new
        #   subprocess.
        # * child_pid: PID of the logging subprocess launched
        fd_copy = os.dup(self._fd)
        child_pid = self._spawn_logging_subprocess()
        my_context = {'old_fd': fd_copy, 'child_pid': child_pid}
        context[self._context_id()] = my_context


    def on_restore_context(self, context):
        """Shut down this context's logging subprocess and restore the FD."""
        my_context = context[self._context_id()]

        # shut down subprocess
        child_pid = my_context['child_pid']
        try:
            # Closing the FD sends EOF to the subprocess, which then exits.
            os.close(self._fd)
            # TODO(crbug.com/970115): remove this work-around in favor of
            # a clean waitpid(child_pid, 0) once the issue described in the
            # bug is root caused and resolved.
            end = time.time() + self.WAITPID_TIMEOUT_S
            while time.time() < end:
                if os.waitpid(child_pid, os.WNOHANG) != (0, 0):
                    break
                time.sleep(self.WAITPID_SAMPLE_TIME_S)
            else:
                # After timeout the process still hasn't finished. Send a
                # SIGTERM as it's likely stuck.
                os.kill(child_pid, signal.SIGTERM)
                # NOTE(review): logging.exception() here runs with no active
                # exception, so it appends "NoneType: None" to the message;
                # logging.error() was probably intended -- confirm.
                logging.exception('Ended up killing logging subprocess at %s.',
                                  child_pid)
        except OSError:
            logging.exception('Failed to cleanly shutdown logging subprocess:')

        # restore previous FD
        old_fd = my_context['old_fd']
        os.dup2(old_fd, self._fd)
        os.close(old_fd)
681
682
class FdRedirectionLoggingManager(LoggingManager):
    """
    A simple extension of LoggingManager to use FdRedirectionStreamManagers,
    so that managed streams have their underlying FDs redirected.
    """

    STREAM_MANAGER_CLASS = _FdRedirectionStreamManager

    def start_logging(self):
        """Begin capturing output and launch the initial logging subprocess."""
        super(FdRedirectionLoggingManager, self).start_logging()
        # spawn the initial logging subprocess
        self._push_context(self._get_context())


    def undo_redirect(self):
        """Undo the most recent redirection, keeping the initial context."""
        # A stack of size one means only start_logging() has run; there is no
        # redirect on top of it to undo.
        if len(self._context_stack) >= 2:
            super(FdRedirectionLoggingManager, self).undo_redirect()
        else:
            raise RuntimeError('No redirects to undo')