.. _logging-cookbook:

================
Logging Cookbook
================

:Author: Vinay Sajip <vinay_sajip at red-dove dot com>

This page contains a number of recipes related to logging, which have been found
useful in the past.

.. currentmodule:: logging

Using logging in multiple modules
---------------------------------

Multiple calls to ``logging.getLogger('someLogger')`` return a reference to the
same logger object. This is true not only within the same module, but also
across modules as long as it is in the same Python interpreter process. It is
true for references to the same object; additionally, application code can
define and configure a parent logger in one module and create (but not
configure) a child logger in a separate module, and all logger calls to the
child will pass up to the parent. Here is a main module::

    import logging
    import auxiliary_module

    # create logger with 'spam_application'
    logger = logging.getLogger('spam_application')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('spam.log')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)

    logger.info('creating an instance of auxiliary_module.Auxiliary')
    a = auxiliary_module.Auxiliary()
    logger.info('created an instance of auxiliary_module.Auxiliary')
    logger.info('calling auxiliary_module.Auxiliary.do_something')
    a.do_something()
    logger.info('finished auxiliary_module.Auxiliary.do_something')
    logger.info('calling auxiliary_module.some_function()')
    auxiliary_module.some_function()
    logger.info('done with auxiliary_module.some_function()')

Here is the auxiliary module::

    import logging

    # create logger
    module_logger = logging.getLogger('spam_application.auxiliary')

    class Auxiliary:
        def __init__(self):
            self.logger = logging.getLogger('spam_application.auxiliary.Auxiliary')
            self.logger.info('creating an instance of Auxiliary')

        def do_something(self):
            self.logger.info('doing something')
            a = 1 + 1
            self.logger.info('done doing something')

    def some_function():
        module_logger.info('received a call to "some_function"')

The output looks like this:

.. code-block:: none

   2005-03-23 23:47:11,663 - spam_application - INFO -
      creating an instance of auxiliary_module.Auxiliary
   2005-03-23 23:47:11,665 - spam_application.auxiliary.Auxiliary - INFO -
      creating an instance of Auxiliary
   2005-03-23 23:47:11,665 - spam_application - INFO -
      created an instance of auxiliary_module.Auxiliary
   2005-03-23 23:47:11,668 - spam_application - INFO -
      calling auxiliary_module.Auxiliary.do_something
   2005-03-23 23:47:11,668 - spam_application.auxiliary.Auxiliary - INFO -
      doing something
   2005-03-23 23:47:11,669 - spam_application.auxiliary.Auxiliary - INFO -
      done doing something
   2005-03-23 23:47:11,670 - spam_application - INFO -
      finished auxiliary_module.Auxiliary.do_something
   2005-03-23 23:47:11,671 - spam_application - INFO -
      calling auxiliary_module.some_function()
   2005-03-23 23:47:11,672 - spam_application.auxiliary - INFO -
      received a call to "some_function"
   2005-03-23 23:47:11,673 - spam_application - INFO -
      done with auxiliary_module.some_function()

Logging from multiple threads
-----------------------------

Logging from multiple threads requires no special effort. The following example
shows logging from the main (initial) thread and another thread::

    import logging
    import threading
    import time

    def worker(arg):
        while not arg['stop']:
            logging.debug('Hi from myfunc')
            time.sleep(0.5)

    def main():
        logging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')
        info = {'stop': False}
        thread = threading.Thread(target=worker, args=(info,))
        thread.start()
        while True:
            try:
                logging.debug('Hello from main')
                time.sleep(0.75)
            except KeyboardInterrupt:
                info['stop'] = True
                break
        thread.join()

    if __name__ == '__main__':
        main()

When run, the script should print something like the following:

.. code-block:: none

        0 Thread-1 Hi from myfunc
        3 MainThread Hello from main
      505 Thread-1 Hi from myfunc
      755 MainThread Hello from main
     1007 Thread-1 Hi from myfunc
     1507 MainThread Hello from main
     1508 Thread-1 Hi from myfunc
     2010 Thread-1 Hi from myfunc
     2258 MainThread Hello from main
     2512 Thread-1 Hi from myfunc
     3009 MainThread Hello from main
     3013 Thread-1 Hi from myfunc
     3515 Thread-1 Hi from myfunc
     3761 MainThread Hello from main
     4017 Thread-1 Hi from myfunc
     4513 MainThread Hello from main
     4518 Thread-1 Hi from myfunc

This shows the logging output interspersed as one might expect. This approach
works for more threads than shown here, of course.

Multiple handlers and formatters
--------------------------------

Loggers are plain Python objects. The :meth:`~Logger.addHandler` method has no
minimum or maximum quota for the number of handlers you may add. Sometimes it
will be beneficial for an application to log all messages of all severities to a
text file while simultaneously logging errors or above to the console. To set
this up, simply configure the appropriate handlers. The logging calls in the
application code will remain unchanged. Here is a slight modification to the
previous simple module-based configuration example::

    import logging

    logger = logging.getLogger('simple_example')
    logger.setLevel(logging.DEBUG)
    # create file handler which logs even debug messages
    fh = logging.FileHandler('spam.log')
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.ERROR)
    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    fh.setFormatter(formatter)
    # add the handlers to logger
    logger.addHandler(ch)
    logger.addHandler(fh)

    # 'application' code
    logger.debug('debug message')
    logger.info('info message')
    logger.warning('warn message')
    logger.error('error message')
    logger.critical('critical message')

Notice that the 'application' code does not care about multiple handlers. All
that changed was the addition and configuration of a new handler named *fh*.

The ability to create new handlers with higher- or lower-severity filters can be
very helpful when writing and testing an application. Instead of using many
``print`` statements for debugging, use ``logger.debug``: Unlike the print
statements, which you will have to delete or comment out later, the ``logger.debug``
statements can remain intact in the source code and remain dormant until you
need them again. At that time, the only change that needs to happen is to
modify the severity level of the logger and/or handler to debug.
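
For instance, to re-enable the dormant output in the example above, it is
enough to lower the console handler's threshold again (a minimal sketch which
reuses the ``logger`` and ``ch`` names from the snippet above)::

    ch.setLevel(logging.DEBUG)     # was ERROR; the file handler is already at DEBUG
    logger.debug('debug message')  # now reaches the console as well as spam.log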

.. _multiple-destinations:

Logging to multiple destinations
--------------------------------

Let's say you want to log to console and file with different message formats and
in differing circumstances. Say you want to log messages with levels of DEBUG
and higher to file, and those messages at level INFO and higher to the console.
Let's also assume that the file should contain timestamps, but the console
messages should not. Here's how you can achieve this::

    import logging

    # set up logging to file - see previous section for more details
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename='/tmp/myapp.log',
                        filemode='w')
    # define a Handler which writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    # tell the handler to use this format
    console.setFormatter(formatter)
    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Now, we can log to the root logger, or any other logger. First the root...
    logging.info('Jackdaws love my big sphinx of quartz.')

    # Now, define a couple of other loggers which might represent areas in your
    # application:

    logger1 = logging.getLogger('myapp.area1')
    logger2 = logging.getLogger('myapp.area2')

    logger1.debug('Quick zephyrs blow, vexing daft Jim.')
    logger1.info('How quickly daft jumping zebras vex.')
    logger2.warning('Jail zesty vixen who grabbed pay from quack.')
    logger2.error('The five boxing wizards jump quickly.')

When you run this, on the console you will see

.. code-block:: none

   root        : INFO     Jackdaws love my big sphinx of quartz.
   myapp.area1 : INFO     How quickly daft jumping zebras vex.
   myapp.area2 : WARNING  Jail zesty vixen who grabbed pay from quack.
   myapp.area2 : ERROR    The five boxing wizards jump quickly.

and in the file you will see something like

.. code-block:: none

   10-22 22:19 root         INFO     Jackdaws love my big sphinx of quartz.
   10-22 22:19 myapp.area1  DEBUG    Quick zephyrs blow, vexing daft Jim.
   10-22 22:19 myapp.area1  INFO     How quickly daft jumping zebras vex.
   10-22 22:19 myapp.area2  WARNING  Jail zesty vixen who grabbed pay from quack.
   10-22 22:19 myapp.area2  ERROR    The five boxing wizards jump quickly.

As you can see, the DEBUG message only shows up in the file. The other messages
are sent to both destinations.

This example uses console and file handlers, but you can use any number and
combination of handlers you choose.


Configuration server example
----------------------------

Here is an example of a module using the logging configuration server::

    import logging
    import logging.config
    import time
    import os

    # read initial config file
    logging.config.fileConfig('logging.conf')

    # create and start listener on port 9999
    t = logging.config.listen(9999)
    t.start()

    logger = logging.getLogger('simpleExample')

    try:
        # loop through logging calls to see the difference
        # new configurations make, until Ctrl+C is pressed
        while True:
            logger.debug('debug message')
            logger.info('info message')
            logger.warning('warn message')
            logger.error('error message')
            logger.critical('critical message')
            time.sleep(5)
    except KeyboardInterrupt:
        # cleanup
        logging.config.stopListening()
        t.join()

And here is a script that takes a filename and sends that file to the server,
properly preceded with the binary-encoded length, as the new logging
configuration::

    #!/usr/bin/env python
    import socket, sys, struct

    with open(sys.argv[1], 'rb') as f:
        data_to_send = f.read()

    HOST = 'localhost'
    PORT = 9999
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print('connecting...')
    s.connect((HOST, PORT))
    print('sending config...')
    s.send(struct.pack('>L', len(data_to_send)))
    s.send(data_to_send)
    s.close()
    print('complete')


Dealing with handlers that block
--------------------------------

.. currentmodule:: logging.handlers

Sometimes you have to get your logging handlers to do their work without
blocking the thread you're logging from. This is common in Web applications,
though of course it also occurs in other scenarios.

A common culprit which demonstrates sluggish behaviour is the
:class:`SMTPHandler`: sending emails can take a long time, for a
number of reasons outside the developer's control (for example, a poorly
performing mail or network infrastructure). But almost any network-based
handler can block: Even a :class:`SocketHandler` operation may do a
DNS query under the hood which is too slow (and this query can be deep in the
socket library code, below the Python layer, and outside your control).

One solution is to use a two-part approach. For the first part, attach only a
:class:`QueueHandler` to those loggers which are accessed from
performance-critical threads. They simply write to their queue, which can be
sized to a large enough capacity or initialized with no upper bound on its
size. The write to the queue will typically be accepted quickly, though you
will probably need to catch the :exc:`queue.Full` exception as a precaution
in your code. If you are a library developer who has performance-critical
threads in your code, be sure to document this (together with a suggestion to
attach only ``QueueHandlers`` to your loggers) for the benefit of other
developers who will use your code.
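
If you do use a bounded queue, one way of handling this (a sketch only, with a
hypothetical class name) is to subclass :class:`QueueHandler` so that a full
queue causes the record to be dropped silently rather than reported as an
error::

    import queue
    from logging.handlers import QueueHandler

    class DroppingQueueHandler(QueueHandler):
        """Drop the record, rather than complain, when the bounded queue is full."""
        def enqueue(self, record):
            try:
                self.queue.put_nowait(record)
            except queue.Full:
                pass  # losing a record is preferable to stalling the caller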

The second part of the solution is :class:`QueueListener`, which has been
designed as the counterpart to :class:`QueueHandler`. A
:class:`QueueListener` is very simple: it's passed a queue and some handlers,
and it fires up an internal thread which listens to its queue for LogRecords
sent from ``QueueHandlers`` (or any other source of ``LogRecords``, for that
matter). The ``LogRecords`` are removed from the queue and passed to the
handlers for processing.

The advantage of having a separate :class:`QueueListener` class is that you
can use the same instance to service multiple ``QueueHandlers``. This is more
resource-friendly than, say, having threaded versions of the existing handler
classes, which would eat up one thread per handler for no particular benefit.

An example of using these two classes follows (imports omitted)::

    que = queue.Queue(-1)  # no limit on size
    queue_handler = QueueHandler(que)
    handler = logging.StreamHandler()
    listener = QueueListener(que, handler)
    root = logging.getLogger()
    root.addHandler(queue_handler)
    formatter = logging.Formatter('%(threadName)s: %(message)s')
    handler.setFormatter(formatter)
    listener.start()
    # The log output will display the thread which generated
    # the event (the main thread) rather than the internal
    # thread which monitors the internal queue. This is what
    # you want to happen.
    root.warning('Look out!')
    listener.stop()

which, when run, will produce:

.. code-block:: none

   MainThread: Look out!

.. versionchanged:: 3.5
   Prior to Python 3.5, the :class:`QueueListener` always passed every message
   received from the queue to every handler it was initialized with. (This was
   because it was assumed that level filtering was all done on the other side,
   where the queue is filled.) From 3.5 onwards, this behaviour can be changed
   by passing a keyword argument ``respect_handler_level=True`` to the
   listener's constructor. When this is done, the listener compares the level
   of each message with the handler's level, and only passes a message to a
   handler if it's appropriate to do so.
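
   For example, the listener in the snippet above could be created like this
   instead, so that each handler's own level is honoured::

       listener = QueueListener(que, handler, respect_handler_level=True)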

.. _network-logging:

Sending and receiving logging events across a network
------------------------------------------------------

Let's say you want to send logging events across a network, and handle them at
the receiving end. A simple way of doing this is attaching a
:class:`SocketHandler` instance to the root logger at the sending end::

    import logging, logging.handlers

    rootLogger = logging.getLogger('')
    rootLogger.setLevel(logging.DEBUG)
    socketHandler = logging.handlers.SocketHandler('localhost',
                        logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    # don't bother with a formatter, since a socket handler sends the event as
    # an unformatted pickle
    rootLogger.addHandler(socketHandler)

    # Now, we can log to the root logger, or any other logger. First the root...
    logging.info('Jackdaws love my big sphinx of quartz.')

    # Now, define a couple of other loggers which might represent areas in your
    # application:

    logger1 = logging.getLogger('myapp.area1')
    logger2 = logging.getLogger('myapp.area2')

    logger1.debug('Quick zephyrs blow, vexing daft Jim.')
    logger1.info('How quickly daft jumping zebras vex.')
    logger2.warning('Jail zesty vixen who grabbed pay from quack.')
    logger2.error('The five boxing wizards jump quickly.')

At the receiving end, you can set up a receiver using the :mod:`socketserver`
module. Here is a basic working example::

    import pickle
    import logging
    import logging.handlers
    import socketserver
    import struct


    class LogRecordStreamHandler(socketserver.StreamRequestHandler):
        """Handler for a streaming logging request.

        This basically logs the record using whatever logging policy is
        configured locally.
        """

        def handle(self):
            """
            Handle multiple requests - each expected to be a 4-byte length,
            followed by the LogRecord in pickle format. Logs the record
            according to whatever policy is configured locally.
            """
            while True:
                chunk = self.connection.recv(4)
                if len(chunk) < 4:
                    break
                slen = struct.unpack('>L', chunk)[0]
                chunk = self.connection.recv(slen)
                while len(chunk) < slen:
                    chunk = chunk + self.connection.recv(slen - len(chunk))
                obj = self.unPickle(chunk)
                record = logging.makeLogRecord(obj)
                self.handleLogRecord(record)

        def unPickle(self, data):
            return pickle.loads(data)

        def handleLogRecord(self, record):
            # if a name is specified, we use the named logger rather than the one
            # implied by the record.
            if self.server.logname is not None:
                name = self.server.logname
            else:
                name = record.name
            logger = logging.getLogger(name)
            # N.B. EVERY record gets logged. This is because Logger.handle
            # is normally called AFTER logger-level filtering. If you want
            # to do filtering, do it at the client end to save wasting
            # cycles and network bandwidth!
            logger.handle(record)

    class LogRecordSocketReceiver(socketserver.ThreadingTCPServer):
        """
        Simple TCP socket-based logging receiver suitable for testing.
        """

        allow_reuse_address = True

        def __init__(self, host='localhost',
                     port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
                     handler=LogRecordStreamHandler):
            socketserver.ThreadingTCPServer.__init__(self, (host, port), handler)
            self.abort = 0
            self.timeout = 1
            self.logname = None

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                abort = self.abort

    def main():
        logging.basicConfig(
            format='%(relativeCreated)5d %(name)-15s %(levelname)-8s %(message)s')
        tcpserver = LogRecordSocketReceiver()
        print('About to start TCP server...')
        tcpserver.serve_until_stopped()

    if __name__ == '__main__':
        main()

First run the server, and then the client. On the client side, nothing is
printed on the console; on the server side, you should see something like:

.. code-block:: none

   About to start TCP server...
      59 root            INFO     Jackdaws love my big sphinx of quartz.
      59 myapp.area1     DEBUG    Quick zephyrs blow, vexing daft Jim.
      69 myapp.area1     INFO     How quickly daft jumping zebras vex.
      69 myapp.area2     WARNING  Jail zesty vixen who grabbed pay from quack.
      69 myapp.area2     ERROR    The five boxing wizards jump quickly.

Note that there are some security issues with pickle in some scenarios. If
these affect you, you can use an alternative serialization scheme by overriding
the :meth:`~handlers.SocketHandler.makePickle` method and implementing your
alternative there, as well as adapting the above script to use your alternative
serialization.
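
As an illustration only (the class name below is hypothetical, and JSON is just
one possible alternative), the sending side might look something like this,
with the receiving script's ``unPickle`` method changed to use ``json.loads``
to match::

    import json
    import struct
    import logging.handlers

    class JSONSocketHandler(logging.handlers.SocketHandler):
        """Sketch of a pickle-free sender which serializes records as JSON."""
        def makePickle(self, record):
            d = dict(record.__dict__)
            d['msg'] = record.getMessage()  # merge any arguments into the message
            d['args'] = None
            d['exc_info'] = None            # tracebacks are not JSON-serializable
            d.pop('message', None)
            s = json.dumps(d, default=str).encode('utf-8')
            return struct.pack('>L', len(s)) + s  # same length prefix as before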


.. _context-info:

Adding contextual information to your logging output
-----------------------------------------------------

Sometimes you want logging output to contain contextual information in
addition to the parameters passed to the logging call. For example, in a
networked application, it may be desirable to log client-specific information
in the log (e.g. remote client's username, or IP address). Although you could
use the *extra* parameter to achieve this, it's not always convenient to pass
the information in this way. While it might be tempting to create
:class:`Logger` instances on a per-connection basis, this is not a good idea
because these instances are not garbage collected. While this is not a problem
in practice, when the number of :class:`Logger` instances is dependent on the
level of granularity you want to use in logging an application, it could
be hard to manage if the number of :class:`Logger` instances becomes
effectively unbounded.


Using LoggerAdapters to impart contextual information
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

An easy way in which you can pass contextual information to be output along
with logging event information is to use the :class:`LoggerAdapter` class.
This class is designed to look like a :class:`Logger`, so that you can call
:meth:`debug`, :meth:`info`, :meth:`warning`, :meth:`error`,
:meth:`exception`, :meth:`critical` and :meth:`log`. These methods have the
same signatures as their counterparts in :class:`Logger`, so you can use the
two types of instances interchangeably.

When you create an instance of :class:`LoggerAdapter`, you pass it a
:class:`Logger` instance and a dict-like object which contains your contextual
information. When you call one of the logging methods on an instance of
:class:`LoggerAdapter`, it delegates the call to the underlying instance of
:class:`Logger` passed to its constructor, and arranges to pass the contextual
information in the delegated call. Here's a snippet from the code of
:class:`LoggerAdapter`::

    def debug(self, msg, /, *args, **kwargs):
        """
        Delegate a debug call to the underlying logger, after adding
        contextual information from this adapter instance.
        """
        msg, kwargs = self.process(msg, kwargs)
        self.logger.debug(msg, *args, **kwargs)

The :meth:`~LoggerAdapter.process` method of :class:`LoggerAdapter` is where the
contextual information is added to the logging output. It's passed the message
and keyword arguments of the logging call, and it passes back (potentially)
modified versions of these to use in the call to the underlying logger. The
default implementation of this method leaves the message alone, but inserts
an 'extra' key in the keyword argument whose value is the dict-like object
passed to the constructor. Of course, if you had passed an 'extra' keyword
argument in the call to the adapter, it will be silently overwritten.

The advantage of using 'extra' is that the values in the dict-like object are
merged into the :class:`LogRecord` instance's __dict__, allowing you to use
customized strings with your :class:`Formatter` instances which know about
the keys of the dict-like object. If you need a different method, e.g. if you
want to prepend or append the contextual information to the message string,
you just need to subclass :class:`LoggerAdapter` and override
:meth:`~LoggerAdapter.process` to do what you need. Here is a simple example::

    class CustomAdapter(logging.LoggerAdapter):
        """
        This example adapter expects the passed in dict-like object to have a
        'connid' key, whose value in brackets is prepended to the log message.
        """
        def process(self, msg, kwargs):
            return '[%s] %s' % (self.extra['connid'], msg), kwargs

which you can use like this::

    logger = logging.getLogger(__name__)
    adapter = CustomAdapter(logger, {'connid': some_conn_id})

Then any events that you log to the adapter will have the value of
``some_conn_id`` prepended to the log messages.

Using objects other than dicts to pass contextual information
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You don't need to pass an actual dict to a :class:`LoggerAdapter` - you could
pass an instance of a class which implements ``__getitem__`` and ``__iter__`` so
that it looks like a dict to logging. This would be useful if you want to
generate values dynamically (whereas the values in a dict would be constant).
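
For instance, a (purely illustrative) class along the following lines computes
its value each time an event is logged, rather than once when the adapter is
created::

    import logging
    import time

    START = time.monotonic()

    class DynamicContext:
        """Dict-like object whose values are computed when logging asks for them."""
        def __getitem__(self, name):
            if name == 'uptime':
                return '%.1fs' % (time.monotonic() - START)  # fresh value per event
            raise KeyError(name)

        def __iter__(self):
            return iter(['uptime'])

    logging.basicConfig(format='uptime: %(uptime)s %(message)s')
    logger = logging.LoggerAdapter(logging.getLogger(__name__), DynamicContext())
    logger.warning('something happened')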


.. _filters-contextual:

Using Filters to impart contextual information
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can also add contextual information to log output using a user-defined
:class:`Filter`. ``Filter`` instances are allowed to modify the ``LogRecords``
passed to them, including adding additional attributes which can then be output
using a suitable format string, or if needed a custom :class:`Formatter`.

For example in a web application, the request being processed (or at least,
the interesting parts of it) can be stored in a threadlocal
(:class:`threading.local`) variable, and then accessed from a ``Filter`` to
add, say, information from the request - say, the remote IP address and remote
user's username - to the ``LogRecord``, using the attribute names 'ip' and
'user' as in the ``LoggerAdapter`` example above. In that case, the same format
string can be used to get similar output to that shown above. Here's an example
script::

    import logging
    from random import choice

    class ContextFilter(logging.Filter):
        """
        This is a filter which injects contextual information into the log.

        Rather than use actual contextual information, we just use random
        data in this demo.
        """

        USERS = ['jim', 'fred', 'sheila']
        IPS = ['123.231.231.123', '127.0.0.1', '192.168.0.1']

        def filter(self, record):

            record.ip = choice(ContextFilter.IPS)
            record.user = choice(ContextFilter.USERS)
            return True

    if __name__ == '__main__':
        levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)-15s %(name)-5s %(levelname)-8s IP: %(ip)-15s User: %(user)-8s %(message)s')
        a1 = logging.getLogger('a.b.c')
        a2 = logging.getLogger('d.e.f')

        f = ContextFilter()
        a1.addFilter(f)
        a2.addFilter(f)
        a1.debug('A debug message')
        a1.info('An info message with %s', 'some parameters')
        for x in range(10):
            lvl = choice(levels)
            lvlname = logging.getLevelName(lvl)
            a2.log(lvl, 'A message at %s level with %d %s', lvlname, 2, 'parameters')

which, when run, produces something like:

.. code-block:: none

   2010-09-06 22:38:15,292 a.b.c DEBUG    IP: 123.231.231.123 User: fred     A debug message
   2010-09-06 22:38:15,300 a.b.c INFO     IP: 192.168.0.1     User: sheila   An info message with some parameters
   2010-09-06 22:38:15,300 d.e.f CRITICAL IP: 127.0.0.1       User: sheila   A message at CRITICAL level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f ERROR    IP: 127.0.0.1       User: jim      A message at ERROR level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f DEBUG    IP: 127.0.0.1       User: sheila   A message at DEBUG level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f ERROR    IP: 123.231.231.123 User: fred     A message at ERROR level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f CRITICAL IP: 192.168.0.1     User: jim      A message at CRITICAL level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f CRITICAL IP: 127.0.0.1       User: sheila   A message at CRITICAL level with 2 parameters
   2010-09-06 22:38:15,300 d.e.f DEBUG    IP: 192.168.0.1     User: jim      A message at DEBUG level with 2 parameters
   2010-09-06 22:38:15,301 d.e.f ERROR    IP: 127.0.0.1       User: sheila   A message at ERROR level with 2 parameters
   2010-09-06 22:38:15,301 d.e.f DEBUG    IP: 123.231.231.123 User: fred     A message at DEBUG level with 2 parameters
   2010-09-06 22:38:15,301 d.e.f INFO     IP: 123.231.231.123 User: fred     A message at INFO level with 2 parameters


.. _multiple-processes:

Logging to a single file from multiple processes
-------------------------------------------------

Although logging is thread-safe, and logging to a single file from multiple
threads in a single process *is* supported, logging to a single file from
*multiple processes* is *not* supported, because there is no standard way to
serialize access to a single file across multiple processes in Python. If you
need to log to a single file from multiple processes, one way of doing this is
to have all the processes log to a :class:`~handlers.SocketHandler`, and have a
separate process which implements a socket server which reads from the socket
and logs to file. (If you prefer, you can dedicate one thread in one of the
existing processes to perform this function.)
:ref:`This section <network-logging>` documents this approach in more detail and
includes a working socket receiver which can be used as a starting point for you
to adapt in your own applications.

You could also write your own handler which uses the :class:`~multiprocessing.Lock`
class from the :mod:`multiprocessing` module to serialize access to the
file from your processes. The existing :class:`FileHandler` and subclasses do
not make use of :mod:`multiprocessing` at present, though they may do so in the
future. Note that at present, the :mod:`multiprocessing` module does not provide
working lock functionality on all platforms (see
https://bugs.python.org/issue3770).
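
If you did want to experiment with that approach, a sketch might look like the
following (the class name is hypothetical, and the lock must be created in the
parent process and shared with every worker so that they all use the same
one)::

    import logging
    import multiprocessing

    class LockedFileHandler(logging.FileHandler):
        """Sketch: serialize emit() calls across processes with a shared lock."""
        def __init__(self, filename, lock, mode='a'):
            super().__init__(filename, mode)
            self._lock = lock

        def emit(self, record):
            with self._lock:          # only one process writes at a time
                super().emit(record)

    lock = multiprocessing.Lock()     # created once, in the parent process
    # ... pass ``lock`` to each worker and, in the worker, do:
    # logging.getLogger().addHandler(LockedFileHandler('app.log', lock))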

.. currentmodule:: logging.handlers

Alternatively, you can use a ``Queue`` and a :class:`QueueHandler` to send
all logging events to one of the processes in your multi-process application.
The following example script demonstrates how you can do this; in the example
a separate listener process listens for events sent by other processes and logs
them according to its own logging configuration. Although the example only
demonstrates one way of doing it (for example, you may want to use a listener
thread rather than a separate listener process -- the implementation would be
analogous) it does allow for completely different logging configurations for
the listener and the other processes in your application, and can be used as
the basis for code meeting your own specific requirements::

    # You'll need these imports in your own code
    import logging
    import logging.handlers
    import multiprocessing

    # Next two import lines for this demo only
    from random import choice, random
    import time

    #
    # Because you'll want to define the logging configurations for listener and workers, the
    # listener and worker process functions take a configurer parameter which is a callable
    # for configuring logging for that process. These functions are also passed the queue,
    # which they use for communication.
    #
    # In practice, you can configure the listener however you want, but note that in this
    # simple example, the listener does not apply level or filter logic to received records.
    # In practice, you would probably want to do this logic in the worker processes, to avoid
    # sending events which would be filtered out between processes.
    #
    # The size of the rotated files is made small so you can see the results easily.
    def listener_configurer():
        root = logging.getLogger()
        h = logging.handlers.RotatingFileHandler('mptest.log', 'a', 300, 10)
        f = logging.Formatter('%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')
        h.setFormatter(f)
        root.addHandler(h)

    # This is the listener process top-level loop: wait for logging events
    # (LogRecords) on the queue and handle them, quit when you get a None for a
    # LogRecord.
    def listener_process(queue, configurer):
        configurer()
        while True:
            try:
                record = queue.get()
                if record is None:  # We send this as a sentinel to tell the listener to quit.
                    break
                logger = logging.getLogger(record.name)
                logger.handle(record)  # No level or filter logic applied - just do it!
            except Exception:
                import sys, traceback
                print('Whoops! Problem:', file=sys.stderr)
                traceback.print_exc(file=sys.stderr)

    # Arrays used for random selections in this demo

    LEVELS = [logging.DEBUG, logging.INFO, logging.WARNING,
              logging.ERROR, logging.CRITICAL]

    LOGGERS = ['a.b.c', 'd.e.f']

    MESSAGES = [
        'Random message #1',
        'Random message #2',
        'Random message #3',
    ]

    # The worker configuration is done at the start of the worker process run.
    # Note that on Windows you can't rely on fork semantics, so each process
    # will run the logging configuration code when it starts.
    def worker_configurer(queue):
        h = logging.handlers.QueueHandler(queue)  # Just the one handler needed
        root = logging.getLogger()
        root.addHandler(h)
        # send all messages, for demo; no other level or filter logic applied.
        root.setLevel(logging.DEBUG)

    # This is the worker process top-level loop, which just logs ten events with
    # random intervening delays before terminating.
    # The print messages are just so you know it's doing something!
    def worker_process(queue, configurer):
        configurer(queue)
        name = multiprocessing.current_process().name
        print('Worker started: %s' % name)
        for i in range(10):
            time.sleep(random())
            logger = logging.getLogger(choice(LOGGERS))
            level = choice(LEVELS)
            message = choice(MESSAGES)
            logger.log(level, message)
        print('Worker finished: %s' % name)

    # Here's where the demo gets orchestrated. Create the queue, create and start
    # the listener, create ten workers and start them, wait for them to finish,
    # then send a None to the queue to tell the listener to finish.
    def main():
        queue = multiprocessing.Queue(-1)
        listener = multiprocessing.Process(target=listener_process,
                                           args=(queue, listener_configurer))
        listener.start()
        workers = []
        for i in range(10):
            worker = multiprocessing.Process(target=worker_process,
                                             args=(queue, worker_configurer))
            workers.append(worker)
            worker.start()
        for w in workers:
            w.join()
        queue.put_nowait(None)
        listener.join()

    if __name__ == '__main__':
        main()

A variant of the above script keeps the logging in the main process, in a
separate thread::

    import logging
    import logging.config
    import logging.handlers
    from multiprocessing import Process, Queue
    import random
    import threading
    import time

    def logger_thread(q):
        while True:
            record = q.get()
            if record is None:
                break
            logger = logging.getLogger(record.name)
            logger.handle(record)


    def worker_process(q):
        qh = logging.handlers.QueueHandler(q)
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        root.addHandler(qh)
        levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
                  logging.CRITICAL]
        loggers = ['foo', 'foo.bar', 'foo.bar.baz',
                   'spam', 'spam.ham', 'spam.ham.eggs']
        for i in range(100):
            lvl = random.choice(levels)
            logger = logging.getLogger(random.choice(loggers))
            logger.log(lvl, 'Message no. %d', i)

    if __name__ == '__main__':
        q = Queue()
        d = {
            'version': 1,
            'formatters': {
                'detailed': {
                    'class': 'logging.Formatter',
                    'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
                }
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'level': 'INFO',
                },
                'file': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'foofile': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-foo.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'errors': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-errors.log',
                    'mode': 'w',
                    'level': 'ERROR',
                    'formatter': 'detailed',
                },
            },
            'loggers': {
                'foo': {
                    'handlers': ['foofile']
                }
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console', 'file', 'errors']
            },
        }
        workers = []
        for i in range(5):
            wp = Process(target=worker_process, name='worker %d' % (i + 1), args=(q,))
            workers.append(wp)
            wp.start()
        logging.config.dictConfig(d)
        lp = threading.Thread(target=logger_thread, args=(q,))
        lp.start()
        # At this point, the main process could do some useful work of its own
        # Once it's done that, it can wait for the workers to terminate...
        for wp in workers:
            wp.join()
        # And now tell the logging thread to finish up, too
        q.put(None)
        lp.join()

This variant shows how you can apply configuration for particular loggers
- e.g. the ``foo`` logger has a special handler which stores all events in the
``foo`` subsystem in a file ``mplog-foo.log``. This will be used by the logging
machinery in the main process (even though the logging events are generated in
the worker processes) to direct the messages to the appropriate destinations.

Using concurrent.futures.ProcessPoolExecutor
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you want to use :class:`concurrent.futures.ProcessPoolExecutor` to start
your worker processes, you need to create the queue slightly differently.
Instead of

.. code-block:: python

   queue = multiprocessing.Queue(-1)

you should use

.. code-block:: python

   queue = multiprocessing.Manager().Queue(-1)  # also works with the examples above

and you can then replace the worker creation from this::

    workers = []
    for i in range(10):
        worker = multiprocessing.Process(target=worker_process,
                                         args=(queue, worker_configurer))
        workers.append(worker)
        worker.start()
    for w in workers:
        w.join()

to this (remembering to first import :mod:`concurrent.futures`)::

    with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
        for i in range(10):
            executor.submit(worker_process, queue, worker_configurer)


Using file rotation
-------------------

.. sectionauthor:: Doug Hellmann, Vinay Sajip (changes)
.. (see <https://pymotw.com/3/logging/>)

Sometimes you want to let a log file grow to a certain size, then open a new
file and log to that. You may want to keep a certain number of these files, and
when that many files have been created, rotate the files so that the number of
files and the size of the files both remain bounded. For this usage pattern, the
logging package provides a :class:`~handlers.RotatingFileHandler`::

    import glob
    import logging
    import logging.handlers

    LOG_FILENAME = 'logging_rotatingfile_example.out'

    # Set up a specific logger with our desired output level
    my_logger = logging.getLogger('MyLogger')
    my_logger.setLevel(logging.DEBUG)

    # Add the log message handler to the logger
    handler = logging.handlers.RotatingFileHandler(
        LOG_FILENAME, maxBytes=20, backupCount=5)

    my_logger.addHandler(handler)

    # Log some messages
    for i in range(20):
        my_logger.debug('i = %d' % i)

    # See what files are created
    logfiles = glob.glob('%s*' % LOG_FILENAME)

    for filename in logfiles:
        print(filename)

The result should be 6 separate files, each with part of the log history for the
application:

.. code-block:: none

   logging_rotatingfile_example.out
   logging_rotatingfile_example.out.1
   logging_rotatingfile_example.out.2
   logging_rotatingfile_example.out.3
   logging_rotatingfile_example.out.4
   logging_rotatingfile_example.out.5

The most current file is always :file:`logging_rotatingfile_example.out`,
and each time it reaches the size limit it is renamed with the suffix
``.1``. Each of the existing backup files is renamed to increment the suffix
(``.1`` becomes ``.2``, etc.) and the ``.6`` file is erased.

Obviously this example sets the log file size much too small as an extreme
example. You would want to set *maxBytes* to an appropriate value.
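
For instance, something along these lines (the numbers are purely illustrative,
and the snippet continues the example above) caps each file at roughly a
megabyte and keeps five backups::

    handler = logging.handlers.RotatingFileHandler(
        LOG_FILENAME, maxBytes=1000000, backupCount=5)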

.. _format-styles:

Use of alternative formatting styles
------------------------------------

When logging was added to the Python standard library, the only way of
formatting messages with variable content was to use the %-formatting
method. Since then, Python has gained two new formatting approaches:
:class:`string.Template` (added in Python 2.4) and :meth:`str.format`
(added in Python 2.6).

Logging (as of 3.2) provides improved support for these two additional
formatting styles. The :class:`Formatter` class has been enhanced to take an
1057additional, optional keyword parameter named ``style``. This defaults to
1058``'%'``, but other possible values are ``'{'`` and ``'$'``, which correspond
Vinay Sajip6b883a22012-02-27 11:02:45 +00001059to the other two formatting styles. Backwards compatibility is maintained by
1060default (as you would expect), but by explicitly specifying a style parameter,
1061you get the ability to specify format strings which work with
1062:meth:`str.format` or :class:`string.Template`. Here's an example console
1063session to show the possibilities:
1064
1065.. code-block:: pycon
1066
1067 >>> import logging
1068 >>> root = logging.getLogger()
1069 >>> root.setLevel(logging.DEBUG)
1070 >>> handler = logging.StreamHandler()
1071 >>> bf = logging.Formatter('{asctime} {name} {levelname:8s} {message}',
1072 ... style='{')
1073 >>> handler.setFormatter(bf)
1074 >>> root.addHandler(handler)
1075 >>> logger = logging.getLogger('foo.bar')
1076 >>> logger.debug('This is a DEBUG message')
1077 2010-10-28 15:11:55,341 foo.bar DEBUG This is a DEBUG message
1078 >>> logger.critical('This is a CRITICAL message')
1079 2010-10-28 15:12:11,526 foo.bar CRITICAL This is a CRITICAL message
1080 >>> df = logging.Formatter('$asctime $name ${levelname} $message',
1081 ... style='$')
1082 >>> handler.setFormatter(df)
1083 >>> logger.debug('This is a DEBUG message')
1084 2010-10-28 15:13:06,924 foo.bar DEBUG This is a DEBUG message
1085 >>> logger.critical('This is a CRITICAL message')
1086 2010-10-28 15:13:11,494 foo.bar CRITICAL This is a CRITICAL message
1087 >>>
1088
1089Note that the formatting of logging messages for final output to logs is
1090completely independent of how an individual logging message is constructed.
1091That can still use %-formatting, as shown here::
1092
1093 >>> logger.error('This is an%s %s %s', 'other,', 'ERROR,', 'message')
1094 2010-10-28 15:19:29,833 foo.bar ERROR This is another, ERROR, message
1095 >>>
1096
1097Logging calls (``logger.debug()``, ``logger.info()`` etc.) only take
1098positional parameters for the actual logging message itself, with keyword
1099parameters used only for determining options for how to handle the actual
1100logging call (e.g. the ``exc_info`` keyword parameter to indicate that
1101traceback information should be logged, or the ``extra`` keyword parameter
1102to indicate additional contextual information to be added to the log). So
1103you cannot directly make logging calls using :meth:`str.format` or
1104:class:`string.Template` syntax, because internally the logging package
1105uses %-formatting to merge the format string and the variable arguments.
Brett Cannona3110a02017-08-18 10:00:31 -07001106There would be no changing this while preserving backward compatibility, since
Vinay Sajip6b883a22012-02-27 11:02:45 +00001107all logging calls which are out there in existing code will be using %-format
1108strings.
1109
1110There is, however, a way that you can use {}- and $- formatting to construct
1111your individual log messages. Recall that for a message you can use an
1112arbitrary object as a message format string, and that the logging package will
1113call ``str()`` on that object to get the actual format string. Consider the
1114following two classes::
1115
Ezio Melottiaf8838f2013-03-11 09:30:21 +02001116 class BraceMessage:
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001117 def __init__(self, fmt, /, *args, **kwargs):
Vinay Sajip6b883a22012-02-27 11:02:45 +00001118 self.fmt = fmt
1119 self.args = args
1120 self.kwargs = kwargs
1121
1122 def __str__(self):
1123 return self.fmt.format(*self.args, **self.kwargs)
1124
Ezio Melottiaf8838f2013-03-11 09:30:21 +02001125 class DollarMessage:
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001126 def __init__(self, fmt, /, **kwargs):
Vinay Sajip6b883a22012-02-27 11:02:45 +00001127 self.fmt = fmt
1128 self.kwargs = kwargs
1129
1130 def __str__(self):
1131 from string import Template
1132 return Template(self.fmt).substitute(**self.kwargs)
1133
1134Either of these can be used in place of a format string, to allow {}- or
1135$-formatting to be used to build the actual "message" part which appears in the
1136formatted log output in place of "%(message)s" or "{message}" or "$message".
1137It's a little unwieldy to use the class names whenever you want to log
1138something, but it's quite palatable if you use an alias such as __ (double
Serhiy Storchaka29b0a262016-12-04 10:20:55 +02001139underscore --- not to be confused with _, the single underscore used as a
Vinay Sajip6b883a22012-02-27 11:02:45 +00001140synonym/alias for :func:`gettext.gettext` or its brethren).
1141
1142The above classes are not included in Python, though they're easy enough to
1143copy and paste into your own code. They can be used as follows (assuming that
1144they're declared in a module called ``wherever``):
1145
1146.. code-block:: pycon
1147
1148 >>> from wherever import BraceMessage as __
Vinay Sajip39b83ac2012-02-28 08:05:23 +00001149 >>> print(__('Message with {0} {name}', 2, name='placeholders'))
Vinay Sajip6b883a22012-02-27 11:02:45 +00001150 Message with 2 placeholders
1151 >>> class Point: pass
1152 ...
1153 >>> p = Point()
1154 >>> p.x = 0.5
1155 >>> p.y = 0.5
1156 >>> print(__('Message with coordinates: ({point.x:.2f}, {point.y:.2f})',
1157 ... point=p))
1158 Message with coordinates: (0.50, 0.50)
1159 >>> from wherever import DollarMessage as __
1160 >>> print(__('Message with $num $what', num=2, what='placeholders'))
1161 Message with 2 placeholders
1162 >>>
1163
Vinay Sajip39b83ac2012-02-28 08:05:23 +00001164While the above examples use ``print()`` to show how the formatting works, you
1165would of course use ``logger.debug()`` or similar to actually log using this
1166approach.
1167
Vinay Sajip6b883a22012-02-27 11:02:45 +00001168One thing to note is that you pay no significant performance penalty with this
1169approach: the actual formatting happens not when you make the logging call, but
1170when (and if) the logged message is actually about to be output to a log by a
1171handler. So the only slightly unusual thing which might trip you up is that the
1172parentheses go around the format string and the arguments, not just the format
1173string. That's because the __ notation is just syntax sugar for a constructor
1174call to one of the XXXMessage classes.
1175
Vinay Sajip8028a5c2013-03-30 11:56:18 +00001176If you prefer, you can use a :class:`LoggerAdapter` to achieve a similar effect
1177to the above, as in the following example::
1178
1179 import logging
1180
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001181 class Message:
Vinay Sajip8028a5c2013-03-30 11:56:18 +00001182 def __init__(self, fmt, args):
1183 self.fmt = fmt
1184 self.args = args
1185
1186 def __str__(self):
1187 return self.fmt.format(*self.args)
1188
1189 class StyleAdapter(logging.LoggerAdapter):
1190 def __init__(self, logger, extra=None):
1191 super(StyleAdapter, self).__init__(logger, extra or {})
1192
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001193 def log(self, level, msg, /, *args, **kwargs):
Vinay Sajip8028a5c2013-03-30 11:56:18 +00001194 if self.isEnabledFor(level):
1195 msg, kwargs = self.process(msg, kwargs)
1196 self.logger._log(level, Message(msg, args), (), **kwargs)
1197
1198 logger = StyleAdapter(logging.getLogger(__name__))
1199
1200 def main():
1201 logger.debug('Hello, {}', 'world!')
1202
1203 if __name__ == '__main__':
1204 logging.basicConfig(level=logging.DEBUG)
1205 main()
1206
1207The above script should log the message ``Hello, world!`` when run with
1208Python 3.2 or later.
1209
Vinay Sajip6b883a22012-02-27 11:02:45 +00001210
Vinay Sajip982f5342012-02-27 11:56:29 +00001211.. currentmodule:: logging
1212
Georg Brandle9983862012-02-28 08:21:40 +01001213.. _custom-logrecord:
Vinay Sajip982f5342012-02-27 11:56:29 +00001214
Vinay Sajip9c10d6b2013-11-15 20:58:13 +00001215Customizing ``LogRecord``
Vinay Sajip982f5342012-02-27 11:56:29 +00001216-------------------------
1217
1218Every logging event is represented by a :class:`LogRecord` instance.
1219When an event is logged and not filtered out by a logger's level, a
1220:class:`LogRecord` is created, populated with information about the event and
1221then passed to the handlers for that logger (and its ancestors, up to and
1222including the logger where further propagation up the hierarchy is disabled).
1223Before Python 3.2, there were only two places where this creation was done:
1224
1225* :meth:`Logger.makeRecord`, which is called in the normal process of
1226 logging an event. This invoked :class:`LogRecord` directly to create an
1227 instance.
1228* :func:`makeLogRecord`, which is called with a dictionary containing
1229 attributes to be added to the LogRecord. This is typically invoked when a
1230 suitable dictionary has been received over the network (e.g. in pickle form
1231 via a :class:`~handlers.SocketHandler`, or in JSON form via an
1232 :class:`~handlers.HTTPHandler`).
1233
1234This has usually meant that if you need to do anything special with a
1235:class:`LogRecord`, you've had to do one of the following.
1236
1237* Create your own :class:`Logger` subclass, which overrides
1238 :meth:`Logger.makeRecord`, and set it using :func:`~logging.setLoggerClass`
1239 before any loggers that you care about are instantiated.
1240* Add a :class:`Filter` to a logger or handler, which does the
1241 necessary special manipulation you need when its
1242 :meth:`~Filter.filter` method is called.
1243
1244The first approach would be a little unwieldy in the scenario where (say)
1245several different libraries wanted to do different things. Each would attempt
1246to set its own :class:`Logger` subclass, and the one which did this last would
1247win.
1248
1249The second approach works reasonably well for many cases, but does not allow
1250you to e.g. use a specialized subclass of :class:`LogRecord`. Library
1251developers can set a suitable filter on their loggers, but they would have to
1252remember to do this every time they introduced a new logger (which they would
Georg Brandle9983862012-02-28 08:21:40 +01001253do simply by adding new packages or modules and doing ::
Vinay Sajip982f5342012-02-27 11:56:29 +00001254
1255 logger = logging.getLogger(__name__)
1256
1257at module level). It's probably one too many things to think about. Developers
1258could also add the filter to a :class:`~logging.NullHandler` attached to their
1259top-level logger, but this would not be invoked if an application developer
Serhiy Storchaka29b0a262016-12-04 10:20:55 +02001260attached a handler to a lower-level library logger --- so output from that
Vinay Sajip982f5342012-02-27 11:56:29 +00001261handler would not reflect the intentions of the library developer.
1262
1263In Python 3.2 and later, :class:`~logging.LogRecord` creation is done through a
1264factory, which you can specify. The factory is just a callable you can set with
1265:func:`~logging.setLogRecordFactory`, and interrogate with
1266:func:`~logging.getLogRecordFactory`. The factory is invoked with the same
1267signature as the :class:`~logging.LogRecord` constructor, as :class:`LogRecord`
1268is the default setting for the factory.
1269
1270This approach allows a custom factory to control all aspects of LogRecord
1271creation. For example, you could return a subclass, or just add some additional
1272attributes to the record once created, using a pattern similar to this::
1273
1274 old_factory = logging.getLogRecordFactory()
1275
1276 def record_factory(*args, **kwargs):
1277 record = old_factory(*args, **kwargs)
1278 record.custom_attribute = 0xdecafbad
1279 return record
1280
1281 logging.setLogRecordFactory(record_factory)
1282
1283This pattern allows different libraries to chain factories together, and as
1284long as they don't overwrite each other's attributes or unintentionally
1285overwrite the attributes provided as standard, there should be no surprises.
1286However, it should be borne in mind that each link in the chain adds run-time
1287overhead to all logging operations, and the technique should only be used when
1288the use of a :class:`Filter` does not provide the desired result.
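
As an illustration of such chaining, here is a minimal sketch in which two
libraries each wrap whichever factory was installed before them; the attribute
names and values are purely hypothetical::

    import logging

    def install_library_a_factory():
        old_factory = logging.getLogRecordFactory()

        def factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.lib_a_version = '1.0'        # hypothetical attribute
            return record

        logging.setLogRecordFactory(factory)

    def install_library_b_factory():
        old_factory = logging.getLogRecordFactory()

        def factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.lib_b_request_id = 'n/a'     # hypothetical attribute
            return record

        logging.setLogRecordFactory(factory)

    install_library_a_factory()
    install_library_b_factory()   # wraps A's factory, which wraps LogRecord
    logging.basicConfig(format='%(lib_a_version)s %(lib_b_request_id)s %(message)s')
    logging.warning('factories chained')   # -> 1.0 n/a factories chained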
1289
1290
Vinay Sajipc63619b2010-12-19 12:56:57 +00001291.. _zeromq-handlers:
1292
Vinay Sajip7d101292010-12-26 21:22:33 +00001293Subclassing QueueHandler - a ZeroMQ example
1294-------------------------------------------
Vinay Sajipc63619b2010-12-19 12:56:57 +00001295
1296You can use a :class:`QueueHandler` subclass to send messages to other kinds
of queues, for example a ZeroMQ 'publish' socket. In the example below, the
1298socket is created separately and passed to the handler (as its 'queue')::
1299
    import zmq      # using pyzmq, the Python binding for ZeroMQ
    import json     # for serializing records portably
    from logging.handlers import QueueHandler
Vinay Sajipc63619b2010-12-19 12:56:57 +00001302
1303 ctx = zmq.Context()
Serhiy Storchakadba90392016-05-10 12:01:23 +03001304 sock = zmq.Socket(ctx, zmq.PUB) # or zmq.PUSH, or other suitable value
1305 sock.bind('tcp://*:5556') # or wherever
Vinay Sajipc63619b2010-12-19 12:56:57 +00001306
1307 class ZeroMQSocketHandler(QueueHandler):
1308 def enqueue(self, record):
Pablo Galindo586c0502017-09-07 21:53:13 +01001309 self.queue.send_json(record.__dict__)
1310
Vinay Sajipc63619b2010-12-19 12:56:57 +00001311
1312 handler = ZeroMQSocketHandler(sock)
1313
1314
1315Of course there are other ways of organizing this, for example passing in the
1316data needed by the handler to create the socket::
1317
1318 class ZeroMQSocketHandler(QueueHandler):
1319 def __init__(self, uri, socktype=zmq.PUB, ctx=None):
1320 self.ctx = ctx or zmq.Context()
1321 socket = zmq.Socket(self.ctx, socktype)
1322 socket.bind(uri)
Pablo Galindo586c0502017-09-07 21:53:13 +01001323 super().__init__(socket)
Vinay Sajipc63619b2010-12-19 12:56:57 +00001324
1325 def enqueue(self, record):
Pablo Galindo586c0502017-09-07 21:53:13 +01001326 self.queue.send_json(record.__dict__)
Vinay Sajipc63619b2010-12-19 12:56:57 +00001327
1328 def close(self):
1329 self.queue.close()
1330
1331
Vinay Sajip7d101292010-12-26 21:22:33 +00001332Subclassing QueueListener - a ZeroMQ example
1333--------------------------------------------
Vinay Sajipc63619b2010-12-19 12:56:57 +00001334
1335You can also subclass :class:`QueueListener` to get messages from other kinds
1336of queues, for example a ZeroMQ 'subscribe' socket. Here's an example::
1337
    import logging
    from logging.handlers import QueueListener

    import zmq  # using pyzmq, the Python binding for ZeroMQ

    class ZeroMQSocketListener(QueueListener):
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001339 def __init__(self, uri, /, *handlers, **kwargs):
Vinay Sajipc63619b2010-12-19 12:56:57 +00001340 self.ctx = kwargs.get('ctx') or zmq.Context()
1341 socket = zmq.Socket(self.ctx, zmq.SUB)
Pablo Galindo586c0502017-09-07 21:53:13 +01001342 socket.setsockopt_string(zmq.SUBSCRIBE, '') # subscribe to everything
Vinay Sajipc63619b2010-12-19 12:56:57 +00001343 socket.connect(uri)
Pablo Galindo586c0502017-09-07 21:53:13 +01001344 super().__init__(socket, *handlers, **kwargs)
Vinay Sajipc63619b2010-12-19 12:56:57 +00001345
1346 def dequeue(self):
Pablo Galindo586c0502017-09-07 21:53:13 +01001347 msg = self.queue.recv_json()
1348 return logging.makeLogRecord(msg)
Vinay Sajipc63619b2010-12-19 12:56:57 +00001349
1350
Vinay Sajip7d101292010-12-26 21:22:33 +00001351.. seealso::
Vinay Sajipc63619b2010-12-19 12:56:57 +00001352
Vinay Sajip7d101292010-12-26 21:22:33 +00001353 Module :mod:`logging`
1354 API reference for the logging module.
1355
1356 Module :mod:`logging.config`
1357 Configuration API for the logging module.
1358
1359 Module :mod:`logging.handlers`
1360 Useful handlers included with the logging module.
1361
1362 :ref:`A basic logging tutorial <logging-basic-tutorial>`
1363
1364 :ref:`A more advanced logging tutorial <logging-advanced-tutorial>`
Vinay Sajip631a7e22011-11-23 14:27:54 +00001365
1366
1367An example dictionary-based configuration
1368-----------------------------------------
1369
1370Below is an example of a logging configuration dictionary - it's taken from
Serhiy Storchaka90be7332016-04-11 12:18:56 +03001371the `documentation on the Django project <https://docs.djangoproject.com/en/1.9/topics/logging/#configuring-logging>`_.
Vinay Sajip67f39772013-08-17 00:39:42 +01001372This dictionary is passed to :func:`~config.dictConfig` to put the configuration into effect::
Vinay Sajip631a7e22011-11-23 14:27:54 +00001373
1374 LOGGING = {
1375 'version': 1,
1376 'disable_existing_loggers': True,
1377 'formatters': {
1378 'verbose': {
1379 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
1380 },
1381 'simple': {
1382 'format': '%(levelname)s %(message)s'
1383 },
1384 },
1385 'filters': {
1386 'special': {
1387 '()': 'project.logging.SpecialFilter',
1388 'foo': 'bar',
1389 }
1390 },
1391 'handlers': {
1392 'null': {
1393 'level':'DEBUG',
1394 'class':'django.utils.log.NullHandler',
1395 },
1396 'console':{
1397 'level':'DEBUG',
1398 'class':'logging.StreamHandler',
1399 'formatter': 'simple'
1400 },
1401 'mail_admins': {
1402 'level': 'ERROR',
1403 'class': 'django.utils.log.AdminEmailHandler',
1404 'filters': ['special']
1405 }
1406 },
1407 'loggers': {
1408 'django': {
1409 'handlers':['null'],
1410 'propagate': True,
1411 'level':'INFO',
1412 },
1413 'django.request': {
1414 'handlers': ['mail_admins'],
1415 'level': 'ERROR',
1416 'propagate': False,
1417 },
1418 'myproject.custom': {
1419 'handlers': ['console', 'mail_admins'],
1420 'level': 'INFO',
1421 'filters': ['special']
1422 }
1423 }
1424 }
1425
1426For more information about this configuration, you can see the `relevant
Serhiy Storchaka90be7332016-04-11 12:18:56 +03001427section <https://docs.djangoproject.com/en/1.9/topics/logging/#configuring-logging>`_
Vinay Sajip631a7e22011-11-23 14:27:54 +00001428of the Django documentation.
Vinay Sajip23b94d02012-01-04 12:02:26 +00001429
1430.. _cookbook-rotator-namer:
1431
Vinay Sajip9c10d6b2013-11-15 20:58:13 +00001432Using a rotator and namer to customize log rotation processing
Vinay Sajip23b94d02012-01-04 12:02:26 +00001433--------------------------------------------------------------
1434
1435An example of how you can define a namer and rotator is given in the following
1436snippet, which shows zlib-based compression of the log file::
1437
    import logging.handlers
    import os
    import zlib

    def namer(name):
1439 return name + ".gz"
1440
1441 def rotator(source, dest):
1442 with open(source, "rb") as sf:
1443 data = sf.read()
1444 compressed = zlib.compress(data, 9)
1445 with open(dest, "wb") as df:
1446 df.write(compressed)
1447 os.remove(source)
1448
1449 rh = logging.handlers.RotatingFileHandler(...)
1450 rh.rotator = rotator
1451 rh.namer = namer
1452
Ezio Melotti226231c2012-01-18 05:40:00 +02001453These are not "true" .gz files, as they are bare compressed data, with no
"container" such as you'd find in an actual gzip file. This snippet is just
Vinay Sajip23b94d02012-01-04 12:02:26 +00001455for illustration purposes.
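
If you do want rotated files that other tools can read as ordinary gzip files,
a sketch along the following lines, using the :mod:`gzip` module instead of
:mod:`zlib`, should do the trick; the rotator and namer would be attached to
the handler exactly as shown above::

    import gzip
    import os
    import shutil

    def namer(name):
        return name + ".gz"

    def rotator(source, dest):
        with open(source, "rb") as sf, gzip.open(dest, "wb") as df:
            shutil.copyfileobj(sf, df)
        os.remove(source)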
1456
Vinay Sajip0292fa92012-04-08 01:49:12 +01001457A more elaborate multiprocessing example
1458----------------------------------------
1459
1460The following working example shows how logging can be used with multiprocessing
using configuration dictionaries. The configurations are fairly simple, but serve to
1462illustrate how more complex ones could be implemented in a real multiprocessing
1463scenario.
1464
1465In the example, the main process spawns a listener process and some worker
1466processes. Each of the main process, the listener and the workers have three
1467separate configurations (the workers all share the same configuration). We can
1468see logging in the main process, how the workers log to a QueueHandler and how
1469the listener implements a QueueListener and a more complex logging
1470configuration, and arranges to dispatch events received via the queue to the
1471handlers specified in the configuration. Note that these configurations are
1472purely illustrative, but you should be able to adapt this example to your own
1473scenario.
1474
1475Here's the script - the docstrings and the comments hopefully explain how it
1476works::
1477
1478 import logging
1479 import logging.config
1480 import logging.handlers
1481 from multiprocessing import Process, Queue, Event, current_process
1482 import os
1483 import random
1484 import time
1485
Ezio Melottiaf8838f2013-03-11 09:30:21 +02001486 class MyHandler:
Vinay Sajip0292fa92012-04-08 01:49:12 +01001487 """
1488 A simple handler for logging events. It runs in the listener process and
1489 dispatches events to loggers based on the name in the received record,
1490 which then get dispatched, by the logging system, to the handlers
Vinay Sajip838e6382012-04-09 19:46:24 +01001491 configured for those loggers.
Vinay Sajip0292fa92012-04-08 01:49:12 +01001492 """
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001493
Vinay Sajip0292fa92012-04-08 01:49:12 +01001494 def handle(self, record):
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001495 if record.name == "root":
1496 logger = logging.getLogger()
1497 else:
1498 logger = logging.getLogger(record.name)
1499
1500 if logger.isEnabledFor(record.levelno):
1501 # The process name is transformed just to show that it's the listener
1502 # doing the logging to files and console
1503 record.processName = '%s (for %s)' % (current_process().name, record.processName)
1504 logger.handle(record)
Vinay Sajip0292fa92012-04-08 01:49:12 +01001505
1506 def listener_process(q, stop_event, config):
1507 """
1508 This could be done in the main process, but is just done in a separate
1509 process for illustrative purposes.
1510
1511 This initialises logging according to the specified configuration,
1512 starts the listener and waits for the main process to signal completion
1513 via the event. The listener is then stopped, and the process exits.
1514 """
1515 logging.config.dictConfig(config)
1516 listener = logging.handlers.QueueListener(q, MyHandler())
1517 listener.start()
1518 if os.name == 'posix':
1519 # On POSIX, the setup logger will have been configured in the
1520 # parent process, but should have been disabled following the
1521 # dictConfig call.
1522 # On Windows, since fork isn't used, the setup logger won't
1523 # exist in the child, so it would be created and the message
1524 # would appear - hence the "if posix" clause.
1525 logger = logging.getLogger('setup')
1526 logger.critical('Should not appear, because of disabled logger ...')
1527 stop_event.wait()
1528 listener.stop()
1529
1530 def worker_process(config):
1531 """
1532 A number of these are spawned for the purpose of illustration. In
Berker Peksag315e1042015-05-19 01:36:55 +03001533 practice, they could be a heterogeneous bunch of processes rather than
Vinay Sajip0292fa92012-04-08 01:49:12 +01001534 ones which are identical to each other.
1535
1536 This initialises logging according to the specified configuration,
1537 and logs a hundred messages with random levels to randomly selected
1538 loggers.
1539
1540 A small sleep is added to allow other processes a chance to run. This
1541 is not strictly needed, but it mixes the output from the different
1542 processes a bit more than if it's left out.
1543 """
1544 logging.config.dictConfig(config)
1545 levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
1546 logging.CRITICAL]
1547 loggers = ['foo', 'foo.bar', 'foo.bar.baz',
1548 'spam', 'spam.ham', 'spam.ham.eggs']
1549 if os.name == 'posix':
1550 # On POSIX, the setup logger will have been configured in the
1551 # parent process, but should have been disabled following the
1552 # dictConfig call.
1553 # On Windows, since fork isn't used, the setup logger won't
1554 # exist in the child, so it would be created and the message
1555 # would appear - hence the "if posix" clause.
1556 logger = logging.getLogger('setup')
1557 logger.critical('Should not appear, because of disabled logger ...')
1558 for i in range(100):
1559 lvl = random.choice(levels)
1560 logger = logging.getLogger(random.choice(loggers))
1561 logger.log(lvl, 'Message no. %d', i)
1562 time.sleep(0.01)
1563
1564 def main():
1565 q = Queue()
1566 # The main process gets a simple configuration which prints to the console.
1567 config_initial = {
1568 'version': 1,
Vinay Sajip0292fa92012-04-08 01:49:12 +01001569 'handlers': {
1570 'console': {
1571 'class': 'logging.StreamHandler',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001572 'level': 'INFO'
1573 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001574 },
1575 'root': {
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001576 'handlers': ['console'],
1577 'level': 'DEBUG'
1578 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001579 }
1580 # The worker process configuration is just a QueueHandler attached to the
1581 # root logger, which allows all messages to be sent to the queue.
1582 # We disable existing loggers to disable the "setup" logger used in the
1583 # parent process. This is needed on POSIX because the logger will
1584 # be there in the child following a fork().
1585 config_worker = {
1586 'version': 1,
1587 'disable_existing_loggers': True,
1588 'handlers': {
1589 'queue': {
1590 'class': 'logging.handlers.QueueHandler',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001591 'queue': q
1592 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001593 },
1594 'root': {
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001595 'handlers': ['queue'],
1596 'level': 'DEBUG'
1597 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001598 }
1599 # The listener process configuration shows that the full flexibility of
1600 # logging configuration is available to dispatch events to handlers however
1601 # you want.
1602 # We disable existing loggers to disable the "setup" logger used in the
1603 # parent process. This is needed on POSIX because the logger will
1604 # be there in the child following a fork().
1605 config_listener = {
1606 'version': 1,
1607 'disable_existing_loggers': True,
1608 'formatters': {
1609 'detailed': {
1610 'class': 'logging.Formatter',
1611 'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
1612 },
1613 'simple': {
1614 'class': 'logging.Formatter',
1615 'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
1616 }
1617 },
1618 'handlers': {
1619 'console': {
1620 'class': 'logging.StreamHandler',
Vinay Sajip0292fa92012-04-08 01:49:12 +01001621 'formatter': 'simple',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001622 'level': 'INFO'
Vinay Sajip0292fa92012-04-08 01:49:12 +01001623 },
1624 'file': {
1625 'class': 'logging.FileHandler',
1626 'filename': 'mplog.log',
1627 'mode': 'w',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001628 'formatter': 'detailed'
Vinay Sajip0292fa92012-04-08 01:49:12 +01001629 },
1630 'foofile': {
1631 'class': 'logging.FileHandler',
1632 'filename': 'mplog-foo.log',
1633 'mode': 'w',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001634 'formatter': 'detailed'
Vinay Sajip0292fa92012-04-08 01:49:12 +01001635 },
1636 'errors': {
1637 'class': 'logging.FileHandler',
1638 'filename': 'mplog-errors.log',
1639 'mode': 'w',
Vinay Sajip0292fa92012-04-08 01:49:12 +01001640 'formatter': 'detailed',
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001641 'level': 'ERROR'
1642 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001643 },
1644 'loggers': {
1645 'foo': {
Serhiy Storchakaf47036c2013-12-24 11:04:36 +02001646 'handlers': ['foofile']
Vinay Sajip0292fa92012-04-08 01:49:12 +01001647 }
1648 },
1649 'root': {
Géry Ogam5b3cbcd2018-09-25 09:24:52 +02001650 'handlers': ['console', 'file', 'errors'],
1651 'level': 'DEBUG'
1652 }
Vinay Sajip0292fa92012-04-08 01:49:12 +01001653 }
1654 # Log some initial events, just to show that logging in the parent works
1655 # normally.
1656 logging.config.dictConfig(config_initial)
1657 logger = logging.getLogger('setup')
1658 logger.info('About to create workers ...')
1659 workers = []
1660 for i in range(5):
1661 wp = Process(target=worker_process, name='worker %d' % (i + 1),
1662 args=(config_worker,))
1663 workers.append(wp)
1664 wp.start()
1665 logger.info('Started worker: %s', wp.name)
1666 logger.info('About to create listener ...')
1667 stop_event = Event()
1668 lp = Process(target=listener_process, name='listener',
1669 args=(q, stop_event, config_listener))
1670 lp.start()
1671 logger.info('Started listener')
1672 # We now hang around for the workers to finish their work.
1673 for wp in workers:
1674 wp.join()
1675 # Workers all done, listening can now stop.
1676 # Logging in the parent still works normally.
1677 logger.info('Telling listener to stop ...')
1678 stop_event.set()
1679 lp.join()
1680 logger.info('All done.')
1681
1682 if __name__ == '__main__':
1683 main()
1684
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001685
1686Inserting a BOM into messages sent to a SysLogHandler
1687-----------------------------------------------------
1688
Serhiy Storchaka0a36ac12018-05-31 07:39:00 +03001689:rfc:`5424` requires that a
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001690Unicode message be sent to a syslog daemon as a set of bytes which have the
1691following structure: an optional pure-ASCII component, followed by a UTF-8 Byte
Serhiy Storchaka0a36ac12018-05-31 07:39:00 +03001692Order Mark (BOM), followed by Unicode encoded using UTF-8. (See the
1693:rfc:`relevant section of the specification <5424#section-6>`.)
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001694
Vinay Sajip62930e12012-04-17 00:40:48 +01001695In Python 3.1, code was added to
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001696:class:`~logging.handlers.SysLogHandler` to insert a BOM into the message, but
1697unfortunately, it was implemented incorrectly, with the BOM appearing at the
1698beginning of the message and hence not allowing any pure-ASCII component to
1699appear before it.
1700
1701As this behaviour is broken, the incorrect BOM insertion code is being removed
Vinay Sajip62930e12012-04-17 00:40:48 +01001702from Python 3.2.4 and later. However, it is not being replaced, and if you
Serhiy Storchaka0a36ac12018-05-31 07:39:00 +03001703want to produce :rfc:`5424`-compliant messages which include a BOM, an optional
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001704pure-ASCII sequence before it and arbitrary Unicode after it, encoded using
1705UTF-8, then you need to do the following:
1706
1707#. Attach a :class:`~logging.Formatter` instance to your
1708 :class:`~logging.handlers.SysLogHandler` instance, with a format string
1709 such as::
1710
Vinay Sajip59b9a792012-04-16 15:46:18 +01001711 'ASCII section\ufeffUnicode section'
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001712
Georg Brandld50fe722013-03-23 16:00:41 +01001713 The Unicode code point U+FEFF, when encoded using UTF-8, will be
Vinay Sajip59b9a792012-04-16 15:46:18 +01001714 encoded as a UTF-8 BOM -- the byte-string ``b'\xef\xbb\xbf'``.
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001715
1716#. Replace the ASCII section with whatever placeholders you like, but make sure
1717 that the data that appears in there after substitution is always ASCII (that
1718 way, it will remain unchanged after UTF-8 encoding).
1719
1720#. Replace the Unicode section with whatever placeholders you like; if the data
Vinay Sajipa58d6682012-07-27 10:54:10 +01001721 which appears there after substitution contains characters outside the ASCII
1722 range, that's fine -- it will be encoded using UTF-8.
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001723
Vinay Sajip59b9a792012-04-16 15:46:18 +01001724The formatted message *will* be encoded using UTF-8 encoding by
1725``SysLogHandler``. If you follow the above rules, you should be able to produce
Serhiy Storchaka0a36ac12018-05-31 07:39:00 +03001726:rfc:`5424`-compliant messages. If you don't, logging may not complain, but your
Vinay Sajipb00e8f12012-04-16 15:28:50 +01001727messages will not be RFC 5424-compliant, and your syslog daemon may complain.
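
Putting those rules together, a minimal sketch might look like the following
(the ``/dev/log`` address and the logger name are just assumptions here -
adjust them for your platform and application)::

    import logging
    import logging.handlers

    handler = logging.handlers.SysLogHandler(address='/dev/log')
    # Pure-ASCII placeholders before the BOM, arbitrary Unicode after it.
    handler.setFormatter(logging.Formatter('%(levelname)s \ufeff%(message)s'))

    logger = logging.getLogger('bom_example')
    logger.addHandler(handler)
    logger.warning('Fähre nach Kopenhagen')  # non-ASCII is fine after the BOM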
1728
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001729
1730Implementing structured logging
1731-------------------------------
1732
1733Although most logging messages are intended for reading by humans, and thus not
delirious-lettuce3378b202017-05-19 14:37:57 -06001734readily machine-parseable, there might be circumstances where you want to output
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001735messages in a structured format which *is* capable of being parsed by a program
Vinay Sajip3d9e9722013-01-23 09:31:19 +00001736(without needing complex regular expressions to parse the log message). This is
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001737straightforward to achieve using the logging package. There are a number of
1738ways in which this could be achieved, but the following is a simple approach
1739which uses JSON to serialise the event in a machine-parseable manner::
1740
1741 import json
1742 import logging
1743
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001744 class StructuredMessage:
1745 def __init__(self, message, /, **kwargs):
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001746 self.message = message
1747 self.kwargs = kwargs
1748
1749 def __str__(self):
1750 return '%s >>> %s' % (self.message, json.dumps(self.kwargs))
1751
1752 _ = StructuredMessage # optional, to improve readability
1753
1754 logging.basicConfig(level=logging.INFO, format='%(message)s')
1755 logging.info(_('message 1', foo='bar', bar='baz', num=123, fnum=123.456))
1756
Serhiy Storchaka46936d52018-04-08 19:18:04 +03001757If the above script is run, it prints:
1758
1759.. code-block:: none
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001760
1761 message 1 >>> {"fnum": 123.456, "num": 123, "bar": "baz", "foo": "bar"}
1762
Vinay Sajip3d9e9722013-01-23 09:31:19 +00001763Note that the order of items might be different according to the version of
1764Python used.
1765
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001766If you need more specialised processing, you can use a custom JSON encoder,
1767as in the following complete example::
1768
1769 from __future__ import unicode_literals
1770
1771 import json
1772 import logging
1773
Vinay Sajip3d9e9722013-01-23 09:31:19 +00001774 # This next bit is to ensure the script runs unchanged on 2.x and 3.x
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001775 try:
1776 unicode
1777 except NameError:
1778 unicode = str
1779
1780 class Encoder(json.JSONEncoder):
1781 def default(self, o):
1782 if isinstance(o, set):
1783 return tuple(o)
1784 elif isinstance(o, unicode):
1785 return o.encode('unicode_escape').decode('ascii')
1786 return super(Encoder, self).default(o)
1787
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03001788 class StructuredMessage:
1789 def __init__(self, message, /, **kwargs):
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001790 self.message = message
1791 self.kwargs = kwargs
1792
1793 def __str__(self):
1794 s = Encoder().encode(self.kwargs)
1795 return '%s >>> %s' % (self.message, s)
1796
Vinay Sajip3d9e9722013-01-23 09:31:19 +00001797 _ = StructuredMessage # optional, to improve readability
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001798
1799 def main():
1800 logging.basicConfig(level=logging.INFO, format='%(message)s')
Raymond Hettingerdf1b6992014-11-09 15:56:33 -08001801 logging.info(_('message 1', set_value={1, 2, 3}, snowman='\u2603'))
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001802
1803 if __name__ == '__main__':
1804 main()
1805
Serhiy Storchaka46936d52018-04-08 19:18:04 +03001806When the above script is run, it prints:
1807
1808.. code-block:: none
Vinay Sajip4b88d6c2013-01-22 15:57:39 +00001809
1810 message 1 >>> {"snowman": "\u2603", "set_value": [1, 2, 3]}
1811
Vinay Sajip3d9e9722013-01-23 09:31:19 +00001812Note that the order of items might be different according to the version of
1813Python used.
1814
Vinay Sajip554f22f2014-02-03 11:51:45 +00001815
1816.. _custom-handlers:
1817
Vinay Sajip2c1adcb2013-11-05 10:02:21 +00001818.. currentmodule:: logging.config
1819
Vinay Sajip9c10d6b2013-11-15 20:58:13 +00001820Customizing handlers with :func:`dictConfig`
Vinay Sajip2c1adcb2013-11-05 10:02:21 +00001821--------------------------------------------
1822
Vinay Sajip9c10d6b2013-11-15 20:58:13 +00001823There are times when you want to customize logging handlers in particular ways,
Vinay Sajip2c1adcb2013-11-05 10:02:21 +00001824and if you use :func:`dictConfig` you may be able to do this without
1825subclassing. As an example, consider that you may want to set the ownership of a
1826log file. On POSIX, this is easily done using :func:`shutil.chown`, but the file
Vinay Sajip9c10d6b2013-11-15 20:58:13 +00001827handlers in the stdlib don't offer built-in support. You can customize handler
Vinay Sajip2c1adcb2013-11-05 10:02:21 +00001828creation using a plain function such as::
1829
1830 def owned_file_handler(filename, mode='a', encoding=None, owner=None):
1831 if owner:
1832 if not os.path.exists(filename):
1833 open(filename, 'a').close()
1834 shutil.chown(filename, *owner)
1835 return logging.FileHandler(filename, mode, encoding)
1836
1837You can then specify, in a logging configuration passed to :func:`dictConfig`,
1838that a logging handler be created by calling this function::
1839
1840 LOGGING = {
1841 'version': 1,
1842 'disable_existing_loggers': False,
1843 'formatters': {
1844 'default': {
1845 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
1846 },
1847 },
1848 'handlers': {
1849 'file':{
1850 # The values below are popped from this dictionary and
1851 # used to create the handler, set the handler's level and
1852 # its formatter.
1853 '()': owned_file_handler,
1854 'level':'DEBUG',
1855 'formatter': 'default',
1856 # The values below are passed to the handler creator callable
1857 # as keyword arguments.
1858 'owner': ['pulse', 'pulse'],
1859 'filename': 'chowntest.log',
1860 'mode': 'w',
1861 'encoding': 'utf-8',
1862 },
1863 },
1864 'root': {
1865 'handlers': ['file'],
1866 'level': 'DEBUG',
1867 },
1868 }
1869
1870In this example I am setting the ownership using the ``pulse`` user and group,
1871just for the purposes of illustration. Putting it together into a working
1872script, ``chowntest.py``::
1873
1874 import logging, logging.config, os, shutil
1875
1876 def owned_file_handler(filename, mode='a', encoding=None, owner=None):
1877 if owner:
1878 if not os.path.exists(filename):
1879 open(filename, 'a').close()
1880 shutil.chown(filename, *owner)
1881 return logging.FileHandler(filename, mode, encoding)
1882
1883 LOGGING = {
1884 'version': 1,
1885 'disable_existing_loggers': False,
1886 'formatters': {
1887 'default': {
1888 'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
1889 },
1890 },
1891 'handlers': {
1892 'file':{
1893 # The values below are popped from this dictionary and
1894 # used to create the handler, set the handler's level and
1895 # its formatter.
1896 '()': owned_file_handler,
1897 'level':'DEBUG',
1898 'formatter': 'default',
1899 # The values below are passed to the handler creator callable
1900 # as keyword arguments.
1901 'owner': ['pulse', 'pulse'],
1902 'filename': 'chowntest.log',
1903 'mode': 'w',
1904 'encoding': 'utf-8',
1905 },
1906 },
1907 'root': {
1908 'handlers': ['file'],
1909 'level': 'DEBUG',
1910 },
1911 }
1912
1913 logging.config.dictConfig(LOGGING)
1914 logger = logging.getLogger('mylogger')
1915 logger.debug('A debug message')
1916
Martin Panter1050d2d2016-07-26 11:18:21 +02001917To run this, you will probably need to run as ``root``:
1918
1919.. code-block:: shell-session
Vinay Sajip2c1adcb2013-11-05 10:02:21 +00001920
1921 $ sudo python3.3 chowntest.py
1922 $ cat chowntest.log
1923 2013-11-05 09:34:51,128 DEBUG mylogger A debug message
1924 $ ls -l chowntest.log
1925 -rw-r--r-- 1 pulse pulse 55 2013-11-05 09:34 chowntest.log
1926
1927Note that this example uses Python 3.3 because that's where :func:`shutil.chown`
1928makes an appearance. This approach should work with any Python version that
1929supports :func:`dictConfig` - namely, Python 2.7, 3.2 or later. With pre-3.3
1930versions, you would need to implement the actual ownership change using e.g.
1931:func:`os.chown`.
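
For example, a sketch of that fallback, assuming a POSIX system where the
:mod:`pwd` and :mod:`grp` modules are available to map names to numeric ids,
might look like this::

    import grp
    import logging
    import os
    import pwd

    def owned_file_handler(filename, mode='a', encoding=None, owner=None):
        if owner:
            if not os.path.exists(filename):
                open(filename, 'a').close()
            uid = pwd.getpwnam(owner[0]).pw_uid
            gid = grp.getgrnam(owner[1]).gr_gid
            os.chown(filename, uid, gid)
        return logging.FileHandler(filename, mode, encoding)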
1932
1933In practice, the handler-creating function may be in a utility module somewhere
1934in your project. Instead of the line in the configuration::
1935
1936 '()': owned_file_handler,
1937
1938you could use e.g.::
1939
1940 '()': 'ext://project.util.owned_file_handler',
1941
1942where ``project.util`` can be replaced with the actual name of the package
1943where the function resides. In the above working script, using
1944``'ext://__main__.owned_file_handler'`` should work. Here, the actual callable
1945is resolved by :func:`dictConfig` from the ``ext://`` specification.
1946
1947This example hopefully also points the way to how you could implement other
1948types of file change - e.g. setting specific POSIX permission bits - in the
1949same way, using :func:`os.chmod`.
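
For instance, here is a sketch of a creator function which makes the log file
readable and writable only by its owner (the ``0o600`` mode is purely
illustrative)::

    import logging
    import os

    def private_file_handler(filename, mode='a', encoding=None):
        if not os.path.exists(filename):
            open(filename, 'a').close()
        os.chmod(filename, 0o600)  # owner read/write only
        return logging.FileHandler(filename, mode, encoding)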
1950
1951Of course, the approach could also be extended to types of handler other than a
1952:class:`~logging.FileHandler` - for example, one of the rotating file handlers,
1953or a different type of handler altogether.
1954
Vinay Sajipcbefe3b2014-01-15 15:09:05 +00001955
1956.. currentmodule:: logging
1957
1958.. _formatting-styles:
1959
1960Using particular formatting styles throughout your application
1961--------------------------------------------------------------
1962
1963In Python 3.2, the :class:`~logging.Formatter` gained a ``style`` keyword
1964parameter which, while defaulting to ``%`` for backward compatibility, allowed
1965the specification of ``{`` or ``$`` to support the formatting approaches
1966supported by :meth:`str.format` and :class:`string.Template`. Note that this
1967governs the formatting of logging messages for final output to logs, and is
1968completely orthogonal to how an individual logging message is constructed.
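
For instance, this sketch uses ``{``-style formatting for the final output,
while the logging call itself still merges its arguments using %-formatting::

    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('{asctime} {levelname} {message}',
                                           style='{'))
    root = logging.getLogger()
    root.addHandler(handler)
    root.warning('%s to %d places is %.2f', 'pi', 2, 3.14159)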
1969
1970Logging calls (:meth:`~Logger.debug`, :meth:`~Logger.info` etc.) only take
1971positional parameters for the actual logging message itself, with keyword
1972parameters used only for determining options for how to handle the logging call
1973(e.g. the ``exc_info`` keyword parameter to indicate that traceback information
1974should be logged, or the ``extra`` keyword parameter to indicate additional
1975contextual information to be added to the log). So you cannot directly make
1976logging calls using :meth:`str.format` or :class:`string.Template` syntax,
1977because internally the logging package uses %-formatting to merge the format
string and the variable arguments. There is no way of changing this while preserving
1979backward compatibility, since all logging calls which are out there in existing
1980code will be using %-format strings.
1981
1982There have been suggestions to associate format styles with specific loggers,
1983but that approach also runs into backward compatibility problems because any
1984existing code could be using a given logger name and using %-formatting.
1985
1986For logging to work interoperably between any third-party libraries and your
1987code, decisions about formatting need to be made at the level of the
1988individual logging call. This opens up a couple of ways in which alternative
1989formatting styles can be accommodated.
1990
1991
1992Using LogRecord factories
1993^^^^^^^^^^^^^^^^^^^^^^^^^
1994
1995In Python 3.2, along with the :class:`~logging.Formatter` changes mentioned
1996above, the logging package gained the ability to allow users to set their own
1997:class:`LogRecord` subclasses, using the :func:`setLogRecordFactory` function.
1998You can use this to set your own subclass of :class:`LogRecord`, which does the
1999Right Thing by overriding the :meth:`~LogRecord.getMessage` method. The base
2000class implementation of this method is where the ``msg % args`` formatting
2001happens, and where you can substitute your alternate formatting; however, you
2002should be careful to support all formatting styles and allow %-formatting as
2003the default, to ensure interoperability with other code. Care should also be
2004taken to call ``str(self.msg)``, just as the base implementation does.
2005
2006Refer to the reference documentation on :func:`setLogRecordFactory` and
2007:class:`LogRecord` for more information.
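
As a minimal sketch - which, for brevity, handles only ``{}``-formatting rather
than supporting all the styles as recommended above - you might do something
like this::

    import logging

    class BraceLogRecord(logging.LogRecord):
        def getMessage(self):
            msg = str(self.msg)
            if self.args:
                msg = msg.format(*self.args)  # {}-style instead of msg % args
            return msg

    logging.setLogRecordFactory(BraceLogRecord)
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logging.info('Hello, {}', 'world')  # -> Hello, world

Bear in mind that once such a factory is installed it affects every logger in
the process, so %-style calls made by other code would no longer be formatted
as intended - which is why supporting %-formatting as the default is advised.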
2008
2009
2010Using custom message objects
2011^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2012
2013There is another, perhaps simpler way that you can use {}- and $- formatting to
2014construct your individual log messages. You may recall (from
2015:ref:`arbitrary-object-messages`) that when logging you can use an arbitrary
2016object as a message format string, and that the logging package will call
2017:func:`str` on that object to get the actual format string. Consider the
2018following two classes::
2019
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03002020 class BraceMessage:
2021 def __init__(self, fmt, /, *args, **kwargs):
Vinay Sajipcbefe3b2014-01-15 15:09:05 +00002022 self.fmt = fmt
2023 self.args = args
2024 self.kwargs = kwargs
2025
2026 def __str__(self):
2027 return self.fmt.format(*self.args, **self.kwargs)
2028
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03002029 class DollarMessage:
2030 def __init__(self, fmt, /, **kwargs):
Vinay Sajipcbefe3b2014-01-15 15:09:05 +00002031 self.fmt = fmt
2032 self.kwargs = kwargs
2033
2034 def __str__(self):
2035 from string import Template
2036 return Template(self.fmt).substitute(**self.kwargs)
2037
2038Either of these can be used in place of a format string, to allow {}- or
2039$-formatting to be used to build the actual "message" part which appears in the
formatted log output in place of “%(message)s” or “{message}” or “$message”.
2041If you find it a little unwieldy to use the class names whenever you want to log
2042something, you can make it more palatable if you use an alias such as ``M`` or
2043``_`` for the message (or perhaps ``__``, if you are using ``_`` for
2044localization).
2045
Vinay Sajipeb14dec2014-01-17 18:36:02 +00002046Examples of this approach are given below. Firstly, formatting with
2047:meth:`str.format`::
2048
2049 >>> __ = BraceMessage
2050 >>> print(__('Message with {0} {1}', 2, 'placeholders'))
2051 Message with 2 placeholders
2052 >>> class Point: pass
2053 ...
2054 >>> p = Point()
2055 >>> p.x = 0.5
2056 >>> p.y = 0.5
2057 >>> print(__('Message with coordinates: ({point.x:.2f}, {point.y:.2f})', point=p))
2058 Message with coordinates: (0.50, 0.50)
2059
2060Secondly, formatting with :class:`string.Template`::
2061
2062 >>> __ = DollarMessage
2063 >>> print(__('Message with $num $what', num=2, what='placeholders'))
2064 Message with 2 placeholders
2065 >>>
2066
2067One thing to note is that you pay no significant performance penalty with this
2068approach: the actual formatting happens not when you make the logging call, but
2069when (and if) the logged message is actually about to be output to a log by a
2070handler. So the only slightly unusual thing which might trip you up is that the
2071parentheses go around the format string and the arguments, not just the format
string. That's because the ``__`` notation is just syntax sugar for a constructor
2073call to one of the ``XXXMessage`` classes shown above.
Vinay Sajip554f22f2014-02-03 11:51:45 +00002074
2075
2076.. _filters-dictconfig:
2077
2078.. currentmodule:: logging.config
2079
2080Configuring filters with :func:`dictConfig`
2081-------------------------------------------
2082
2083You *can* configure filters using :func:`~logging.config.dictConfig`, though it
2084might not be obvious at first glance how to do it (hence this recipe). Since
2085:class:`~logging.Filter` is the only filter class included in the standard
2086library, and it is unlikely to cater to many requirements (it's only there as a
2087base class), you will typically need to define your own :class:`~logging.Filter`
2088subclass with an overridden :meth:`~logging.Filter.filter` method. To do this,
2089specify the ``()`` key in the configuration dictionary for the filter,
2090specifying a callable which will be used to create the filter (a class is the
2091most obvious, but you can provide any callable which returns a
2092:class:`~logging.Filter` instance). Here is a complete example::
2093
2094 import logging
2095 import logging.config
2096 import sys
2097
2098 class MyFilter(logging.Filter):
2099 def __init__(self, param=None):
2100 self.param = param
2101
2102 def filter(self, record):
2103 if self.param is None:
2104 allow = True
2105 else:
2106 allow = self.param not in record.msg
2107 if allow:
2108 record.msg = 'changed: ' + record.msg
2109 return allow
2110
2111 LOGGING = {
2112 'version': 1,
2113 'filters': {
2114 'myfilter': {
2115 '()': MyFilter,
2116 'param': 'noshow',
2117 }
2118 },
2119 'handlers': {
2120 'console': {
2121 'class': 'logging.StreamHandler',
2122 'filters': ['myfilter']
2123 }
2124 },
2125 'root': {
2126 'level': 'DEBUG',
2127 'handlers': ['console']
2128 },
2129 }
2130
2131 if __name__ == '__main__':
2132 logging.config.dictConfig(LOGGING)
2133 logging.debug('hello')
2134 logging.debug('hello - noshow')
2135
2136This example shows how you can pass configuration data to the callable which
2137constructs the instance, in the form of keyword parameters. When run, the above
Serhiy Storchaka46936d52018-04-08 19:18:04 +03002138script will print:
2139
2140.. code-block:: none
Vinay Sajip554f22f2014-02-03 11:51:45 +00002141
2142 changed: hello
2143
2144which shows that the filter is working as configured.
2145
2146A couple of extra points to note:
2147
2148* If you can't refer to the callable directly in the configuration (e.g. if it
2149 lives in a different module, and you can't import it directly where the
2150 configuration dictionary is), you can use the form ``ext://...`` as described
2151 in :ref:`logging-config-dict-externalobj`. For example, you could have used
2152 the text ``'ext://__main__.MyFilter'`` instead of ``MyFilter`` in the above
2153 example.
2154
2155* As well as for filters, this technique can also be used to configure custom
2156 handlers and formatters. See :ref:`logging-config-dict-userdef` for more
2157 information on how logging supports using user-defined objects in its
2158 configuration, and see the other cookbook recipe :ref:`custom-handlers` above.
2159
Vinay Sajipdb071642015-01-28 07:32:38 +00002160
2161.. _custom-format-exception:
2162
2163Customized exception formatting
2164-------------------------------
2165
2166There might be times when you want to do customized exception formatting - for
2167argument's sake, let's say you want exactly one line per logged event, even
2168when exception information is present. You can do this with a custom formatter
2169class, as shown in the following example::
2170
2171 import logging
2172
2173 class OneLineExceptionFormatter(logging.Formatter):
2174 def formatException(self, exc_info):
2175 """
2176 Format an exception so that it prints on a single line.
2177 """
2178 result = super(OneLineExceptionFormatter, self).formatException(exc_info)
Serhiy Storchakadba90392016-05-10 12:01:23 +03002179 return repr(result) # or format into one line however you want to
Vinay Sajipdb071642015-01-28 07:32:38 +00002180
2181 def format(self, record):
2182 s = super(OneLineExceptionFormatter, self).format(record)
2183 if record.exc_text:
2184 s = s.replace('\n', '') + '|'
2185 return s
2186
2187 def configure_logging():
2188 fh = logging.FileHandler('output.txt', 'w')
2189 f = OneLineExceptionFormatter('%(asctime)s|%(levelname)s|%(message)s|',
2190 '%d/%m/%Y %H:%M:%S')
2191 fh.setFormatter(f)
2192 root = logging.getLogger()
2193 root.setLevel(logging.DEBUG)
2194 root.addHandler(fh)
2195
2196 def main():
2197 configure_logging()
2198 logging.info('Sample message')
2199 try:
2200 x = 1 / 0
2201 except ZeroDivisionError as e:
2202 logging.exception('ZeroDivisionError: %s', e)
2203
2204 if __name__ == '__main__':
2205 main()
2206
Serhiy Storchaka46936d52018-04-08 19:18:04 +03002207When run, this produces a file with exactly two lines:
2208
2209.. code-block:: none
Vinay Sajipdb071642015-01-28 07:32:38 +00002210
2211 28/01/2015 07:21:23|INFO|Sample message|
2212 28/01/2015 07:21:23|ERROR|ZeroDivisionError: integer division or modulo by zero|'Traceback (most recent call last):\n File "logtest7.py", line 30, in main\n x = 1 / 0\nZeroDivisionError: integer division or modulo by zero'|
2213
2214While the above treatment is simplistic, it points the way to how exception
2215information can be formatted to your liking. The :mod:`traceback` module may be
2216helpful for more specialized needs.
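
For instance, here is a sketch of a formatter which keeps just the innermost
frame and the final exception line, using :mod:`traceback`::

    import logging
    import traceback

    class LastFrameExceptionFormatter(logging.Formatter):
        def formatException(self, exc_info):
            lines = traceback.format_exception(*exc_info)
            # keep only the last frame and the trailing "ExcType: message" line
            return ''.join(lines[-2:]).rstrip()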
Vinay Sajipf046dfe2015-02-01 15:17:34 +00002217
2218.. _spoken-messages:
2219
2220Speaking logging messages
2221-------------------------
2222
2223There might be situations when it is desirable to have logging messages rendered
Martin Panter8f137832017-01-14 08:24:20 +00002224in an audible rather than a visible format. This is easy to do if you have
2225text-to-speech (TTS) functionality available in your system, even if it doesn't have
Vinay Sajipf046dfe2015-02-01 15:17:34 +00002226a Python binding. Most TTS systems have a command line program you can run, and
2227this can be invoked from a handler using :mod:`subprocess`. It's assumed here
2228that TTS command line programs won't expect to interact with users or take a
long time to complete, that the frequency of logged messages will not be so
high as to swamp the user with messages, and that it's acceptable to have the
messages spoken one at a time rather than concurrently. The example implementation
2232below waits for one message to be spoken before the next is processed, and this
2233might cause other handlers to be kept waiting. Here is a short example showing
2234the approach, which assumes that the ``espeak`` TTS package is available::
2235
2236 import logging
2237 import subprocess
2238 import sys
2239
2240 class TTSHandler(logging.Handler):
2241 def emit(self, record):
2242 msg = self.format(record)
2243 # Speak slowly in a female English voice
2244 cmd = ['espeak', '-s150', '-ven+f3', msg]
2245 p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
2246 stderr=subprocess.STDOUT)
2247 # wait for the program to finish
2248 p.communicate()
2249
2250 def configure_logging():
2251 h = TTSHandler()
2252 root = logging.getLogger()
2253 root.addHandler(h)
2254 # the default formatter just returns the message
2255 root.setLevel(logging.DEBUG)
2256
2257 def main():
2258 logging.info('Hello')
2259 logging.debug('Goodbye')
2260
2261 if __name__ == '__main__':
2262 configure_logging()
2263 sys.exit(main())
2264
2265When run, this script should say "Hello" and then "Goodbye" in a female voice.
2266
2267The above approach can, of course, be adapted to other TTS systems and even
2268other systems altogether which can process messages via external programs run
2269from a command line.
2270
Vinay Sajipff1f3d92015-10-10 00:52:35 +01002271
2272.. _buffered-logging:
2273
2274Buffering logging messages and outputting them conditionally
2275------------------------------------------------------------
2276
2277There might be situations where you want to log messages in a temporary area
2278and only output them if a certain condition occurs. For example, you may want to
2279start logging debug events in a function, and if the function completes without
2280errors, you don't want to clutter the log with the collected debug information,
2281but if there is an error, you want all the debug information to be output as well
2282as the error.
2283
2284Here is an example which shows how you could do this using a decorator for your
2285functions where you want logging to behave this way. It makes use of the
2286:class:`logging.handlers.MemoryHandler`, which allows buffering of logged events
2287until some condition occurs, at which point the buffered events are ``flushed``
2288- passed to another handler (the ``target`` handler) for processing. By default,
2289the ``MemoryHandler`` flushed when its buffer gets filled up or an event whose
2290level is greater than or equal to a specified threshold is seen. You can use this
2291recipe with a more specialised subclass of ``MemoryHandler`` if you want custom
2292flushing behavior.
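
For example, a sketch of a subclass which flushes only when a CRITICAL event
arrives, no matter how full the buffer is, could look like this::

    import logging
    from logging.handlers import MemoryHandler

    class FlushOnCriticalHandler(MemoryHandler):
        def shouldFlush(self, record):
            # ignore buffer capacity; flush only when a CRITICAL event is seen
            return record.levelno >= logging.CRITICAL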
2293
2294The example script has a simple function, ``foo``, which just cycles through
2295all the logging levels, writing to ``sys.stderr`` to say what level it's about
Martin Panterf05641642016-05-08 13:48:10 +00002296to log at, and then actually logging a message at that level. You can pass a
Vinay Sajipff1f3d92015-10-10 00:52:35 +01002297parameter to ``foo`` which, if true, will log at ERROR and CRITICAL levels -
2298otherwise, it only logs at DEBUG, INFO and WARNING levels.
2299
2300The script just arranges to decorate ``foo`` with a decorator which will do the
2301conditional logging that's required. The decorator takes a logger as a parameter
2302and attaches a memory handler for the duration of the call to the decorated
2303function. The decorator can be additionally parameterised using a target handler,
Vinay Sajip84de34e2019-07-01 12:41:21 +01002304a level at which flushing should occur, and a capacity for the buffer (number of
2305records buffered). These default to a :class:`~logging.StreamHandler` which
2306writes to ``sys.stderr``, ``logging.ERROR`` and ``100`` respectively.
Vinay Sajipff1f3d92015-10-10 00:52:35 +01002307
2308Here's the script::
2309
2310 import logging
2311 from logging.handlers import MemoryHandler
2312 import sys
2313
2314 logger = logging.getLogger(__name__)
2315 logger.addHandler(logging.NullHandler())
2316
2317 def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None):
2318 if target_handler is None:
2319 target_handler = logging.StreamHandler()
2320 if flush_level is None:
2321 flush_level = logging.ERROR
2322 if capacity is None:
2323 capacity = 100
2324 handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler)
2325
2326 def decorator(fn):
2327 def wrapper(*args, **kwargs):
2328 logger.addHandler(handler)
2329 try:
2330 return fn(*args, **kwargs)
2331 except Exception:
2332 logger.exception('call failed')
2333 raise
2334 finally:
2335 super(MemoryHandler, handler).flush()
2336 logger.removeHandler(handler)
2337 return wrapper
2338
2339 return decorator
2340
2341 def write_line(s):
2342 sys.stderr.write('%s\n' % s)
2343
2344 def foo(fail=False):
2345 write_line('about to log at DEBUG ...')
2346 logger.debug('Actually logged at DEBUG')
2347 write_line('about to log at INFO ...')
2348 logger.info('Actually logged at INFO')
2349 write_line('about to log at WARNING ...')
2350 logger.warning('Actually logged at WARNING')
2351 if fail:
2352 write_line('about to log at ERROR ...')
2353 logger.error('Actually logged at ERROR')
2354 write_line('about to log at CRITICAL ...')
2355 logger.critical('Actually logged at CRITICAL')
2356 return fail
2357
2358 decorated_foo = log_if_errors(logger)(foo)
2359
2360 if __name__ == '__main__':
2361 logger.setLevel(logging.DEBUG)
2362 write_line('Calling undecorated foo with False')
2363 assert not foo(False)
2364 write_line('Calling undecorated foo with True')
2365 assert foo(True)
2366 write_line('Calling decorated foo with False')
2367 assert not decorated_foo(False)
2368 write_line('Calling decorated foo with True')
2369 assert decorated_foo(True)
2370
Serhiy Storchaka46936d52018-04-08 19:18:04 +03002371When this script is run, the following output should be observed:
2372
2373.. code-block:: none
Vinay Sajipff1f3d92015-10-10 00:52:35 +01002374
2375 Calling undecorated foo with False
2376 about to log at DEBUG ...
2377 about to log at INFO ...
2378 about to log at WARNING ...
2379 Calling undecorated foo with True
2380 about to log at DEBUG ...
2381 about to log at INFO ...
2382 about to log at WARNING ...
2383 about to log at ERROR ...
2384 about to log at CRITICAL ...
2385 Calling decorated foo with False
2386 about to log at DEBUG ...
2387 about to log at INFO ...
2388 about to log at WARNING ...
2389 Calling decorated foo with True
2390 about to log at DEBUG ...
2391 about to log at INFO ...
2392 about to log at WARNING ...
2393 about to log at ERROR ...
2394 Actually logged at DEBUG
2395 Actually logged at INFO
2396 Actually logged at WARNING
2397 Actually logged at ERROR
2398 about to log at CRITICAL ...
2399 Actually logged at CRITICAL
2400
2401As you can see, actual logging output only occurs when an event is logged whose
2402severity is ERROR or greater, but in that case, any previous events at lower
2403severities are also logged.
2404
2405You can of course use the conventional means of decoration::
2406
2407 @log_if_errors(logger)
2408 def foo(fail=False):
2409 ...
Vinay Sajip4de9dae2015-10-17 13:58:19 +01002410
2411
2412.. _utc-formatting:
2413
2414Formatting times using UTC (GMT) via configuration
2415--------------------------------------------------
2416
2417Sometimes you want to format times using UTC, which can be done using a class
such as ``UTCFormatter``, shown below::
2419
2420 import logging
2421 import time
2422
2423 class UTCFormatter(logging.Formatter):
2424 converter = time.gmtime
2425
Berker Peksagf84499a2015-10-20 03:41:38 +03002426and you can then use the ``UTCFormatter`` in your code instead of
Vinay Sajip4de9dae2015-10-17 13:58:19 +01002427:class:`~logging.Formatter`. If you want to do that via configuration, you can
2428use the :func:`~logging.config.dictConfig` API with an approach illustrated by
2429the following complete example::
2430
2431 import logging
2432 import logging.config
2433 import time
2434
2435 class UTCFormatter(logging.Formatter):
2436 converter = time.gmtime
2437
2438 LOGGING = {
2439 'version': 1,
2440 'disable_existing_loggers': False,
2441 'formatters': {
2442 'utc': {
2443 '()': UTCFormatter,
2444 'format': '%(asctime)s %(message)s',
2445 },
2446 'local': {
2447 'format': '%(asctime)s %(message)s',
2448 }
2449 },
2450 'handlers': {
2451 'console1': {
2452 'class': 'logging.StreamHandler',
2453 'formatter': 'utc',
2454 },
2455 'console2': {
2456 'class': 'logging.StreamHandler',
2457 'formatter': 'local',
2458 },
2459 },
2460 'root': {
2461 'handlers': ['console1', 'console2'],
2462 }
2463 }
2464
2465 if __name__ == '__main__':
2466 logging.config.dictConfig(LOGGING)
2467 logging.warning('The local time is %s', time.asctime())
2468
Serhiy Storchaka46936d52018-04-08 19:18:04 +03002469When this script is run, it should print something like:
2470
2471.. code-block:: none
Vinay Sajip4de9dae2015-10-17 13:58:19 +01002472
2473 2015-10-17 12:53:29,501 The local time is Sat Oct 17 13:53:29 2015
2474 2015-10-17 13:53:29,501 The local time is Sat Oct 17 13:53:29 2015
2475
2476showing how the time is formatted both as local time and UTC, one for each
2477handler.
Vinay Sajipd93a6012016-04-01 23:13:01 +01002478
2479
2480.. _context-manager:
2481
2482Using a context manager for selective logging
2483---------------------------------------------
2484
2485There are times when it would be useful to temporarily change the logging
2486configuration and revert it back after doing something. For this, a context
2487manager is the most obvious way of saving and restoring the logging context.
2488Here is a simple example of such a context manager, which allows you to
2489optionally change the logging level and add a logging handler purely in the
2490scope of the context manager::
2491
2492 import logging
2493 import sys
2494
Serhiy Storchaka70c5f2a2019-06-01 11:38:24 +03002495 class LoggingContext:
        def __init__(self, logger, level=None, handler=None, close=True):
            self.logger = logger
            self.level = level
            self.handler = handler
            self.close = close

        def __enter__(self):
            if self.level is not None:
                self.old_level = self.logger.level
                self.logger.setLevel(self.level)
            if self.handler:
                self.logger.addHandler(self.handler)

        def __exit__(self, et, ev, tb):
            if self.level is not None:
                self.logger.setLevel(self.old_level)
            if self.handler:
                self.logger.removeHandler(self.handler)
            if self.handler and self.close:
                self.handler.close()
            # implicit return of None => don't swallow exceptions

If you specify a level value, the logger's level is set to that value for the
duration of the ``with`` block covered by the context manager. If you specify a
handler, it is added to the logger on entry to the block and removed on exit
from the block. You can also ask the manager to close the handler for you on
exit from the block - useful if you have no further need of the handler.

To illustrate how it works, we can add the following block of code to the
above::

    if __name__ == '__main__':
        logger = logging.getLogger('foo')
        logger.addHandler(logging.StreamHandler())
        logger.setLevel(logging.INFO)
        logger.info('1. This should appear just once on stderr.')
        logger.debug('2. This should not appear.')
        with LoggingContext(logger, level=logging.DEBUG):
            logger.debug('3. This should appear once on stderr.')
        logger.debug('4. This should not appear.')
        h = logging.StreamHandler(sys.stdout)
        with LoggingContext(logger, level=logging.DEBUG, handler=h, close=True):
            logger.debug('5. This should appear twice - once on stderr and once on stdout.')
        logger.info('6. This should appear just once on stderr.')
        logger.debug('7. This should not appear.')

We initially set the logger's level to ``INFO``, so message #1 appears and
message #2 doesn't. We then change the level to ``DEBUG`` temporarily in the
following ``with`` block, and so message #3 appears. After the block exits, the
logger's level is restored to ``INFO`` and so message #4 doesn't appear. In the
next ``with`` block, we set the level to ``DEBUG`` again but also add a handler
writing to ``sys.stdout``. Thus, message #5 appears twice on the console (once
via ``stderr`` and once via ``stdout``). After the ``with`` statement's
completion, the logger's level and handlers are restored to what they were
before, so message #6 appears (like message #1) whereas message #7 doesn't
(just like message #2).

If we run the resulting script, the result is as follows:

.. code-block:: shell-session

    $ python logctx.py
    1. This should appear just once on stderr.
    3. This should appear once on stderr.
    5. This should appear twice - once on stderr and once on stdout.
    5. This should appear twice - once on stderr and once on stdout.
    6. This should appear just once on stderr.

If we run it again, but pipe ``stderr`` to ``/dev/null``, we see the following,
which is the only message written to ``stdout``:

.. code-block:: shell-session

    $ python logctx.py 2>/dev/null
    5. This should appear twice - once on stderr and once on stdout.

Once again, but piping ``stdout`` to ``/dev/null``, we get:

.. code-block:: shell-session

    $ python logctx.py >/dev/null
    1. This should appear just once on stderr.
    3. This should appear once on stderr.
    5. This should appear twice - once on stderr and once on stdout.
    6. This should appear just once on stderr.

In this case, the message #5 printed to ``stdout`` doesn't appear, as expected.

Of course, the approach described here can be generalised, for example to attach
logging filters temporarily. Note that the above code works in Python 2 as well
as Python 3.
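
For instance, a variant that temporarily attaches a filter to a logger might
look something like the following sketch (the ``FilterContext`` name and the
example filter are invented here purely for illustration - they aren't part of
the example above or of the standard library)::

    import logging

    class FilterContext:
        """Temporarily attach a filter to a logger (illustrative sketch only)."""
        def __init__(self, logger, filter):
            self.logger = logger
            self.filter = filter

        def __enter__(self):
            self.logger.addFilter(self.filter)

        def __exit__(self, et, ev, tb):
            self.logger.removeFilter(self.filter)
            # implicit return of None => don't swallow exceptions

    class NoNoiseFilter(logging.Filter):
        # Drop any record whose message contains the word 'noise'
        def filter(self, record):
            return 'noise' not in record.getMessage()

    if __name__ == '__main__':
        logger = logging.getLogger('foo')
        logger.addHandler(logging.StreamHandler())
        logger.setLevel(logging.INFO)
        logger.info('1. This should appear.')
        with FilterContext(logger, NoNoiseFilter()):
            logger.info('2. This noise should not appear.')
            logger.info('3. This should appear.')
        logger.info('4. This noise should appear, as the filter has been removed.')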


.. _starter-template:

A CLI application starter template
----------------------------------

Here's an example which shows how you can:

* Use a logging level based on command-line arguments
* Dispatch to multiple subcommands in separate files, all logging at the same
  level in a consistent way
* Make use of simple, minimal configuration

Suppose we have a command-line application whose job is to stop, start or
restart some services. For the purposes of illustration, this could be
organised as a file ``app.py`` that is the main script for the application,
with individual commands implemented in ``start.py``, ``stop.py`` and
``restart.py``. Suppose further that we want to control the verbosity of the
application via a command-line argument, defaulting to ``logging.INFO``.
Here's one way that ``app.py`` could be written::

    import argparse
    import importlib
    import logging
    import os
    import sys

    def main(args=None):
        scriptname = os.path.basename(__file__)
        parser = argparse.ArgumentParser(scriptname)
        levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
        parser.add_argument('--log-level', default='INFO', choices=levels)
        subparsers = parser.add_subparsers(dest='command',
                                           help='Available commands:')
        start_cmd = subparsers.add_parser('start', help='Start a service')
        start_cmd.add_argument('name', metavar='NAME',
                               help='Name of service to start')
        stop_cmd = subparsers.add_parser('stop',
                                         help='Stop one or more services')
        stop_cmd.add_argument('names', metavar='NAME', nargs='+',
                              help='Name of service to stop')
        restart_cmd = subparsers.add_parser('restart',
                                            help='Restart one or more services')
        restart_cmd.add_argument('names', metavar='NAME', nargs='+',
                                 help='Name of service to restart')
        options = parser.parse_args(args)
        # the code to dispatch commands could all be in this file. For the purposes
        # of illustration only, we implement each command in a separate module.
        try:
            mod = importlib.import_module(options.command)
            cmd = getattr(mod, 'command')
        except (ImportError, AttributeError):
            print('Unable to find the code for command \'%s\'' % options.command)
            return 1
        # Could get fancy here and load configuration from file or dictionary
        logging.basicConfig(level=options.log_level,
                            format='%(levelname)s %(name)s %(message)s')
        cmd(options)

    if __name__ == '__main__':
        sys.exit(main())
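
As the comment in ``app.py`` hints, the ``basicConfig()`` call could be replaced
by something more elaborate, such as a call to :func:`logging.config.dictConfig`.
Here is a minimal sketch of what that might look like (the ``configure_logging``
helper and the layout of the dictionary are just one possible arrangement, not
something the example above requires)::

    import logging.config

    def configure_logging(level):
        # Roughly equivalent to the basicConfig() call in app.py, but the
        # dictionary could just as easily be loaded from a file.
        logging.config.dictConfig({
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {
                'brief': {'format': '%(levelname)s %(name)s %(message)s'},
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'formatter': 'brief',
                },
            },
            'root': {'level': level, 'handlers': ['console']},
        })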

And the ``start``, ``stop`` and ``restart`` commands can be implemented in
separate modules, like so for starting::

    # start.py
    import logging

    logger = logging.getLogger(__name__)

    def command(options):
        logger.debug('About to start %s', options.name)
        # actually do the command processing here ...
        logger.info('Started the \'%s\' service.', options.name)

and thus for stopping::

    # stop.py
    import logging

    logger = logging.getLogger(__name__)

    def command(options):
        n = len(options.names)
        if n == 1:
            plural = ''
            services = '\'%s\'' % options.names[0]
        else:
            plural = 's'
            services = ', '.join('\'%s\'' % name for name in options.names)
            i = services.rfind(', ')
            services = services[:i] + ' and ' + services[i + 2:]
        logger.debug('About to stop %s', services)
        # actually do the command processing here ...
        logger.info('Stopped the %s service%s.', services, plural)

and similarly for restarting::

    # restart.py
    import logging

    logger = logging.getLogger(__name__)

    def command(options):
        n = len(options.names)
        if n == 1:
            plural = ''
            services = '\'%s\'' % options.names[0]
        else:
            plural = 's'
            services = ', '.join('\'%s\'' % name for name in options.names)
            i = services.rfind(', ')
            services = services[:i] + ' and ' + services[i + 2:]
        logger.debug('About to restart %s', services)
        # actually do the command processing here ...
        logger.info('Restarted the %s service%s.', services, plural)

If we run this application with the default log level, we get output like this:

.. code-block:: shell-session

    $ python app.py start foo
    INFO start Started the 'foo' service.

    $ python app.py stop foo bar
    INFO stop Stopped the 'foo' and 'bar' services.

    $ python app.py restart foo bar baz
    INFO restart Restarted the 'foo', 'bar' and 'baz' services.

The first word is the logging level, and the second word is the module or
package name of the place where the event was logged.
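
The format of these lines is determined by the format string passed to
``basicConfig()`` in ``app.py``. If, say, you also wanted a timestamp on each
line, you could simply extend that format string - for example (a minimal
sketch, run on its own here rather than as part of the application above)::

    import logging

    # The same format as in app.py, with %(asctime)s added at the front.
    logging.basicConfig(level='INFO',
                        format='%(asctime)s %(levelname)s %(name)s %(message)s')
    logging.getLogger('start').info("Started the 'foo' service.")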

If we change the logging level, then we can change the information sent to the
log. For example, if we want more information:

.. code-block:: shell-session

    $ python app.py --log-level DEBUG start foo
    DEBUG start About to start foo
    INFO start Started the 'foo' service.

    $ python app.py --log-level DEBUG stop foo bar
    DEBUG stop About to stop 'foo' and 'bar'
    INFO stop Stopped the 'foo' and 'bar' services.

    $ python app.py --log-level DEBUG restart foo bar baz
    DEBUG restart About to restart 'foo', 'bar' and 'baz'
    INFO restart Restarted the 'foo', 'bar' and 'baz' services.

And if we want less:

.. code-block:: shell-session

    $ python app.py --log-level WARNING start foo
    $ python app.py --log-level WARNING stop foo bar
    $ python app.py --log-level WARNING restart foo bar baz

In this case, the commands don't print anything to the console, since nothing
at ``WARNING`` level or above is logged by them.
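
One detail worth noting is that ``basicConfig()`` accepts the level as a string
name as well as a numeric constant, which is why ``options.log_level`` (a value
such as ``'WARNING'``) can be passed straight through in ``app.py``. A tiny,
standalone illustration (not part of the application above)::

    import logging

    # Passing the level as the string 'WARNING' behaves just like passing
    # logging.WARNING.
    logging.basicConfig(level='WARNING',
                        format='%(levelname)s %(name)s %(message)s')
    logging.getLogger('demo').info('suppressed: below WARNING')
    logging.getLogger('demo').warning('shown: at WARNING')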

.. _qt-gui:

A Qt GUI for logging
--------------------

A question that comes up from time to time is about how to log to a GUI
application. The `Qt <https://www.qt.io/>`_ framework is a popular
cross-platform UI framework, with Python bindings available via the `PySide2
<https://pypi.org/project/PySide2/>`_ and `PyQt5
<https://pypi.org/project/PyQt5/>`_ libraries.

The following example shows how to log to a Qt GUI. This introduces a simple
``QtHandler`` class which takes a callable, which should be a slot in the main
thread that does GUI updates. A worker thread is also created to show how you
can log to the GUI both from the UI itself (via a button for manual logging)
and from a worker thread doing work in the background (here, just logging
messages at random levels with random short delays in between).

The worker thread is implemented using Qt's ``QThread`` class rather than the
:mod:`threading` module, as there are circumstances where one has to use
``QThread``, which offers better integration with other ``Qt`` components.

The code should work with recent releases of either ``PySide2`` or ``PyQt5``.
You should be able to adapt the approach to earlier versions of Qt. Please
refer to the comments in the code snippet for more detailed information.
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002773
2774.. code-block:: python3
2775
2776 import datetime
2777 import logging
2778 import random
2779 import sys
2780 import time
2781
2782 # Deal with minor differences between PySide2 and PyQt5
2783 try:
2784 from PySide2 import QtCore, QtGui, QtWidgets
2785 Signal = QtCore.Signal
2786 Slot = QtCore.Slot
2787 except ImportError:
2788 from PyQt5 import QtCore, QtGui, QtWidgets
2789 Signal = QtCore.pyqtSignal
2790 Slot = QtCore.pyqtSlot
2791
Vinay Sajip472eced2019-07-31 07:36:45 +01002792
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002793 logger = logging.getLogger(__name__)
2794
Vinay Sajip472eced2019-07-31 07:36:45 +01002795
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002796 #
2797 # Signals need to be contained in a QObject or subclass in order to be correctly
2798 # initialized.
2799 #
2800 class Signaller(QtCore.QObject):
Vinay Sajip472eced2019-07-31 07:36:45 +01002801 signal = Signal(str, logging.LogRecord)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002802
2803 #
2804 # Output to a Qt GUI is only supposed to happen on the main thread. So, this
2805 # handler is designed to take a slot function which is set up to run in the main
Vinay Sajip472eced2019-07-31 07:36:45 +01002806 # thread. In this example, the function takes a string argument which is a
2807 # formatted log message, and the log record which generated it. The formatted
2808 # string is just a convenience - you could format a string for output any way
2809 # you like in the slot function itself.
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002810 #
2811 # You specify the slot function to do whatever GUI updates you want. The handler
2812 # doesn't know or care about specific UI elements.
2813 #
2814 class QtHandler(logging.Handler):
2815 def __init__(self, slotfunc, *args, **kwargs):
2816 super(QtHandler, self).__init__(*args, **kwargs)
2817 self.signaller = Signaller()
2818 self.signaller.signal.connect(slotfunc)
2819
2820 def emit(self, record):
2821 s = self.format(record)
Vinay Sajip472eced2019-07-31 07:36:45 +01002822 self.signaller.signal.emit(s, record)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002823
2824 #
2825 # This example uses QThreads, which means that the threads at the Python level
2826 # are named something like "Dummy-1". The function below gets the Qt name of the
2827 # current thread.
2828 #
2829 def ctname():
2830 return QtCore.QThread.currentThread().objectName()
2831
Vinay Sajip472eced2019-07-31 07:36:45 +01002832
2833 #
2834 # Used to generate random levels for logging.
2835 #
2836 LEVELS = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
2837 logging.CRITICAL)
2838
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002839 #
2840 # This worker class represents work that is done in a thread separate to the
2841 # main thread. The way the thread is kicked off to do work is via a button press
2842 # that connects to a slot in the worker.
2843 #
2844 # Because the default threadName value in the LogRecord isn't much use, we add
2845 # a qThreadName which contains the QThread name as computed above, and pass that
2846 # value in an "extra" dictionary which is used to update the LogRecord with the
2847 # QThread name.
2848 #
2849 # This example worker just outputs messages sequentially, interspersed with
2850 # random delays of the order of a few seconds.
2851 #
2852 class Worker(QtCore.QObject):
2853 @Slot()
2854 def start(self):
2855 extra = {'qThreadName': ctname() }
2856 logger.debug('Started work', extra=extra)
2857 i = 1
2858 # Let the thread run until interrupted. This allows reasonably clean
2859 # thread termination.
2860 while not QtCore.QThread.currentThread().isInterruptionRequested():
2861 delay = 0.5 + random.random() * 2
2862 time.sleep(delay)
Vinay Sajip472eced2019-07-31 07:36:45 +01002863 level = random.choice(LEVELS)
2864 logger.log(level, 'Message after delay of %3.1f: %d', delay, i, extra=extra)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002865 i += 1
2866
2867 #
2868 # Implement a simple UI for this cookbook example. This contains:
2869 #
2870 # * A read-only text edit window which holds formatted log messages
2871 # * A button to start work and log stuff in a separate thread
2872 # * A button to log something from the main thread
2873 # * A button to clear the log window
2874 #
2875 class Window(QtWidgets.QWidget):
2876
Vinay Sajip472eced2019-07-31 07:36:45 +01002877 COLORS = {
2878 logging.DEBUG: 'black',
2879 logging.INFO: 'blue',
2880 logging.WARNING: 'orange',
2881 logging.ERROR: 'red',
2882 logging.CRITICAL: 'purple',
2883 }
2884
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002885 def __init__(self, app):
2886 super(Window, self).__init__()
2887 self.app = app
Vinay Sajip472eced2019-07-31 07:36:45 +01002888 self.textedit = te = QtWidgets.QPlainTextEdit(self)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002889 # Set whatever the default monospace font is for the platform
2890 f = QtGui.QFont('nosuchfont')
2891 f.setStyleHint(f.Monospace)
2892 te.setFont(f)
2893 te.setReadOnly(True)
2894 PB = QtWidgets.QPushButton
2895 self.work_button = PB('Start background work', self)
2896 self.log_button = PB('Log a message at a random level', self)
2897 self.clear_button = PB('Clear log window', self)
2898 self.handler = h = QtHandler(self.update_status)
2899 # Remember to use qThreadName rather than threadName in the format string.
2900 fs = '%(asctime)s %(qThreadName)-12s %(levelname)-8s %(message)s'
Vinay Sajip472eced2019-07-31 07:36:45 +01002901 formatter = logging.Formatter(fs)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002902 h.setFormatter(formatter)
2903 logger.addHandler(h)
2904 # Set up to terminate the QThread when we exit
2905 app.aboutToQuit.connect(self.force_quit)
2906
2907 # Lay out all the widgets
2908 layout = QtWidgets.QVBoxLayout(self)
2909 layout.addWidget(te)
2910 layout.addWidget(self.work_button)
2911 layout.addWidget(self.log_button)
2912 layout.addWidget(self.clear_button)
2913 self.setFixedSize(900, 400)
2914
2915 # Connect the non-worker slots and signals
2916 self.log_button.clicked.connect(self.manual_update)
2917 self.clear_button.clicked.connect(self.clear_display)
2918
2919 # Start a new worker thread and connect the slots for the worker
2920 self.start_thread()
2921 self.work_button.clicked.connect(self.worker.start)
2922 # Once started, the button should be disabled
2923 self.work_button.clicked.connect(lambda : self.work_button.setEnabled(False))
2924
2925 def start_thread(self):
2926 self.worker = Worker()
2927 self.worker_thread = QtCore.QThread()
2928 self.worker.setObjectName('Worker')
2929 self.worker_thread.setObjectName('WorkerThread') # for qThreadName
2930 self.worker.moveToThread(self.worker_thread)
2931 # This will start an event loop in the worker thread
2932 self.worker_thread.start()
2933
2934 def kill_thread(self):
2935 # Just tell the worker to stop, then tell it to quit and wait for that
2936 # to happen
2937 self.worker_thread.requestInterruption()
2938 if self.worker_thread.isRunning():
2939 self.worker_thread.quit()
2940 self.worker_thread.wait()
2941 else:
2942 print('worker has already exited.')
2943
2944 def force_quit(self):
2945 # For use when the window is closed
2946 if self.worker_thread.isRunning():
2947 self.kill_thread()
2948
2949 # The functions below update the UI and run in the main thread because
2950 # that's where the slots are set up
2951
Vinay Sajip53839562019-11-18 12:03:22 +00002952 @Slot(str, logging.LogRecord)
Vinay Sajip472eced2019-07-31 07:36:45 +01002953 def update_status(self, status, record):
2954 color = self.COLORS.get(record.levelno, 'black')
2955 s = '<pre><font color="%s">%s</font></pre>' % (color, status)
2956 self.textedit.appendHtml(s)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002957
2958 @Slot()
2959 def manual_update(self):
Vinay Sajip472eced2019-07-31 07:36:45 +01002960 # This function uses the formatted message passed in, but also uses
2961 # information from the record to format the message in an appropriate
2962 # color according to its severity (level).
2963 level = random.choice(LEVELS)
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002964 extra = {'qThreadName': ctname() }
2965 logger.log(level, 'Manually logged!', extra=extra)
2966
2967 @Slot()
2968 def clear_display(self):
2969 self.textedit.clear()
2970
Vinay Sajip472eced2019-07-31 07:36:45 +01002971
Vinay Sajip1ed915e2019-07-27 13:46:53 +01002972 def main():
2973 QtCore.QThread.currentThread().setObjectName('MainThread')
2974 logging.getLogger().setLevel(logging.DEBUG)
2975 app = QtWidgets.QApplication(sys.argv)
2976 example = Window(app)
2977 example.show()
2978 sys.exit(app.exec_())
2979
2980 if __name__=='__main__':
2981 main()