#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
|
| 9 | import itertools
|
| 10 | import weakref
|
| 11 | import copy_reg
|
| 12 | import atexit
|
| 13 | import threading # we want threading to install it's
|
| 14 | # cleanup function before multiprocessing does
|
| 15 |
|
| 16 | from multiprocessing.process import current_process, active_children
|
| 17 |
|
# Public API of this utility module.
__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal'
    ]
|
| 23 |
|
| 24 | #
|
| 25 | # Logging
|
| 26 | #
|
| 27 |
|
# Custom log levels used by multiprocessing, interleaved with the
# standard logging levels (DEBUG=10, INFO=20, WARNING=30).
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25

# Name of the package logger and the default record format; the
# '%(processName)s' field is filled in by _check_logger_class().
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

# Module-level logger; stays None until get_logger() is first called.
_logger = None
# Set to True once log_to_stderr() has installed a stderr handler.
_log_to_stderr = False
|
| 39 |
|
def sub_debug(msg, *args):
    # Log at SUBDEBUG level; a no-op until get_logger() has been called.
    if not _logger:
        return
    _logger.log(SUBDEBUG, msg, *args)
|
| 43 |
|
def debug(msg, *args):
    # Log at DEBUG level; a no-op until get_logger() has been called.
    if not _logger:
        return
    _logger.log(DEBUG, msg, *args)
|
| 47 |
|
def info(msg, *args):
    # Log at INFO level; a no-op until get_logger() has been called.
    if not _logger:
        return
    _logger.log(INFO, msg, *args)
|
| 51 |
|
def sub_warning(msg, *args):
    # Log at SUBWARNING level; a no-op until get_logger() has been called.
    if not _logger:
        return
    _logger.log(SUBWARNING, msg, *args)
|
| 55 |
|
def get_logger():
    '''
    Return the logger used by multiprocessing, creating it on first use.
    '''
    global _logger

    if _logger:
        return _logger

    import logging, atexit

    # XXX multiprocessing should cleanup before logging: re-register our
    # exit handler so it sits after logging's own atexit hook and runs first.
    if hasattr(atexit, 'unregister'):
        atexit.unregister(_exit_function)
        atexit.register(_exit_function)
    else:
        # Older interpreters lack atexit.unregister; poke the private list.
        atexit._exithandlers.remove((_exit_function, (), {}))
        atexit._exithandlers.append((_exit_function, (), {}))

    _check_logger_class()
    _logger = logging.getLogger(LOGGER_NAME)
    return _logger
|
| 77 |
|
def _check_logger_class():
    '''
    Make sure the active logger class stamps the process name on records.
    '''
    # XXX This function is unnecessary once logging is patched
    import logging
    if hasattr(logging, 'multiprocessing'):
        return

    logging._acquireLock()
    try:
        base_cls = logging.getLoggerClass()
        if getattr(base_cls, '_process_aware', False):
            # Already patched (possibly by another import path).
            return
        class ProcessAwareLogger(base_cls):
            _process_aware = True
            def makeRecord(self, *args, **kwds):
                # Fill in '%(processName)s' for DEFAULT_LOGGING_FORMAT.
                record = base_cls.makeRecord(self, *args, **kwds)
                record.processName = current_process()._name
                return record
        logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
|
| 100 |
|
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr

    Returns the multiprocessing logger so callers can adjust it further.
    (Previously the logger was computed but never returned.)
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if level is not None:
        logger.setLevel(level)
    _log_to_stderr = True
    return logger
|
| 115 |
|
| 116 | #
|
| 117 | # Function returning a temp directory which will be removed on exit
|
| 118 | #
|
| 119 |
|
def get_temp_dir():
    '''
    Return the name of a temp directory which is created lazily and
    automatically cleaned up at process exit.
    '''
    process = current_process()
    if process._tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # exitpriority=-100 so removal happens after most other finalizers.
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        process._tempdir = tempdir
    return process._tempdir
|
| 129 |
|
| 130 | #
|
| 131 | # Support for reinitialization of objects when bootstrapping a child process
|
| 132 | #
|
| 133 |
|
# Maps (creation_index, id(obj), func) -> obj.  Weak values, so registering
# an after-fork callback does not keep the object alive.
_afterfork_registry = weakref.WeakValueDictionary()
# Monotonic index so callbacks run in registration order.
_afterfork_counter = itertools.count()
|
| 136 |
|
def _run_after_forkers():
    '''
    Invoke every registered after-fork callback, in registration order.
    '''
    # Keys are (index, id, func) tuples, so sorting yields creation order.
    for (index, ident, func), obj in sorted(_afterfork_registry.items()):
        try:
            func(obj)
        except Exception as e:
            # Best effort: a failing callback must not break bootstrapping.
            info('after forker raised exception %s', e)
|
| 145 |
|
def register_after_fork(obj, func):
    '''
    Arrange for func(obj) to be called when a child process is bootstrapped.
    '''
    key = (next(_afterfork_counter), id(obj), func)
    _afterfork_registry[key] = obj
|
| 148 |
|
| 149 | #
|
| 150 | # Finalization using weakrefs
|
| 151 | #
|
| 152 |
|
# Maps (exitpriority, creation_index) -> Finalize instance; strong refs keep
# pending finalizers alive until run or cancelled.
_finalizer_registry = {}
# Creation index used to break ties between equal priorities.
_finalizer_counter = itertools.count()
|
| 155 |
|
| 156 |
|
class Finalize(object):
    '''
    Class which supports object finalization using weakrefs

    The callback runs when *obj* is garbage collected, when the instance is
    called directly, or at process exit (via _run_finalizers) if an
    exitpriority was given.
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        assert exitpriority is None or type(exitpriority) is int

        if obj is not None:
            # When obj is collected the weakref invokes self, which runs
            # the callback.
            self._weakref = weakref.ref(obj, self)
        else:
            # No object to watch: the finalizer can only fire at exit,
            # so a priority is mandatory.
            assert exitpriority is not None

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        # The key both orders finalizers (priority, then creation order)
        # and serves as the registry handle.
        self._key = (exitpriority, next(_finalizer_counter))

        _finalizer_registry[self._key] = self

    def __call__(self, wr=None):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            sub_debug('finalizer calling %s with args %s and kwargs %s',
                      self._callback, self._args, self._kwargs)
            res = self._callback(*self._args, **self._kwargs)
            # Drop all references so the targets can be collected.
            self._weakref = self._callback = self._args = \
                self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            # Already run or already cancelled.
            pass
        else:
            self._weakref = self._callback = self._args = \
                self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            # Either no weakref was created or it has been reset to None.
            obj = None

        if obj is None:
            return '<Finalize object, dead>'

        x = '<Finalize object, callback=%s' % \
            getattr(self._callback, '__name__', self._callback)
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            # BUG FIX: was misspelled as 'exitprority'.
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
|
| 228 |
|
| 229 |
|
def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if minpriority is None:
        keep = lambda item: item[0][0] is not None
    else:
        keep = lambda item: item[0][0] is not None and item[0][0] >= minpriority

    # Keys are (priority, creation_index); reverse sort gives highest
    # priority first and, within a priority, newest first.
    pending = sorted((item for item in _finalizer_registry.items()
                      if keep(item)), reverse=True)

    for key, finalizer in pending:
        sub_debug('calling %s', finalizer)
        try:
            finalizer()
        except Exception:
            # A failing finalizer must not stop the others from running.
            import traceback
            traceback.print_exc()

    if minpriority is None:
        # Full shutdown pass: discard anything left over (priority None).
        _finalizer_registry.clear()
|
| 255 |
|
| 256 | #
|
| 257 | # Clean up on exit
|
| 258 | #
|
| 259 |
|
def is_exiting():
    '''
    Return true if the process is shutting down.
    '''
    # _exiting may be reset to None during late interpreter teardown,
    # which also counts as "exiting".
    if _exiting is None:
        return True
    return _exiting
|
| 265 |
|
# Shutdown flag consulted by is_exiting(); False while the process runs.
_exiting = False
|
| 267 |
|
def _exit_function():
    '''
    Clean up at process exit: run priority finalizers, terminate daemonic
    children, join the rest, then run the remaining finalizers.
    '''
    global _exiting
    # BUG FIX: the global was declared but never assigned, so is_exiting()
    # could never report True.  Record that shutdown has started.
    _exiting = True

    info('process shutting down')
    debug('running all "atexit" finalizers with priority >= 0')
    _run_finalizers(0)

    # Daemonic children are never joined; terminate them instead.
    for p in active_children():
        if p._daemonic:
            info('calling terminate() for daemon %s', p.get_name())
            p._popen.terminate()

    for p in active_children():
        info('calling join() for process %s', p.get_name())
        p.join()

    debug('running the remaining "atexit" finalizers')
    _run_finalizers()

atexit.register(_exit_function)
|
| 288 |
|
| 289 | #
|
| 290 | # Some fork aware types
|
| 291 | #
|
| 292 |
|
class ForkAwareThreadLock(object):
    '''
    Thread lock which is replaced by a fresh, unlocked lock in forked
    children (a lock held at fork time would otherwise deadlock the child).
    '''
    def __init__(self):
        lock = threading.Lock()
        self._lock = lock
        self.acquire = lock.acquire
        self.release = lock.release
        # Re-running __init__ after fork installs a brand-new lock.
        register_after_fork(self, ForkAwareThreadLock.__init__)
|
| 299 |
|
class ForkAwareLocal(threading.local):
    '''
    Thread-local storage whose contents are cleared in forked children.
    '''
    def __init__(self):
        # Wipe the per-thread dict after a fork; the child starts empty.
        register_after_fork(self, lambda obj: obj.__dict__.clear())

    def __reduce__(self):
        # Pickles as a fresh, empty instance.
        return type(self), ()
|
| 305 |
|
| 306 | #
|
| 307 | # Try making some callable types picklable
|
| 308 | #
|
| 309 |
|
def _reduce_method(m):
    '''
    Pickle a bound or unbound method as a getattr() call on its owner.
    '''
    # Unbound methods (im_self is None) are looked up on the class.
    owner = m.im_class if m.im_self is None else m.im_self
    return getattr, (owner, m.im_func.func_name)
copy_reg.pickle(type(Finalize.__init__), _reduce_method)
|
| 316 |
|
def _reduce_method_descriptor(m):
    '''
    Pickle an unbound built-in method descriptor (e.g. list.append).
    '''
    owner, name = m.__objclass__, m.__name__
    return getattr, (owner, name)
copy_reg.pickle(type(list.append), _reduce_method_descriptor)
copy_reg.pickle(type(int.__add__), _reduce_method_descriptor)
|
| 321 |
|
def _reduce_builtin_function_or_method(m):
    '''
    Pickle a bound built-in method (e.g. some_list.append) via getattr().
    '''
    owner, name = m.__self__, m.__name__
    return getattr, (owner, name)
copy_reg.pickle(type(list().append), _reduce_builtin_function_or_method)
copy_reg.pickle(type(int().__add__), _reduce_builtin_function_or_method)
|
| 326 |
|
try:
    from functools import partial
except ImportError:
    # Interpreter without functools.partial: nothing to register.
    pass
else:
    def _rebuild_partial(func, args, keywords):
        '''Recreate a partial from its pieces (counterpart of _reduce_partial).'''
        return partial(func, *args, **keywords)
    def _reduce_partial(p):
        '''Pickle functools.partial objects via _rebuild_partial.'''
        return _rebuild_partial, (p.func, p.args, p.keywords or {})
    copy_reg.pickle(partial, _reduce_partial)
|