#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool']

#
# Imports
#

import threading
import queue
import itertools
import collections
import os
import time
import traceback

from multiprocessing import Process, TimeoutError
from multiprocessing.util import Finalize, debug

#
# Constants representing the state of a pool
#

RUN = 0
CLOSE = 1
TERMINATE = 2

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    return list(map(*args))

def starmapstar(args):
    return list(itertools.starmap(args[0], args[1]))

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    exc.__cause__ = RemoteTraceback(tb)
    return exc

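# How the hack works: a worker wraps a failing call's exception in
# ExceptionWithTraceback, whose __reduce__ makes unpickling in the parent
# process call rebuild_exc(), reattaching the formatted remote traceback to
# the original exception as its __cause__ (a RemoteTraceback).  Printing the
# local traceback therefore also shows the text of the remote one.
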
#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
75 """Wraps possible unpickleable errors, so they can be
76 safely sent through the socket."""

    def __init__(self, exc, value):
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
            self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)

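# Task protocol used above: each task taken from inqueue is a tuple
# (job, i, func, args, kwds), and each result put on outqueue is
# (job, i, (success_flag, value)), where `job` identifies the pending
# ApplyResult/MapResult/IMapIterator in the parent's cache and `i` is the
# position of the chunk within that job.
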
#
# Class representing a process pool
#

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    Process = Process

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                             )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
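        # Bound methods used to bypass the SimpleQueue wrapper: within this
        # process only the task handler thread sends on _inqueue and only the
        # result handler thread receives from _outqueue, so the queue locks
        # are not needed on those paths.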
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence an
        element `(a, b)` becomes the call `func(a, b)`.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

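        # Heuristic: the default chunksize aims for roughly four chunks per
        # worker process, rounded up so that every item is covered.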
        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        thread = threading.current_thread()

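        # Control flow note: the inner for/else calls set_length() only when a
        # whole task batch has been enqueued without interruption; breaking out
        # of the inner loop (state change or OSError) falls through to the
        # outer `break`, while the outer for/else runs only when the sentinel
        # None is read from taskqueue.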
        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except OSError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')


        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            debug('task handler got OSError when sending sentinels')

        debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

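# Illustrative usage (a minimal sketch, not part of the original module):
#
#     from multiprocessing import Pool
#
#     def square(x):
#         return x * x
#
#     if __name__ == '__main__':
#         with Pool(processes=4) as p:        # __exit__ calls terminate()
#             print(p.map(square, range(10)))
#             res = p.apply_async(square, (7,))
#             print(res.get(timeout=5))
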
#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):

    def __init__(self, cache, callback, error_callback):
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        return self._event.is_set()

    def successful(self):
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805

#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
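            # Number of chunked results still expected: ceil(length / chunksize).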
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()

#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()
        self._index = 0
        self._length = None
        self._unsorted = {}
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

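    # Results may arrive out of order: _set() buffers any result whose index
    # is ahead of the next expected position in self._unsorted and releases
    # the buffered items once the gap has been filled.
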
    def _set(self, i, obj):
        self._cond.acquire()
        try:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()

#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()

#
#
#

class ThreadPool(Pool):

    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
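
# Illustrative usage (a minimal sketch, not part of the original module):
# ThreadPool exposes the same interface as Pool but runs tasks in threads of
# the current process, which suits I/O-bound callables:
#
#     from multiprocessing.pool import ThreadPool
#
#     def work(n):
#         return n * 2          # stand-in for a blocking I/O call
#
#     if __name__ == '__main__':
#         with ThreadPool(4) as tp:
#             print(sorted(tp.imap_unordered(work, range(8))))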