Benjamin Peterson | e711caf | 2008-06-11 16:44:04 +0000 | [diff] [blame] | 1 | # |
| 2 | # Module providing the `Pool` class for managing a process pool |
| 3 | # |
| 4 | # multiprocessing/pool.py |
| 5 | # |
R. David Murray | 3fc969a | 2010-12-14 01:38:16 +0000 | [diff] [blame] | 6 | # Copyright (c) 2006-2008, R Oudkerk |
| 7 | # All rights reserved. |
| 8 | # |
| 9 | # Redistribution and use in source and binary forms, with or without |
| 10 | # modification, are permitted provided that the following conditions |
| 11 | # are met: |
| 12 | # |
| 13 | # 1. Redistributions of source code must retain the above copyright |
| 14 | # notice, this list of conditions and the following disclaimer. |
| 15 | # 2. Redistributions in binary form must reproduce the above copyright |
| 16 | # notice, this list of conditions and the following disclaimer in the |
| 17 | # documentation and/or other materials provided with the distribution. |
| 18 | # 3. Neither the name of author nor the names of any contributors may be |
| 19 | # used to endorse or promote products derived from this software |
| 20 | # without specific prior written permission. |
| 21 | # |
| 22 | # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND |
| 23 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 25 | # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 28 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 29 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 30 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 31 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 32 | # SUCH DAMAGE. |
Benjamin Peterson | e711caf | 2008-06-11 16:44:04 +0000 | [diff] [blame] | 33 | # |
| 34 | |
__all__ = ['Pool']    # only the Pool class is public API; the rest is internal
| 36 | |
| 37 | # |
| 38 | # Imports |
| 39 | # |
| 40 | |
| 41 | import threading |
| 42 | import queue |
| 43 | import itertools |
| 44 | import collections |
| 45 | import time |
| 46 | |
| 47 | from multiprocessing import Process, cpu_count, TimeoutError |
| 48 | from multiprocessing.util import Finalize, debug |
| 49 | |
| 50 | # |
| 51 | # Constants representing the state of a pool |
| 52 | # |
| 53 | |
RUN = 0        # pool is accepting and running tasks
CLOSE = 1      # close() called: no new tasks, finish what is queued
TERMINATE = 2  # terminate() called: abandon outstanding work and shut down

#
# Miscellaneous
#

# Module-global source of unique job ids, shared by all result objects.
job_counter = itertools.count()
| 63 | |
def mapstar(args):
    """Unpack *args* (a (func, iterable, ...) tuple) into the builtin
    map() and realize the outcome as a list."""
    return [value for value in map(*args)]
| 66 | |
def starmapstar(args):
    """Apply args[0] once per argument tuple in args[1], unpacking each
    tuple into positional arguments, and collect the results in a list."""
    func, argument_tuples = args[0], args[1]
    return [func(*argtup) for argtup in argument_tuples]
| 69 | |
Benjamin Peterson | e711caf | 2008-06-11 16:44:04 +0000 | [diff] [blame] | 70 | # |
| 71 | # Code run by worker processes |
| 72 | # |
| 73 | |
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Keep only repr() strings: the original objects may themselves be
        # the reason the result could not be pickled.
        self.exc, self.value = repr(exc), repr(value)
        super().__init__(self.exc, self.value)

    def __str__(self):
        template = "Error sending result: '%s'. Reason: '%s'"
        return template % (self.value, self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: %s>" % str(self)
| 89 | |
| 90 | |
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Main loop of a pool worker process.

    Repeatedly takes (job, i, func, args, kwds) tasks from *inqueue*,
    runs func(*args, **kwds) and puts (job, i, (success, value)) on
    *outqueue*.  Exits when a None sentinel is read, the queue breaks,
    or *maxtasks* tasks have been completed.

    Raises AssertionError if *maxtasks* is neither None nor a positive
    integer.  (Explicit raise, not `assert`, so the check survives -O;
    AssertionError is kept for backward compatibility.)
    """
    if maxtasks is not None and not (isinstance(maxtasks, int)
                                     and maxtasks > 0):
        raise AssertionError('maxtasks must be None or a positive int, '
                             'got %r' % (maxtasks,))
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # The child inherits both pipe ends of each SimpleQueue; close the
        # ends this process never uses so EOF can be detected properly.
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    # maxtasks is validated above, so a plain counter comparison suffices.
    while maxtasks is None or completed < maxtasks:
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break

        if task is None:
            debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            # Failures travel back to the caller as (False, exception).
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The result itself may be unpicklable; send a picklable
            # wrapper describing the problem instead.
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
Benjamin Peterson | e711caf | 2008-06-11 16:44:04 +0000 | [diff] [blame] | 128 | |
| 129 | # |
| 130 | # Class representing a process pool |
| 131 | # |
| 132 | |
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # Worker process class; ThreadPool overrides this attribute so the same
    # machinery can drive threads instead of processes.
    Process = Process

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        # _taskqueue feeds the task-handler thread; _cache maps job ids to
        # the pending result objects (ApplyResult/MapResult/IMapIterator).
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                # cpu_count() may be unavailable on some platforms.
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        # Thread that reaps exited workers and starts replacements (needed
        # in particular when maxtasksperchild retires workers).
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        # Thread that moves submitted tasks from _taskqueue to the workers.
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Thread that dispatches worker results to the result objects.
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        # Guarantee teardown at garbage collection / interpreter exit even
        # if terminate() is never called explicitly.
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime. Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so deletion by index stays valid.
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        """Create the worker-facing queues and fast put/get accessors.

        Overridden by ThreadPool, which uses plain queue.Queue objects.
        """
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        # Bypass the SimpleQueue wrappers for speed in the handler threads.
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        assert self._state == RUN
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            # Batch the iterable so each task carries `chunksize` items.
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        assert self._state == RUN
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        assert self._state == RUN
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        assert self._state == RUN
        # BUGFIX: callback/error_callback were previously dropped here, so
        # map_async() silently ignored them; forward them to _map_async.
        return self._map_async(func, iterable, mapstar, chunksize,
                               callback, error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
            error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if not hasattr(iterable, '__len__'):
            # len() is needed below to size the MapResult, so materialize
            # arbitrary iterables.
            iterable = list(iterable)

        if chunksize is None:
            # Default: roughly four chunks per worker process.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        """Worker-handler thread: keep the pool populated until shutdown."""
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        """Task-handler thread: feed queued tasks to the workers.

        Each taskqueue item is (task_sequence, set_length_callback_or_None);
        a None item is the shutdown sentinel.
        """
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                if set_length:
                    # Tell the IMapIterator how many results to expect.
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            # Inner loop broke out early: stop handling tasks altogether.
            break
        else:
            debug('task handler got sentinel')


        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')

        debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        """Result-handler thread: route (job, i, result) triples to cache."""
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Result object already removed from the cache; drop it.
                pass

        # Drain remaining results unless told to terminate outright.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return

            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass

        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass

        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        """Yield (func, batch) pairs, each batch holding up to `size` items."""
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        '''
        Stop accepting new tasks; queued tasks will still be completed.
        '''
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        '''
        Abandon outstanding work and stop the workers immediately.
        '''
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        '''
        Wait for handler threads and workers to exit.  Must be preceded by
        close() or terminate().
        '''
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        # NOTE: the read lock is acquired and deliberately never released --
        # the pool is being torn down, and holding it keeps workers from
        # consuming the items we drain here.
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        """Tear down queues, handler threads and workers (via util.Finalize)."""
        # this is guaranteed to only be called once
        debug('finalizing pool')

        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE

        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        assert result_handler.is_alive() or len(cache) == 0

        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        task_handler.join()

        debug('joining result handler')
        result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()
Benjamin Peterson | e711caf | 2008-06-11 16:44:04 +0000 | [diff] [blame] | 546 | |
| 547 | # |
| 548 | # Class whose instances are returned by `Pool.apply_async()` |
| 549 | # |
| 550 | |
class ApplyResult(object):
    '''
    Result handle returned by `Pool.apply_async()`.

    The result handler thread delivers the outcome through `_set()`; user
    code polls with `ready()`/`successful()` or blocks in `wait()`/`get()`.
    '''

    def __init__(self, cache, callback, error_callback):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._ready = False
        self._callback = callback
        self._error_callback = error_callback
        # Register ourselves so the result handler can find us by job id.
        cache[self._job] = self

    def ready(self):
        '''Return True once a result (or error) has been delivered.'''
        return self._ready

    def successful(self):
        '''Return True if the call completed without raising.

        Only meaningful after the result is ready.
        '''
        assert self._ready
        return self._success

    def wait(self, timeout=None):
        '''Block until the result arrives or `timeout` seconds elapse.'''
        with self._cond:
            if not self._ready:
                self._cond.wait(timeout)

    def get(self, timeout=None):
        '''Return the call's result, re-raising its exception on failure.'''
        self.wait(timeout)
        if not self._ready:
            raise TimeoutError
        if self._success:
            return self._value
        raise self._value

    def _set(self, i, obj):
        # Called from the result handler thread with a (success, value) pair.
        self._success, self._value = obj
        if self._success and self._callback:
            self._callback(self._value)
        if not self._success and self._error_callback:
            self._error_callback(self._value)
        with self._cond:
            self._ready = True
            self._cond.notify()
        del self._cache[self._job]
| 599 | |
| 600 | # |
| 601 | # Class whose instances are returned by `Pool.map_async()` |
| 602 | # |
| 603 | |
class MapResult(ApplyResult):
    '''
    Result object returned by `Pool.map_async()`; assembles per-chunk
    results into a single list.
    '''

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty iterable: no chunks will ever arrive, so the result is
            # ready immediately.  BUGFIX: also drop the cache entry here --
            # no _set() call will ever remove it, and a stale entry keeps
            # the result/worker handlers waiting forever (bpo-12157).
            self._number_left = 0
            self._ready = True
            del cache[self._job]
        else:
            # Number of chunks expected, rounding up for the partial chunk.
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # Called once per completed chunk; `i` is the chunk index and
        # success_result is a (success, value) pair.
        success, result = success_result
        if success:
            # Splice the chunk's results into their slot of the output list.
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._cond.acquire()
                try:
                    self._ready = True
                    self._cond.notify()
                finally:
                    self._cond.release()
        else:
            # The first failing chunk poisons the whole map.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._cond.acquire()
            try:
                self._ready = True
                self._cond.notify()
            finally:
                self._cond.release()
| 645 | |
| 646 | # |
| 647 | # Class whose instances are returned by `Pool.imap()` |
| 648 | # |
| 649 | |
class IMapIterator(object):
    '''
    Iterator over results delivered by `Pool.imap()`, handing them out in
    submission order.
    '''

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()   # results ready to be consumed
        self._index = 0                     # index of the next expected result
        self._length = None                 # total count; set by _set_length()
        self._unsorted = {}                 # early arrivals, keyed by index
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        '''Return the next result, blocking up to `timeout` seconds.'''
        with self._cond:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError

        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Deliver result `i`; out-of-order results wait in _unsorted until
        # every earlier index has been handed out.
        with self._cond:
            if self._index == i:
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    self._items.append(self._unsorted.pop(self._index))
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj

            if self._index == self._length:
                del self._cache[self._job]

    def _set_length(self, length):
        # Called once the task handler knows how many results will arrive.
        with self._cond:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
| 718 | |
| 719 | # |
| 720 | # Class whose instances are returned by `Pool.imap_unordered()` |
| 721 | # |
| 722 | |
class IMapUnorderedIterator(IMapIterator):
    '''
    Iterator returned by `Pool.imap_unordered()`: results come out in
    completion order rather than submission order.
    '''

    def _set(self, i, obj):
        # Every incoming result is immediately consumable; the index `i`
        # plays no role in ordering here.
        with self._cond:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
| 735 | |
| 736 | # |
| 737 | # |
| 738 | # |
| 739 | |
class ThreadPool(Pool):
    '''
    Pool variant whose workers are threads rather than processes.
    '''

    # Worker "processes" are thread-backed stand-ins from the dummy module.
    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues suffice here -- no pipes are involved,
        # so the "quick" accessors are just the queue methods themselves.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # Discard whatever is queued and place one sentinel per worker at
        # the head of the queue so every worker thread wakes up and exits.
        with inqueue.not_empty:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()