"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>,
# Raymond Hettinger <python at rcn.com>,
# and Łukasz Langa <lukasz at langa.pl>.
# Copyright (C) 2006-2013 Python Software Foundation.
# See C source code for _functools credits/copyright

__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce',
           'TopologicalSorter', 'CycleError',
           'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
           "cached_property"]

from abc import get_cache_token
from collections import namedtuple
# import types, weakref  # Deferred to single_dispatch()
from reprlib import recursive_repr
from _thread import RLock


################################################################################
### update_wrapper() and wraps() decorator
################################################################################

# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection

WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    for attr in assigned:
        try:
            value = getattr(wrapped, attr)
        except AttributeError:
            pass
        else:
            setattr(wrapper, attr, value)
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper

def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    return partial(update_wrapper, wrapped=wrapped,
                   assigned=assigned, updated=updated)
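# Illustrative sketch (not part of the module): a typical use of @wraps so a
# decorated function keeps its original metadata.  The names below are
# hypothetical.
#
#     >>> def log_call(func):
#     ...     @wraps(func)
#     ...     def inner(*args, **kwargs):
#     ...         print('calling', func.__name__)
#     ...         return func(*args, **kwargs)
#     ...     return inner
#     >>> @log_call
#     ... def greet():
#     ...     'Say hello.'
#     >>> greet.__name__, greet.__doc__
#     ('greet', 'Say hello.')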


################################################################################
### total_ordering class decorator
################################################################################

# The total ordering functions all invoke the root magic method directly
# rather than using the corresponding operator. This avoids possible
# infinite recursion that could occur when the operator dispatch logic
# detects a NotImplemented result and then calls a reflected method.

def _gt_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _le_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'
    op_result = self.__lt__(other)
    return op_result or self == other

def _ge_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a < b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _ge_from_le(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _lt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _gt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a <= b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _lt_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _ge_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'
    op_result = self.__gt__(other)
    return op_result or self == other

def _le_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a > b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _le_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _gt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _lt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a >= b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}

def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Find user-defined comparisons (not those inherited from object).
    roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)}
    if not roots:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(roots)   # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in _convert[root]:
        if opname not in roots:
            opfunc.__name__ = opname
            setattr(cls, opname, opfunc)
    return cls
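# Illustrative sketch (not part of the module): @total_ordering fills in the
# missing comparison methods from __eq__ plus one ordering method.  The class
# below is hypothetical.
#
#     >>> @total_ordering
#     ... class Version:
#     ...     def __init__(self, number):
#     ...         self.number = number
#     ...     def __eq__(self, other):
#     ...         return self.number == other.number
#     ...     def __lt__(self, other):
#     ...         return self.number < other.number
#     >>> Version(1) <= Version(2), Version(2) >= Version(1)
#     (True, True)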

################################################################################
### topological sort
################################################################################

_NODE_OUT = -1
_NODE_DONE = -2


class _NodeInfo:
    __slots__ = 'node', 'npredecessors', 'successors'

    def __init__(self, node):
        # The node this class is augmenting.
        self.node = node

        # Number of predecessors, generally >= 0. When this value falls to 0,
        # and is returned by get_ready(), this is set to _NODE_OUT and when the
        # node is marked done by a call to done(), set to _NODE_DONE.
        self.npredecessors = 0

        # List of successor nodes. The list can contain duplicated elements as
        # long as they're all reflected in the successor's npredecessors attribute.
        self.successors = []


class CycleError(ValueError):
    """Subclass of ValueError raised by TopologicalSorter if cycles exist in the graph

    If multiple cycles exist, only one of them (chosen arbitrarily) will be reported
    and included in the exception. The detected cycle can be accessed via the second
    element in the *args* attribute of the exception instance and consists of a list
    of nodes, such that each node is, in the graph, an immediate predecessor of the
    next node in the list. In the reported list, the first and the last node will be
    the same, to make it clear that it is cyclic.
    """
    pass


class TopologicalSorter:
    """Provides functionality to topologically sort a graph of hashable nodes"""

    def __init__(self, graph=None):
        self._node2info = {}
        self._ready_nodes = None
        self._npassedout = 0
        self._nfinished = 0

        if graph is not None:
            for node, predecessors in graph.items():
                self.add(node, *predecessors)

    def _get_nodeinfo(self, node):
        if (result := self._node2info.get(node)) is None:
            self._node2info[node] = result = _NodeInfo(node)
        return result

    def add(self, node, *predecessors):
        """Add a new node and its predecessors to the graph.

        Both the *node* and all elements in *predecessors* must be hashable.

        If called multiple times with the same node argument, the set of dependencies
        will be the union of all dependencies passed in.

        It is possible to add a node with no dependencies (*predecessors* is not provided)
        as well as to provide a dependency twice. If a node that has not been provided before
        is included among *predecessors* it will be automatically added to the graph with
        no predecessors of its own.

        Raises ValueError if called after "prepare".
        """
        if self._ready_nodes is not None:
            raise ValueError("Nodes cannot be added after a call to prepare()")

        # Create the node -> predecessor edges
        nodeinfo = self._get_nodeinfo(node)
        nodeinfo.npredecessors += len(predecessors)

        # Create the predecessor -> node edges
        for pred in predecessors:
            pred_info = self._get_nodeinfo(pred)
            pred_info.successors.append(node)

    def prepare(self):
        """Mark the graph as finished and check for cycles in the graph.

        If any cycle is detected, "CycleError" will be raised, but "get_ready" can
        still be used to obtain as many nodes as possible until cycles block more
        progress. After a call to this function, the graph cannot be modified and
        therefore no more nodes can be added using "add".
        """
        if self._ready_nodes is not None:
            raise ValueError("cannot prepare() more than once")

        self._ready_nodes = [i.node for i in self._node2info.values()
                             if i.npredecessors == 0]
        # ready_nodes is set before we look for cycles on purpose:
        # if the user wants to catch the CycleError, that's fine,
        # they can continue using the instance to grab as many
        # nodes as possible before cycles block more progress
        cycle = self._find_cycle()
        if cycle:
            raise CycleError(f"nodes are in a cycle", cycle)

    def get_ready(self):
        """Return a tuple of all the nodes that are ready.

        Initially it returns all nodes with no predecessors; once those are marked
        as processed by calling "done", further calls will return all new nodes that
        have all their predecessors already processed. Once no more progress can be made,
        empty tuples are returned.

        Raises ValueError if called without calling "prepare" previously.
        """
        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")

        # Get the nodes that are ready and mark them
        result = tuple(self._ready_nodes)
        n2i = self._node2info
        for node in result:
            n2i[node].npredecessors = _NODE_OUT

        # Clean the list of nodes that are ready and update
        # the counter of nodes that we have returned.
        self._ready_nodes.clear()
        self._npassedout += len(result)

        return result

    def is_active(self):
        """Return ``True`` if more progress can be made and ``False`` otherwise.

        Progress can be made if cycles do not block the resolution and either there
        are still nodes ready that haven't yet been returned by "get_ready" or the
        number of nodes marked "done" is less than the number that have been returned
        by "get_ready".

        Raises ValueError if called without calling "prepare" previously.
        """
        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")
        return self._nfinished < self._npassedout or bool(self._ready_nodes)

    def __bool__(self):
        return self.is_active()

    def done(self, *nodes):
        """Marks a set of nodes returned by "get_ready" as processed.

        This method unblocks any successor of each node in *nodes* for being returned
        in the future by a call to "get_ready".

        Raises :exc:`ValueError` if any node in *nodes* has already been marked as
        processed by a previous call to this method, if a node was not added to the
        graph by using "add", if called without calling "prepare" previously, or if
        a node has not yet been returned by "get_ready".
        """

        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")

        n2i = self._node2info

        for node in nodes:

            # Check if we know about this node (it was added previously using add())
            if (nodeinfo := n2i.get(node)) is None:
                raise ValueError(f"node {node!r} was not added using add()")

            # If the node has not been returned (marked as ready) previously, inform the user.
            stat = nodeinfo.npredecessors
            if stat != _NODE_OUT:
                if stat >= 0:
                    raise ValueError(f"node {node!r} was not passed out (still not ready)")
                elif stat == _NODE_DONE:
                    raise ValueError(f"node {node!r} was already marked done")
                else:
                    assert False, f"node {node!r}: unknown status {stat}"

            # Mark the node as processed
            nodeinfo.npredecessors = _NODE_DONE

            # Go to all the successors and reduce the number of predecessors, collecting all the ones
            # that are ready to be returned in the next get_ready() call.
            for successor in nodeinfo.successors:
                successor_info = n2i[successor]
                successor_info.npredecessors -= 1
                if successor_info.npredecessors == 0:
                    self._ready_nodes.append(successor)
            self._nfinished += 1

    def _find_cycle(self):
        n2i = self._node2info
        stack = []
        itstack = []
        seen = set()
        node2stacki = {}

        for node in n2i:
            if node in seen:
                continue

            while True:
                if node in seen:
                    # If we have already seen the node and it is in the
                    # current stack we have found a cycle.
                    if node in node2stacki:
                        return stack[node2stacki[node]:] + [node]
                    # else go on to get next successor
                else:
                    seen.add(node)
                    itstack.append(iter(n2i[node].successors).__next__)
                    node2stacki[node] = len(stack)
                    stack.append(node)

                # Backtrack to the topmost stack entry with
                # at least one more successor.
                while stack:
                    try:
                        node = itstack[-1]()
                        break
                    except StopIteration:
                        del node2stacki[stack.pop()]
                        itstack.pop()
                else:
                    break
        return None

    def static_order(self):
        """Returns an iterable of nodes in a topological order.

        The particular order that is returned may depend on the specific
        order in which the items were inserted in the graph.

        Using this method does not require calling "prepare" or "done". If any
        cycle is detected, :exc:`CycleError` will be raised.
        """
        self.prepare()
        while self.is_active():
            node_group = self.get_ready()
            yield from node_group
            self.done(*node_group)
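# Illustrative sketch (not part of the module): sorting a small dependency
# graph, where each key maps to the set of its predecessors.  The graph below
# is hypothetical and the exact order within a group is not guaranteed.
#
#     >>> ts = TopologicalSorter({"D": {"B", "C"}, "C": {"A"}, "B": {"A"}})
#     >>> print(*ts.static_order())      # one valid order, e.g.: A C B D
#
# The lower-level protocol drives the same machinery explicitly:
#
#     >>> ts = TopologicalSorter({"D": {"B", "C"}, "C": {"A"}, "B": {"A"}})
#     >>> ts.prepare()
#     >>> while ts.is_active():
#     ...     group = ts.get_ready()     # e.g. ('A',), then ('C', 'B'), then ('D',)
#     ...     ts.done(*group)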


################################################################################
### cmp_to_key() function converter
################################################################################

def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        __hash__ = None
    return K

try:
    from _functools import cmp_to_key
except ImportError:
    pass
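# Illustrative sketch (not part of the module): adapting an old-style cmp
# function for use as a sort key.  The comparator below is hypothetical.
#
#     >>> def reverse_numeric(x, y):
#     ...     return y - x
#     >>> sorted([5, 2, 4, 1, 3], key=cmp_to_key(reverse_numeric))
#     [5, 4, 3, 2, 1]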


################################################################################
### reduce() sequence to a single item
################################################################################

_initial_missing = object()

def reduce(function, sequence, initial=_initial_missing):
    """
    reduce(function, sequence[, initial]) -> value

    Apply a function of two arguments cumulatively to the items of a sequence,
    from left to right, so as to reduce the sequence to a single value.
    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
    ((((1+2)+3)+4)+5). If initial is present, it is placed before the items
    of the sequence in the calculation, and serves as a default when the
    sequence is empty.
    """

    it = iter(sequence)

    if initial is _initial_missing:
        try:
            value = next(it)
        except StopIteration:
            raise TypeError("reduce() of empty sequence with no initial value") from None
    else:
        value = initial

    for element in it:
        value = function(value, element)

    return value

try:
    from _functools import reduce
except ImportError:
    pass
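# Illustrative sketch (not part of the module): folding a sequence into a
# single value, with and without an initial value.
#
#     >>> reduce(lambda x, y: x + y, [1, 2, 3, 4, 5])
#     15
#     >>> reduce(lambda x, y: x + y, [], 0)    # initial value used for empty input
#     0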


################################################################################
### partial() argument application
################################################################################

# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        if not callable(func):
            raise TypeError("the first argument must be callable")

        if hasattr(func, "func"):
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        self = super(partial, cls).__new__(cls)

        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(self, /, *args, **keywords):
        keywords = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **keywords)

    @recursive_repr()
    def __repr__(self):
        qualname = type(self).__qualname__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        if type(self).__module__ == "functools":
            return f"functools.{qualname}({', '.join(args)})"
        return f"{qualname}({', '.join(args)})"

    def __reduce__(self):
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
            (kwds is not None and not isinstance(kwds, dict)) or
            (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args)  # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict:  # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds

try:
    from _functools import partial
except ImportError:
    pass
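# Illustrative sketch (not part of the module): pre-filling arguments with
# partial().
#
#     >>> basetwo = partial(int, base=2)
#     >>> basetwo('10010')
#     18
#     >>> basetwo.func, basetwo.args, basetwo.keywords
#     (<class 'int'>, (), {'base': 2})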

# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, /, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                            .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        keywords = ", ".join("{}={!r}".format(k, v)
                             for k, v in self.keywords.items())
        format_string = "{module}.{cls}({func}, {args}, {keywords})"
        return format_string.format(module=self.__class__.__module__,
                                    cls=self.__class__.__qualname__,
                                    func=self.func,
                                    args=args,
                                    keywords=keywords)

    def _make_unbound_method(self):
        def _method(cls_or_self, /, *args, **keywords):
            keywords = {**self.keywords, **keywords}
            return self.func(cls_or_self, *self.args, *args, **keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls=None):
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, "__isabstractmethod__", False)
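# Illustrative sketch (not part of the module): partialmethod() binds self
# automatically, unlike partial().  The class below is hypothetical.
#
#     >>> class Cell:
#     ...     def __init__(self):
#     ...         self._alive = False
#     ...     def set_state(self, state):
#     ...         self._alive = bool(state)
#     ...     set_alive = partialmethod(set_state, True)
#     ...     set_dead = partialmethod(set_state, False)
#     >>> c = Cell()
#     >>> c.set_alive()
#     >>> c._alive
#     True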

# Helper functions

def _unwrap_partial(func):
    while isinstance(func, partial):
        func = func.func
    return func

################################################################################
### LRU Cache function decorator
################################################################################

_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])

class _HashedSeq(list):
    """ This class guarantees that hash() will be called no more than once
        per element. This is important because the lru_cache() will hash
        the key multiple times on a cache miss.

    """

    __slots__ = 'hashvalue'

    def __init__(self, tup, hash=hash):
        self[:] = tup
        self.hashvalue = hash(tup)

    def __hash__(self):
        return self.hashvalue

def _make_key(args, kwds, typed,
              kwd_mark = (object(),),
              fasttypes = {int, str},
              tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments

    The key is constructed in a way that is as flat as possible rather than
    as a nested structure that would take more memory.

    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper. This
    saves space and improves lookup speed.

    """
    # All of the code below relies on kwds preserving the order input by the user.
    # Formerly, we sorted() the kwds before looping. The new way is *much*
    # faster; however, it means that f(x=1, y=2) will now be treated as a
    # distinct call from f(y=2, x=1) which will be cached separately.
    key = args
    if kwds:
        key += kwd_mark
        for item in kwds.items():
            key += item
    if typed:
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for v in kwds.values())
    elif len(key) == 1 and type(key[0]) in fasttypes:
        return key[0]
    return _HashedSeq(key)

def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

    """

    # Users should only access the lru_cache through its public API:
    #       cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    if isinstance(maxsize, int):
        # Negative maxsize is treated as 0
        if maxsize < 0:
            maxsize = 0
    elif callable(maxsize) and isinstance(typed, bool):
        # The user_function was passed in directly via the maxsize argument
        user_function, maxsize = maxsize, 128
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, user_function)
    elif maxsize is not None:
        raise TypeError(
            'Expected first argument to be an integer, a callable, or None')

    def decorating_function(user_function):
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, user_function)

    return decorating_function

def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update
            nonlocal misses
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released.  Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper

try:
    from _functools import _lru_cache_wrapper
except ImportError:
    pass
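# Illustrative sketch (not part of the module): memoizing a recursive function
# and inspecting the cache statistics.
#
#     >>> @lru_cache(maxsize=None)
#     ... def fib(n):
#     ...     return n if n < 2 else fib(n - 1) + fib(n - 2)
#     >>> [fib(n) for n in range(10)]
#     [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
#     >>> fib.cache_info()
#     CacheInfo(hits=16, misses=10, maxsize=None, currsize=10)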


################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################

def _c3_merge(sequences):
    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.

    Adapted from http://www.python.org/download/releases/2.3/mro/.

    """
    result = []
    while True:
        sequences = [s for s in sequences if s]   # purge empty sequences
        if not sequences:
            return result
        for s1 in sequences:   # find merge candidates among seq heads
            candidate = s1[0]
            for s2 in sequences:
                if candidate in s2[1:]:
                    candidate = None
                    break      # reject the current head, it appears later
            else:
                break
        if candidate is None:
            raise RuntimeError("Inconsistent hierarchy")
        result.append(candidate)
        # remove the chosen candidate
        for seq in sequences:
            if seq[0] == candidate:
                del seq[0]

def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
            ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    for base in abstract_bases:
        abcs.remove(base)
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )

def _compose_mro(cls, types):
    """Calculates the method resolution order for a given class *cls*.

    Includes relevant abstract base classes (with their respective bases) from
    the *types* iterable. Uses a modified C3 linearization algorithm.

    """
    bases = set(cls.__mro__)
    # Remove entries which are already present in the __mro__ or unrelated.
    def is_related(typ):
        return (typ not in bases and hasattr(typ, '__mro__')
                and issubclass(cls, typ))
    types = [n for n in types if is_related(n)]
    # Remove entries which are strict bases of other entries (they will end up
    # in the MRO anyway).
    def is_strict_base(typ):
        for other in types:
            if typ != other and typ in other.__mro__:
                return True
        return False
    types = [n for n in types if not is_strict_base(n)]
    # Subclasses of the ABCs in *types* which are also implemented by
    # *cls* can be used to stabilize ABC ordering.
    type_set = set(types)
    mro = []
    for typ in types:
        found = []
        for sub in typ.__subclasses__():
            if sub not in bases and issubclass(cls, sub):
                found.append([s for s in sub.__mro__ if s in type_set])
        if not found:
            mro.append(typ)
            continue
        # Favor subclasses with the biggest number of useful bases
        found.sort(key=len, reverse=True)
        for sub in found:
            for subcls in sub:
                if subcls not in mro:
                    mro.append(subcls)
    return _c3_mro(cls, abcs=mro)

def _find_impl(cls, registry):
    """Returns the best matching implementation from *registry* for type *cls*.

    Where there is no registered implementation for a specific type, its method
    resolution order is used to find a more generic implementation.

    Note: if *registry* does not contain an implementation for the base
    *object* type, this function may return None.

    """
    mro = _compose_mro(cls, registry.keys())
    match = None
    for t in mro:
        if match is not None:
            # If *match* is an implicit ABC but there is another unrelated,
            # equally matching implicit ABC, refuse the temptation to guess.
            if (t in registry and t not in cls.__mro__
                              and match not in cls.__mro__
                              and not issubclass(match, t)):
                raise RuntimeError("Ambiguous dispatch: {} or {}".format(
                    match, t))
            break
        if t in registry:
            match = t
    return registry.get(match)

def singledispatch(func):
    """Single-dispatch generic function decorator.

    Transforms a function into a generic function, which can have different
    behaviours depending upon the type of its first argument. The decorated
    function acts as the default implementation, and additional
    implementations can be registered using the register() attribute of the
    generic function.
    """
    # There are many programs that use functools without singledispatch, so we
    # trade off making singledispatch marginally slower for the benefit of
    # making start-up of such applications slightly faster.
    import types, weakref

    registry = {}
    dispatch_cache = weakref.WeakKeyDictionary()
    cache_token = None

    def dispatch(cls):
        """generic_func.dispatch(cls) -> <function implementation>

        Runs the dispatch algorithm to return the best available implementation
        for the given *cls* registered on *generic_func*.

        """
        nonlocal cache_token
        if cache_token is not None:
            current_token = get_cache_token()
            if cache_token != current_token:
                dispatch_cache.clear()
                cache_token = current_token
        try:
            impl = dispatch_cache[cls]
        except KeyError:
            try:
                impl = registry[cls]
            except KeyError:
                impl = _find_impl(cls, registry)
            dispatch_cache[cls] = impl
        return impl

    def register(cls, func=None):
        """generic_func.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_func*.

        """
        nonlocal cache_token
        if func is None:
            if isinstance(cls, type):
                return lambda f: register(cls, f)
            ann = getattr(cls, '__annotations__', {})
            if not ann:
                raise TypeError(
                    f"Invalid first argument to `register()`: {cls!r}. "
                    f"Use either `@register(some_class)` or plain `@register` "
                    f"on an annotated function."
                )
            func = cls

            # only import typing if annotation parsing is necessary
            from typing import get_type_hints
            argname, cls = next(iter(get_type_hints(func).items()))
            if not isinstance(cls, type):
                raise TypeError(
                    f"Invalid annotation for {argname!r}. "
                    f"{cls!r} is not a class."
                )
        registry[cls] = func
        if cache_token is None and hasattr(cls, '__abstractmethods__'):
            cache_token = get_cache_token()
        dispatch_cache.clear()
        return func

    def wrapper(*args, **kw):
        if not args:
            raise TypeError(f'{funcname} requires at least '
                            '1 positional argument')

        return dispatch(args[0].__class__)(*args, **kw)

    funcname = getattr(func, '__name__', 'singledispatch function')
    registry[object] = func
    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = types.MappingProxyType(registry)
    wrapper._clear_cache = dispatch_cache.clear
    update_wrapper(wrapper, func)
    return wrapper
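# Illustrative sketch (not part of the module): a generic function with one
# extra implementation registered by type annotation.  The function names are
# hypothetical.
#
#     >>> @singledispatch
#     ... def describe(arg):
#     ...     return f"object: {arg!r}"
#     >>> @describe.register
#     ... def _(arg: int):
#     ...     return f"int: {arg}"
#     >>> describe("x"), describe(3)
#     ("object: 'x'", 'int: 3')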


# Descriptor version
class singledispatchmethod:
    """Single-dispatch generic method descriptor.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError(f"{func!r} is not callable or a descriptor")

        self.dispatcher = singledispatch(func)
        self.func = func

    def register(self, cls, method=None):
        """generic_method.register(cls, func) -> func

        Registers a new implementation for the given *cls* on a *generic_method*.
        """
        return self.dispatcher.register(cls, func=method)

    def __get__(self, obj, cls=None):
        def _method(*args, **kwargs):
            method = self.dispatcher.dispatch(args[0].__class__)
            return method.__get__(obj, cls)(*args, **kwargs)

        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method.register = self.register
        update_wrapper(_method, self.func)
        return _method

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, '__isabstractmethod__', False)
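# Illustrative sketch (not part of the module): dispatching a method on the
# type of its first (non-self) argument.  The class below is hypothetical.
#
#     >>> class Negator:
#     ...     @singledispatchmethod
#     ...     def neg(self, arg):
#     ...         raise NotImplementedError("Cannot negate a " + type(arg).__name__)
#     ...     @neg.register
#     ...     def _(self, arg: int):
#     ...         return -arg
#     >>> Negator().neg(5)
#     -5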


################################################################################
### cached_property() - computed once per instance, cached as attribute
################################################################################

_NOT_FOUND = object()


class cached_property:
    def __init__(self, func):
        self.func = func
        self.attrname = None
        self.__doc__ = func.__doc__
        self.lock = RLock()

    def __set_name__(self, owner, name):
        if self.attrname is None:
            self.attrname = name
        elif name != self.attrname:
            raise TypeError(
                "Cannot assign the same cached_property to two different names "
                f"({self.attrname!r} and {name!r})."
            )

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        if self.attrname is None:
            raise TypeError(
                "Cannot use cached_property instance without calling __set_name__ on it.")
        try:
            cache = instance.__dict__
        except AttributeError:  # not all objects have __dict__ (e.g. class defines slots)
            msg = (
                f"No '__dict__' attribute on {type(instance).__name__!r} "
                f"instance to cache {self.attrname!r} property."
            )
            raise TypeError(msg) from None
        val = cache.get(self.attrname, _NOT_FOUND)
        if val is _NOT_FOUND:
            with self.lock:
                # check if another thread filled cache while we awaited lock
                val = cache.get(self.attrname, _NOT_FOUND)
                if val is _NOT_FOUND:
                    val = self.func(instance)
                    try:
                        cache[self.attrname] = val
                    except TypeError:
                        msg = (
                            f"The '__dict__' attribute on {type(instance).__name__!r} instance "
                            f"does not support item assignment for caching {self.attrname!r} property."
                        )
                        raise TypeError(msg) from None
        return val
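# Illustrative sketch (not part of the module): a property computed once per
# instance and then stored in the instance __dict__.  The class below is
# hypothetical.
#
#     >>> class Dataset:
#     ...     def __init__(self, seq):
#     ...         self._data = tuple(seq)
#     ...     @cached_property
#     ...     def total(self):
#     ...         print('computing...')
#     ...         return sum(self._data)
#     >>> d = Dataset([1, 2, 3])
#     >>> d.total
#     computing...
#     6
#     >>> d.total        # second access hits the cached value
#     6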