"""functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>,
# Raymond Hettinger <python at rcn.com>,
# and Łukasz Langa <lukasz at langa.pl>.
# Copyright (C) 2006-2013 Python Software Foundation.
# See C source code for _functools credits/copyright

__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce',
           'TopologicalSorter', 'CycleError',
           'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
           'cached_property']

from abc import get_cache_token
from collections import namedtuple
# import types, weakref  # Deferred to single_dispatch()
from reprlib import recursive_repr
from _thread import RLock


################################################################################
### update_wrapper() and wraps() decorator
################################################################################

# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection

WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
                       '__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
                   wrapped,
                   assigned = WRAPPER_ASSIGNMENTS,
                   updated = WRAPPER_UPDATES):
    """Update a wrapper function to look like the wrapped function

       wrapper is the function to be updated
       wrapped is the original function
       assigned is a tuple naming the attributes assigned directly
       from the wrapped function to the wrapper function (defaults to
       functools.WRAPPER_ASSIGNMENTS)
       updated is a tuple naming the attributes of the wrapper that
       are updated with the corresponding attribute from the wrapped
       function (defaults to functools.WRAPPER_UPDATES)
    """
    for attr in assigned:
        try:
            value = getattr(wrapped, attr)
        except AttributeError:
            pass
        else:
            setattr(wrapper, attr, value)
    for attr in updated:
        getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
    # Issue #17482: set __wrapped__ last so we don't inadvertently copy it
    # from the wrapped function when updating __dict__
    wrapper.__wrapped__ = wrapped
    # Return the wrapper so this can be used as a decorator via partial()
    return wrapper

def wraps(wrapped,
          assigned = WRAPPER_ASSIGNMENTS,
          updated = WRAPPER_UPDATES):
    """Decorator factory to apply update_wrapper() to a wrapper function

       Returns a decorator that invokes update_wrapper() with the decorated
       function as the wrapper argument and the arguments to wraps() as the
       remaining arguments. Default arguments are as for update_wrapper().
       This is a convenience function to simplify applying partial() to
       update_wrapper().
    """
    return partial(update_wrapper, wrapped=wrapped,
                   assigned=assigned, updated=updated)
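
# Usage sketch (illustrative only, not part of the module): a typical way to
# apply wraps() so a decorated function keeps the original function's
# metadata. The names `logged` and `greet` are made up for this example.
#
#     def logged(func):
#         @wraps(func)
#         def wrapper(*args, **kwargs):
#             print("calling", func.__name__)
#             return func(*args, **kwargs)
#         return wrapper
#
#     @logged
#     def greet(name):
#         "Say hello."
#         return f"Hello, {name}"
#
#     # greet.__name__ == 'greet' and greet.__doc__ == 'Say hello.'
#     # instead of reflecting the inner wrapper function.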


################################################################################
### total_ordering class decorator
################################################################################

# The total ordering functions all invoke the root magic method directly
# rather than using the corresponding operator. This avoids possible
# infinite recursion that could occur when the operator dispatch logic
# detects a NotImplemented result and then calls a reflected method.

def _gt_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a < b) and (a != b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _le_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (a < b) or (a == b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result or self == other

def _ge_from_lt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a < b).'
    op_result = self.__lt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _ge_from_le(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (not a <= b) or (a == b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _lt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (a <= b) and (a != b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _gt_from_le(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (not a <= b).'
    op_result = self.__le__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _lt_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a > b) and (a != b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result and self != other

def _ge_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a >= b. Computed by @total_ordering from (a > b) or (a == b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result or self == other

def _le_from_gt(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a > b).'
    op_result = self.__gt__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

def _le_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a <= b. Computed by @total_ordering from (not a >= b) or (a == b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result or self == other

def _gt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a > b. Computed by @total_ordering from (a >= b) and (a != b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return op_result and self != other

def _lt_from_ge(self, other, NotImplemented=NotImplemented):
    'Return a < b. Computed by @total_ordering from (not a >= b).'
    op_result = self.__ge__(other)
    if op_result is NotImplemented:
        return op_result
    return not op_result

_convert = {
    '__lt__': [('__gt__', _gt_from_lt),
               ('__le__', _le_from_lt),
               ('__ge__', _ge_from_lt)],
    '__le__': [('__ge__', _ge_from_le),
               ('__lt__', _lt_from_le),
               ('__gt__', _gt_from_le)],
    '__gt__': [('__lt__', _lt_from_gt),
               ('__ge__', _ge_from_gt),
               ('__le__', _le_from_gt)],
    '__ge__': [('__le__', _le_from_ge),
               ('__gt__', _gt_from_ge),
               ('__lt__', _lt_from_ge)]
}

def total_ordering(cls):
    """Class decorator that fills in missing ordering methods"""
    # Find user-defined comparisons (not those inherited from object).
    roots = {op for op in _convert if getattr(cls, op, None) is not getattr(object, op, None)}
    if not roots:
        raise ValueError('must define at least one ordering operation: < > <= >=')
    root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
    for opname, opfunc in _convert[root]:
        if opname not in roots:
            opfunc.__name__ = opname
            setattr(cls, opname, opfunc)
    return cls
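
# Usage sketch (illustrative only): with __eq__ plus one rich comparison
# defined, @total_ordering supplies the remaining ones. The Version class is
# made up for this example.
#
#     @total_ordering
#     class Version:
#         def __init__(self, number):
#             self.number = number
#         def __eq__(self, other):
#             return self.number == other.number
#         def __lt__(self, other):
#             return self.number < other.number
#
#     # Version(1) <= Version(2), Version(2) > Version(1), etc. now all work.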

################################################################################
### topological sort
################################################################################

_NODE_OUT = -1
_NODE_DONE = -2


class _NodeInfo:
    __slots__ = 'node', 'npredecessors', 'successors'

    def __init__(self, node):
        # The node this class is augmenting.
        self.node = node

        # Number of predecessors, generally >= 0. When this value falls to 0,
        # and is returned by get_ready(), this is set to _NODE_OUT and when the
        # node is marked done by a call to done(), set to _NODE_DONE.
        self.npredecessors = 0

        # List of successor nodes. The list can contain duplicated elements as
        # long as they're all reflected in the successor's npredecessors attribute.
        self.successors = []


class CycleError(ValueError):
    """Subclass of ValueError raised by TopologicalSorter if cycles exist in the graph

    If multiple cycles exist, only one undefined choice among them will be reported
    and included in the exception. The detected cycle can be accessed via the second
    element in the *args* attribute of the exception instance and consists of a list
    of nodes, such that each node is, in the graph, an immediate predecessor of the
    next node in the list. In the reported list, the first and the last node will be
    the same, to make it clear that it is cyclic.
    """
    pass


class TopologicalSorter:
    """Provides functionality to topologically sort a graph of hashable nodes"""

    def __init__(self, graph=None):
        self._node2info = {}
        self._ready_nodes = None
        self._npassedout = 0
        self._nfinished = 0

        if graph is not None:
            for node, predecessors in graph.items():
                self.add(node, *predecessors)

    def _get_nodeinfo(self, node):
        if (result := self._node2info.get(node)) is None:
            self._node2info[node] = result = _NodeInfo(node)
        return result

    def add(self, node, *predecessors):
        """Add a new node and its predecessors to the graph.

        Both the *node* and all elements in *predecessors* must be hashable.

        If called multiple times with the same node argument, the set of dependencies
        will be the union of all dependencies passed in.

        It is possible to add a node with no dependencies (*predecessors* is not provided)
        as well as to provide a dependency twice. If a node that has not been provided before
        is included among *predecessors* it will be automatically added to the graph with
        no predecessors of its own.

        Raises ValueError if called after "prepare".
        """
        if self._ready_nodes is not None:
            raise ValueError("Nodes cannot be added after a call to prepare()")

        # Create the node -> predecessor edges
        nodeinfo = self._get_nodeinfo(node)
        nodeinfo.npredecessors += len(predecessors)

        # Create the predecessor -> node edges
        for pred in predecessors:
            pred_info = self._get_nodeinfo(pred)
            pred_info.successors.append(node)

    def prepare(self):
        """Mark the graph as finished and check for cycles in the graph.

        If any cycle is detected, "CycleError" will be raised, but "get_ready" can
        still be used to obtain as many nodes as possible until cycles block more
        progress. After a call to this function, the graph cannot be modified and
        therefore no more nodes can be added using "add".
        """
        if self._ready_nodes is not None:
            raise ValueError("cannot prepare() more than once")

        self._ready_nodes = [i.node for i in self._node2info.values()
                             if i.npredecessors == 0]
        # ready_nodes is set before we look for cycles on purpose:
        # if the user wants to catch the CycleError, that's fine,
        # they can continue using the instance to grab as many
        # nodes as possible before cycles block more progress
        cycle = self._find_cycle()
        if cycle:
            raise CycleError("nodes are in a cycle", cycle)

    def get_ready(self):
        """Return a tuple of all the nodes that are ready.

        Initially it returns all nodes with no predecessors; once those are marked
        as processed by calling "done", further calls will return all new nodes that
        have all their predecessors already processed. Once no more progress can be
        made, empty tuples are returned.

        Raises ValueError if called without calling "prepare" previously.
        """
        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")

        # Get the nodes that are ready and mark them
        result = tuple(self._ready_nodes)
        n2i = self._node2info
        for node in result:
            n2i[node].npredecessors = _NODE_OUT

        # Clean the list of nodes that are ready and update
        # the counter of nodes that we have returned.
        self._ready_nodes.clear()
        self._npassedout += len(result)

        return result

    def is_active(self):
        """Return ``True`` if more progress can be made and ``False`` otherwise.

        Progress can be made if cycles do not block the resolution and either there
        are still nodes ready that haven't yet been returned by "get_ready" or the
        number of nodes marked "done" is less than the number that have been returned
        by "get_ready".

        Raises ValueError if called without calling "prepare" previously.
        """
        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")
        return self._nfinished < self._npassedout or bool(self._ready_nodes)

    def __bool__(self):
        return self.is_active()

    def done(self, *nodes):
        """Marks a set of nodes returned by "get_ready" as processed.

        This method unblocks any successor of each node in *nodes* for being returned
        in the future by a call to "get_ready".

        Raises :exc:`ValueError` if any node in *nodes* has already been marked as
        processed by a previous call to this method, if a node was not added to the
        graph by using "add", if called without calling "prepare" previously, or if
        a node has not yet been returned by "get_ready".
        """

        if self._ready_nodes is None:
            raise ValueError("prepare() must be called first")

        n2i = self._node2info

        for node in nodes:

            # Check if we know about this node (it was added previously using add())
            if (nodeinfo := n2i.get(node)) is None:
                raise ValueError(f"node {node!r} was not added using add()")

            # If the node has not been returned (marked as ready) previously, inform the user.
            stat = nodeinfo.npredecessors
            if stat != _NODE_OUT:
                if stat >= 0:
                    raise ValueError(f"node {node!r} was not passed out (still not ready)")
                elif stat == _NODE_DONE:
                    raise ValueError(f"node {node!r} was already marked done")
                else:
                    assert False, f"node {node!r}: unknown status {stat}"

            # Mark the node as processed
            nodeinfo.npredecessors = _NODE_DONE

            # Go to all the successors and reduce the number of predecessors, collecting all the ones
            # that are ready to be returned in the next get_ready() call.
            for successor in nodeinfo.successors:
                successor_info = n2i[successor]
                successor_info.npredecessors -= 1
                if successor_info.npredecessors == 0:
                    self._ready_nodes.append(successor)
            self._nfinished += 1

    def _find_cycle(self):
        n2i = self._node2info
        stack = []
        itstack = []
        seen = set()
        node2stacki = {}

        for node in n2i:
            if node in seen:
                continue

            while True:
                if node in seen:
                    # If we have already seen the node and it is in the
                    # current stack, we have found a cycle.
                    if node in node2stacki:
                        return stack[node2stacki[node]:] + [node]
                    # else go on to get the next successor
                else:
                    seen.add(node)
                    itstack.append(iter(n2i[node].successors).__next__)
                    node2stacki[node] = len(stack)
                    stack.append(node)

                # Backtrack to the topmost stack entry with
                # at least one more successor.
                while stack:
                    try:
                        node = itstack[-1]()
                        break
                    except StopIteration:
                        del node2stacki[stack.pop()]
                        itstack.pop()
                else:
                    break
        return None

    def static_order(self):
        """Returns an iterable of nodes in a topological order.

        The particular order that is returned may depend on the specific
        order in which the items were inserted in the graph.

        Using this method does not require calling "prepare" or "done". If any
        cycle is detected, :exc:`CycleError` will be raised.
        """
        self.prepare()
        while self.is_active():
            node_group = self.get_ready()
            yield from node_group
            self.done(*node_group)
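
# Usage sketch (illustrative only): static_order() yields the nodes of a
# dependency graph so that every node comes after its predecessors. The
# example graph below is made up.
#
#     ts = TopologicalSorter({'compile': {'configure'},
#                             'test': {'compile'}})
#     order = list(ts.static_order())
#     # ['configure', 'compile', 'test']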


################################################################################
### cmp_to_key() function converter
################################################################################

def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function"""
    class K(object):
        __slots__ = ['obj']
        def __init__(self, obj):
            self.obj = obj
        def __lt__(self, other):
            return mycmp(self.obj, other.obj) < 0
        def __gt__(self, other):
            return mycmp(self.obj, other.obj) > 0
        def __eq__(self, other):
            return mycmp(self.obj, other.obj) == 0
        def __le__(self, other):
            return mycmp(self.obj, other.obj) <= 0
        def __ge__(self, other):
            return mycmp(self.obj, other.obj) >= 0
        __hash__ = None
    return K

try:
    from _functools import cmp_to_key
except ImportError:
    pass
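
# Usage sketch (illustrative only): adapting an old-style three-way comparison
# function for use with sorted(). The comparison function is made up.
#
#     def compare_lengths(a, b):
#         return len(a) - len(b)
#
#     sorted(['pear', 'fig', 'banana'], key=cmp_to_key(compare_lengths))
#     # ['fig', 'pear', 'banana']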


################################################################################
### reduce() sequence to a single item
################################################################################

_initial_missing = object()

def reduce(function, sequence, initial=_initial_missing):
    """
    reduce(function, sequence[, initial]) -> value

    Apply a function of two arguments cumulatively to the items of a sequence,
    from left to right, so as to reduce the sequence to a single value.
    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
    ((((1+2)+3)+4)+5). If initial is present, it is placed before the items
    of the sequence in the calculation, and serves as a default when the
    sequence is empty.
    """

    it = iter(sequence)

    if initial is _initial_missing:
        try:
            value = next(it)
        except StopIteration:
            raise TypeError("reduce() of empty sequence with no initial value") from None
    else:
        value = initial

    for element in it:
        value = function(value, element)

    return value

try:
    from _functools import reduce
except ImportError:
    pass
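
# Usage sketch (illustrative only): folding a sequence into a single value,
# with and without an initial value.
#
#     reduce(lambda acc, x: acc + x, [1, 2, 3, 4])    # 10
#     reduce(lambda acc, x: acc + x, [], 0)           # 0 (initial used for empty input)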


################################################################################
### partial() argument application
################################################################################

# Purely functional, no descriptor behaviour
class partial:
    """New function with partial application of the given arguments
    and keywords.
    """

    __slots__ = "func", "args", "keywords", "__dict__", "__weakref__"

    def __new__(cls, func, /, *args, **keywords):
        if not callable(func):
            raise TypeError("the first argument must be callable")

        if hasattr(func, "func"):
            args = func.args + args
            keywords = {**func.keywords, **keywords}
            func = func.func

        self = super(partial, cls).__new__(cls)

        self.func = func
        self.args = args
        self.keywords = keywords
        return self

    def __call__(self, /, *args, **keywords):
        keywords = {**self.keywords, **keywords}
        return self.func(*self.args, *args, **keywords)

    @recursive_repr()
    def __repr__(self):
        qualname = type(self).__qualname__
        args = [repr(self.func)]
        args.extend(repr(x) for x in self.args)
        args.extend(f"{k}={v!r}" for (k, v) in self.keywords.items())
        if type(self).__module__ == "functools":
            return f"functools.{qualname}({', '.join(args)})"
        return f"{qualname}({', '.join(args)})"

    def __reduce__(self):
        return type(self), (self.func,), (self.func, self.args,
               self.keywords or None, self.__dict__ or None)

    def __setstate__(self, state):
        if not isinstance(state, tuple):
            raise TypeError("argument to __setstate__ must be a tuple")
        if len(state) != 4:
            raise TypeError(f"expected 4 items in state, got {len(state)}")
        func, args, kwds, namespace = state
        if (not callable(func) or not isinstance(args, tuple) or
           (kwds is not None and not isinstance(kwds, dict)) or
           (namespace is not None and not isinstance(namespace, dict))):
            raise TypeError("invalid partial state")

        args = tuple(args)  # just in case it's a subclass
        if kwds is None:
            kwds = {}
        elif type(kwds) is not dict:  # XXX does it need to be *exactly* dict?
            kwds = dict(kwds)
        if namespace is None:
            namespace = {}

        self.__dict__ = namespace
        self.func = func
        self.args = args
        self.keywords = kwds

try:
    from _functools import partial
except ImportError:
    pass
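
# Usage sketch (illustrative only): freezing some arguments of a callable.
#
#     basetwo = partial(int, base=2)
#     basetwo('10010')    # 18
#     # repr(basetwo) shows the wrapped callable plus the frozen arguments.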

# Descriptor version
class partialmethod(object):
    """Method descriptor with partial application of the given arguments
    and keywords.

    Supports wrapping existing descriptors and handles non-descriptor
    callables as instance methods.
    """

    def __init__(self, func, /, *args, **keywords):
        if not callable(func) and not hasattr(func, "__get__"):
            raise TypeError("{!r} is not callable or a descriptor"
                            .format(func))

        # func could be a descriptor like classmethod which isn't callable,
        # so we can't inherit from partial (it verifies func is callable)
        if isinstance(func, partialmethod):
            # flattening is mandatory in order to place cls/self before all
            # other arguments
            # it's also more efficient since only one function will be called
            self.func = func.func
            self.args = func.args + args
            self.keywords = {**func.keywords, **keywords}
        else:
            self.func = func
            self.args = args
            self.keywords = keywords

    def __repr__(self):
        args = ", ".join(map(repr, self.args))
        keywords = ", ".join("{}={!r}".format(k, v)
                             for k, v in self.keywords.items())
        format_string = "{module}.{cls}({func}, {args}, {keywords})"
        return format_string.format(module=self.__class__.__module__,
                                    cls=self.__class__.__qualname__,
                                    func=self.func,
                                    args=args,
                                    keywords=keywords)

    def _make_unbound_method(self):
        def _method(cls_or_self, /, *args, **keywords):
            keywords = {**self.keywords, **keywords}
            return self.func(cls_or_self, *self.args, *args, **keywords)
        _method.__isabstractmethod__ = self.__isabstractmethod__
        _method._partialmethod = self
        return _method

    def __get__(self, obj, cls=None):
        get = getattr(self.func, "__get__", None)
        result = None
        if get is not None:
            new_func = get(obj, cls)
            if new_func is not self.func:
                # Assume __get__ returning something new indicates the
                # creation of an appropriate callable
                result = partial(new_func, *self.args, **self.keywords)
                try:
                    result.__self__ = new_func.__self__
                except AttributeError:
                    pass
        if result is None:
            # If the underlying descriptor didn't do anything, treat this
            # like an instance method
            result = self._make_unbound_method().__get__(obj, cls)
        return result

    @property
    def __isabstractmethod__(self):
        return getattr(self.func, "__isabstractmethod__", False)
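
# Usage sketch (illustrative only): partialmethod() behaves like partial() but
# is meant for use in a class body, so self/cls is still passed first. The
# Cell class is made up for this example.
#
#     class Cell:
#         def __init__(self):
#             self._alive = False
#         def set_state(self, state):
#             self._alive = bool(state)
#         set_alive = partialmethod(set_state, True)
#         set_dead = partialmethod(set_state, False)
#
#     c = Cell()
#     c.set_alive()
#     # c._alive is now True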

# Helper functions

def _unwrap_partial(func):
    while isinstance(func, partial):
        func = func.func
    return func

################################################################################
### LRU Cache function decorator
################################################################################

_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])

class _HashedSeq(list):
    """ This class guarantees that hash() will be called no more than once
        per element. This is important because the lru_cache() will hash
        the key multiple times on a cache miss.

    """

    __slots__ = 'hashvalue'

    def __init__(self, tup, hash=hash):
        self[:] = tup
        self.hashvalue = hash(tup)

    def __hash__(self):
        return self.hashvalue

def _make_key(args, kwds, typed,
              kwd_mark = (object(),),
              fasttypes = {int, str},
              tuple=tuple, type=type, len=len):
    """Make a cache key from optionally typed positional and keyword arguments

    The key is constructed in a way that is as flat as possible rather than
    as a nested structure that would take more memory.

    If there is only a single argument and its data type is known to cache
    its hash value, then that argument is returned without a wrapper. This
    saves space and improves lookup speed.

    """
    # All of the code below relies on kwds preserving the order input by the user.
    # Formerly, we sorted() the kwds before looping. The new way is *much*
    # faster; however, it means that f(x=1, y=2) will now be treated as a
    # distinct call from f(y=2, x=1) which will be cached separately.
    key = args
    if kwds:
        key += kwd_mark
        for item in kwds.items():
            key += item
    if typed:
        key += tuple(type(v) for v in args)
        if kwds:
            key += tuple(type(v) for v in kwds.values())
    elif len(key) == 1 and type(key[0]) in fasttypes:
        return key[0]
    return _HashedSeq(key)
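
# Key construction sketch (illustrative only): _make_key() flattens positional
# and keyword arguments into a single hashable sequence rather than nesting
# tuples. The calls below are hypothetical, just to show the shape of the keys.
#
#     _make_key((1, 2), {}, False)         # _HashedSeq([1, 2])
#     _make_key((1,), {'y': 2}, False)     # _HashedSeq([1, <kwd_mark>, 'y', 2])
#     _make_key((3,), {}, False)           # 3  (single fast-type argument)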

def lru_cache(maxsize=128, typed=False):
    """Least-recently-used cache decorator.

    If *maxsize* is set to None, the LRU features are disabled and the cache
    can grow without bound.

    If *typed* is True, arguments of different types will be cached separately.
    For example, f(3.0) and f(3) will be treated as distinct calls with
    distinct results.

    Arguments to the cached function must be hashable.

    View the cache statistics named tuple (hits, misses, maxsize, currsize)
    with f.cache_info(). Clear the cache and statistics with f.cache_clear().
    Access the underlying function with f.__wrapped__.

    See: http://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU)

    """

    # Users should only access the lru_cache through its public API:
    #     cache_info, cache_clear, and f.__wrapped__
    # The internals of the lru_cache are encapsulated for thread safety and
    # to allow the implementation to change (including a possible C version).

    if isinstance(maxsize, int):
        # Negative maxsize is treated as 0
        if maxsize < 0:
            maxsize = 0
    elif callable(maxsize) and isinstance(typed, bool):
        # The user_function was passed in directly via the maxsize argument
        user_function, maxsize = maxsize, 128
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, user_function)
    elif maxsize is not None:
        raise TypeError(
            'Expected first argument to be an integer, a callable, or None')

    def decorating_function(user_function):
        wrapper = _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo)
        wrapper.cache_parameters = lambda : {'maxsize': maxsize, 'typed': typed}
        return update_wrapper(wrapper, user_function)

    return decorating_function

def _lru_cache_wrapper(user_function, maxsize, typed, _CacheInfo):
    # Constants shared by all lru cache instances:
    sentinel = object()          # unique object used to signal cache misses
    make_key = _make_key         # build a key from the function arguments
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3   # names for the link fields

    cache = {}
    hits = misses = 0
    full = False
    cache_get = cache.get    # bound method to lookup a key or return None
    cache_len = cache.__len__  # get cache size without calling len()
    lock = RLock()           # because linkedlist updates aren't threadsafe
    root = []                # root of the circular doubly linked list
    root[:] = [root, root, None, None]     # initialize by pointing to self

    if maxsize == 0:

        def wrapper(*args, **kwds):
            # No caching -- just a statistics update
            nonlocal misses
            misses += 1
            result = user_function(*args, **kwds)
            return result

    elif maxsize is None:

        def wrapper(*args, **kwds):
            # Simple caching without ordering or size limit
            nonlocal hits, misses
            key = make_key(args, kwds, typed)
            result = cache_get(key, sentinel)
            if result is not sentinel:
                hits += 1
                return result
            misses += 1
            result = user_function(*args, **kwds)
            cache[key] = result
            return result

    else:

        def wrapper(*args, **kwds):
            # Size limited caching that tracks accesses by recency
            nonlocal root, hits, misses, full
            key = make_key(args, kwds, typed)
            with lock:
                link = cache_get(key)
                if link is not None:
                    # Move the link to the front of the circular queue
                    link_prev, link_next, _key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    hits += 1
                    return result
                misses += 1
            result = user_function(*args, **kwds)
            with lock:
                if key in cache:
                    # Getting here means that this same key was added to the
                    # cache while the lock was released. Since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif full:
                    # Use the old root to store the new key and result.
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # Empty the oldest link and make it the new root.
                    # Keep a reference to the old key and old result to
                    # prevent their ref counts from going to zero during the
                    # update. That will prevent potentially arbitrary object
                    # clean-up code (i.e. __del__) from running while we're
                    # still adjusting the links.
                    root = oldroot[NEXT]
                    oldkey = root[KEY]
                    oldresult = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # Now update the cache dictionary.
                    del cache[oldkey]
                    # Save the potentially reentrant cache[key] assignment
                    # for last, after the root and links have been put in
                    # a consistent state.
                    cache[key] = oldroot
                else:
                    # Put result in a new link at the front of the queue.
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                    # Use the cache_len bound method instead of the len() function
                    # which could potentially be wrapped in an lru_cache itself.
                    full = (cache_len() >= maxsize)
            return result

    def cache_info():
        """Report cache statistics"""
        with lock:
            return _CacheInfo(hits, misses, maxsize, cache_len())

    def cache_clear():
        """Clear the cache and cache statistics"""
        nonlocal hits, misses, full
        with lock:
            cache.clear()
            root[:] = [root, root, None, None]
            hits = misses = 0
            full = False

    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return wrapper

try:
    from _functools import _lru_cache_wrapper
except ImportError:
    pass
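
# Usage sketch (illustrative only): memoizing a pure function and inspecting
# the cache. The fib() function is made up for this example.
#
#     @lru_cache(maxsize=None)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(10)             # 55
#     fib.cache_info()    # CacheInfo(hits=8, misses=11, maxsize=None, currsize=11)
#     fib.cache_clear()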


################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################

def _c3_merge(sequences):
    """Merges MROs in *sequences* to a single MRO using the C3 algorithm.

    Adapted from http://www.python.org/download/releases/2.3/mro/.

    """
    result = []
    while True:
        sequences = [s for s in sequences if s]   # purge empty sequences
        if not sequences:
            return result
        for s1 in sequences:   # find merge candidates among seq heads
            candidate = s1[0]
            for s2 in sequences:
                if candidate in s2[1:]:
                    candidate = None
                    break      # reject the current head, it appears later
            else:
                break
        if candidate is None:
            raise RuntimeError("Inconsistent hierarchy")
        result.append(candidate)
        # remove the chosen candidate
        for seq in sequences:
            if seq[0] == candidate:
                del seq[0]

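# Merge sketch (illustrative only): for hypothetical classes A(object),
# B(object) and C(A, B), merging the per-base linearizations reproduces the
# usual MRO of C.
#
#     _c3_merge([[C], [A, object], [B, object], [A, B]])
#     # [C, A, B, object]
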
def _c3_mro(cls, abcs=None):
    """Computes the method resolution order using extended C3 linearization.

    If no *abcs* are given, the algorithm works exactly like the built-in C3
    linearization used for method resolution.

    If given, *abcs* is a list of abstract base classes that should be inserted
    into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
    result. The algorithm inserts ABCs where their functionality is introduced,
    i.e. issubclass(cls, abc) returns True for the class itself but returns
    False for all its direct base classes. Implicit ABCs for a given class
    (either registered or inferred from the presence of a special method like
    __len__) are inserted directly after the last ABC explicitly listed in the
    MRO of said class. If two implicit ABCs end up next to each other in the
    resulting MRO, their ordering depends on the order of types in *abcs*.

    """
    for i, base in enumerate(reversed(cls.__bases__)):
        if hasattr(base, '__abstractmethods__'):
            boundary = len(cls.__bases__) - i
            break   # Bases up to the last explicit ABC are considered first.
    else:
        boundary = 0
    abcs = list(abcs) if abcs else []
    explicit_bases = list(cls.__bases__[:boundary])
    abstract_bases = []
    other_bases = list(cls.__bases__[boundary:])
    for base in abcs:
        if issubclass(cls, base) and not any(
                issubclass(b, base) for b in cls.__bases__
                ):
            # If *cls* is the class that introduces behaviour described by
            # an ABC *base*, insert said ABC to its MRO.
            abstract_bases.append(base)
    for base in abstract_bases:
        abcs.remove(base)
    explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
    abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
    other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
    return _c3_merge(
        [[cls]] +
        explicit_c3_mros + abstract_c3_mros + other_c3_mros +
        [explicit_bases] + [abstract_bases] + [other_bases]
    )

| 963 | def _compose_mro(cls, types): |
| 964 | """Calculates the method resolution order for a given class *cls*. |
| 965 | |
| 966 | Includes relevant abstract base classes (with their respective bases) from |
| 967 | the *types* iterable. Uses a modified C3 linearization algorithm. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 968 | |
| 969 | """ |
| 970 | bases = set(cls.__mro__) |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 971 | # Remove entries which are already present in the __mro__ or unrelated. |
| 972 | def is_related(typ): |
| 973 | return (typ not in bases and hasattr(typ, '__mro__') |
| 974 | and issubclass(cls, typ)) |
| 975 | types = [n for n in types if is_related(n)] |
| 976 | # Remove entries which are strict bases of other entries (they will end up |
| 977 |     # in the MRO anyway). |
| 978 | def is_strict_base(typ): |
| 979 | for other in types: |
| 980 | if typ != other and typ in other.__mro__: |
| 981 | return True |
| 982 | return False |
| 983 | types = [n for n in types if not is_strict_base(n)] |
| 984 | # Subclasses of the ABCs in *types* which are also implemented by |
| 985 | # *cls* can be used to stabilize ABC ordering. |
| 986 | type_set = set(types) |
| 987 | mro = [] |
| 988 | for typ in types: |
| 989 | found = [] |
| 990 | for sub in typ.__subclasses__(): |
| 991 | if sub not in bases and issubclass(cls, sub): |
| 992 | found.append([s for s in sub.__mro__ if s in type_set]) |
| 993 | if not found: |
| 994 | mro.append(typ) |
| 995 | continue |
| 996 |         # Favor subclasses with the greatest number of useful bases |
| 997 | found.sort(key=len, reverse=True) |
| 998 | for sub in found: |
| 999 | for subcls in sub: |
| 1000 | if subcls not in mro: |
| 1001 | mro.append(subcls) |
| 1002 | return _c3_mro(cls, abcs=mro) |
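
# Illustrative sketch (hypothetical helper): Sequence is dropped because dict
# is not a Sequence, Sized is dropped because it is already a base of Mapping,
# and Mapping survives, getting spliced into dict's MRO with its own ABC bases.
def _compose_mro_demo():
    from collections.abc import Mapping, Sequence, Sized

    # Expected: [dict, Mapping, Collection, Sized, Iterable, Container, object]
    return _compose_mro(dict, [Sized, Mapping, Sequence])
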
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1003 | |
| 1004 | def _find_impl(cls, registry): |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1005 | """Returns the best matching implementation from *registry* for type *cls*. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1006 | |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1007 | Where there is no registered implementation for a specific type, its method |
| 1008 | resolution order is used to find a more generic implementation. |
| 1009 | |
| 1010 | Note: if *registry* does not contain an implementation for the base |
| 1011 | *object* type, this function may return None. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1012 | |
| 1013 | """ |
| 1014 | mro = _compose_mro(cls, registry.keys()) |
| 1015 | match = None |
| 1016 | for t in mro: |
| 1017 | if match is not None: |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1018 | # If *match* is an implicit ABC but there is another unrelated, |
| 1019 | # equally matching implicit ABC, refuse the temptation to guess. |
| 1020 | if (t in registry and t not in cls.__mro__ |
| 1021 | and match not in cls.__mro__ |
| 1022 | and not issubclass(match, t)): |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1023 | raise RuntimeError("Ambiguous dispatch: {} or {}".format( |
| 1024 | match, t)) |
| 1025 | break |
| 1026 | if t in registry: |
| 1027 | match = t |
| 1028 | return registry.get(match) |
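
# Illustrative sketch (hypothetical helper): with no exact entry for dict, the
# lookup walks the composed MRO and settles on the Mapping implementation
# instead of the object default.
def _find_impl_demo():
    from collections.abc import Mapping

    registry = {
        object: lambda obj: "default",
        Mapping: lambda obj: "mapping",
    }
    impl = _find_impl(dict, registry)
    assert impl({}) == "mapping"
    return impl
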
| 1029 | |
| 1030 | def singledispatch(func): |
| 1031 | """Single-dispatch generic function decorator. |
| 1032 | |
| 1033 | Transforms a function into a generic function, which can have different |
| 1034 | behaviours depending upon the type of its first argument. The decorated |
| 1035 | function acts as the default implementation, and additional |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1036 | implementations can be registered using the register() attribute of the |
| 1037 | generic function. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1038 | """ |
INADA Naoki | 9811e80 | 2017-09-30 16:13:02 +0900 | [diff] [blame] | 1039 | # There are many programs that use functools without singledispatch, so we |
| 1040 |     # trade off making singledispatch marginally slower for the benefit of |
| 1041 | # making start-up of such applications slightly faster. |
| 1042 | import types, weakref |
| 1043 | |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1044 | registry = {} |
INADA Naoki | 9811e80 | 2017-09-30 16:13:02 +0900 | [diff] [blame] | 1045 | dispatch_cache = weakref.WeakKeyDictionary() |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1046 | cache_token = None |
| 1047 | |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1048 | def dispatch(cls): |
| 1049 | """generic_func.dispatch(cls) -> <function implementation> |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1050 | |
| 1051 | Runs the dispatch algorithm to return the best available implementation |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1052 | for the given *cls* registered on *generic_func*. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1053 | |
| 1054 | """ |
| 1055 | nonlocal cache_token |
| 1056 | if cache_token is not None: |
| 1057 | current_token = get_cache_token() |
| 1058 | if cache_token != current_token: |
| 1059 | dispatch_cache.clear() |
| 1060 | cache_token = current_token |
| 1061 | try: |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1062 | impl = dispatch_cache[cls] |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1063 | except KeyError: |
| 1064 | try: |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1065 | impl = registry[cls] |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1066 | except KeyError: |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1067 | impl = _find_impl(cls, registry) |
| 1068 | dispatch_cache[cls] = impl |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1069 | return impl |
| 1070 | |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1071 | def register(cls, func=None): |
| 1072 | """generic_func.register(cls, func) -> func |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1073 | |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1074 | Registers a new implementation for the given *cls* on a *generic_func*. |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1075 | |
| 1076 | """ |
| 1077 | nonlocal cache_token |
| 1078 | if func is None: |
Łukasz Langa | e569753 | 2017-12-11 13:56:31 -0800 | [diff] [blame] | 1079 | if isinstance(cls, type): |
| 1080 | return lambda f: register(cls, f) |
| 1081 | ann = getattr(cls, '__annotations__', {}) |
| 1082 | if not ann: |
| 1083 | raise TypeError( |
| 1084 | f"Invalid first argument to `register()`: {cls!r}. " |
| 1085 | f"Use either `@register(some_class)` or plain `@register` " |
| 1086 | f"on an annotated function." |
| 1087 | ) |
| 1088 | func = cls |
| 1089 | |
| 1090 | # only import typing if annotation parsing is necessary |
| 1091 | from typing import get_type_hints |
| 1092 | argname, cls = next(iter(get_type_hints(func).items())) |
Lysandros Nikolaou | d673810 | 2019-05-20 00:11:21 +0200 | [diff] [blame] | 1093 | if not isinstance(cls, type): |
| 1094 | raise TypeError( |
| 1095 | f"Invalid annotation for {argname!r}. " |
| 1096 | f"{cls!r} is not a class." |
| 1097 | ) |
Łukasz Langa | 3720c77 | 2013-07-01 16:00:38 +0200 | [diff] [blame] | 1098 | registry[cls] = func |
| 1099 | if cache_token is None and hasattr(cls, '__abstractmethods__'): |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1100 | cache_token = get_cache_token() |
| 1101 | dispatch_cache.clear() |
| 1102 | return func |
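
    # Both registration forms in use, as a sketch (*fun* and *_* are
    # hypothetical names):
    #
    #     @fun.register(int)
    #     def _(arg, verbose=False): ...
    #
    #     @fun.register                  # type taken from the annotation
    #     def _(arg: list, verbose=False): ...
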
| 1103 | |
| 1104 | def wrapper(*args, **kw): |
Dong-hee Na | 445f1b3 | 2018-07-10 16:26:36 +0900 | [diff] [blame] | 1105 | if not args: |
| 1106 | raise TypeError(f'{funcname} requires at least ' |
| 1107 | '1 positional argument') |
| 1108 | |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1109 | return dispatch(args[0].__class__)(*args, **kw) |
| 1110 | |
Dong-hee Na | 445f1b3 | 2018-07-10 16:26:36 +0900 | [diff] [blame] | 1111 | funcname = getattr(func, '__name__', 'singledispatch function') |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1112 | registry[object] = func |
| 1113 | wrapper.register = register |
| 1114 | wrapper.dispatch = dispatch |
INADA Naoki | 9811e80 | 2017-09-30 16:13:02 +0900 | [diff] [blame] | 1115 | wrapper.registry = types.MappingProxyType(registry) |
Łukasz Langa | 6f69251 | 2013-06-05 12:20:24 +0200 | [diff] [blame] | 1116 | wrapper._clear_cache = dispatch_cache.clear |
| 1117 | update_wrapper(wrapper, func) |
| 1118 | return wrapper |
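
# Illustrative usage sketch (hypothetical names, not part of the module): the
# decorated function is the default implementation; register() adds per-type
# overloads and dispatch() returns the implementation that would be used.
def _singledispatch_demo():
    @singledispatch
    def fun(arg, verbose=False):
        return "default: {!r}".format(arg)

    @fun.register(int)
    def _(arg, verbose=False):
        return "int: {}".format(arg)

    assert fun("spam") == "default: 'spam'"
    assert fun(42) == "int: 42"
    assert fun.dispatch(int) is _    # explicit lookup of the int overload
    return fun
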
Ethan Smith | c651275 | 2018-05-26 16:38:33 -0400 | [diff] [blame] | 1119 | |
| 1120 | |
| 1121 | # Descriptor version |
| 1122 | class singledispatchmethod: |
| 1123 | """Single-dispatch generic method descriptor. |
| 1124 | |
| 1125 | Supports wrapping existing descriptors and handles non-descriptor |
| 1126 | callables as instance methods. |
| 1127 | """ |
| 1128 | |
| 1129 | def __init__(self, func): |
| 1130 | if not callable(func) and not hasattr(func, "__get__"): |
| 1131 | raise TypeError(f"{func!r} is not callable or a descriptor") |
| 1132 | |
| 1133 | self.dispatcher = singledispatch(func) |
| 1134 | self.func = func |
| 1135 | |
| 1136 | def register(self, cls, method=None): |
| 1137 | """generic_method.register(cls, func) -> func |
| 1138 | |
| 1139 | Registers a new implementation for the given *cls* on a *generic_method*. |
| 1140 | """ |
| 1141 | return self.dispatcher.register(cls, func=method) |
| 1142 | |
Raymond Hettinger | 0dac68f | 2019-08-29 01:27:42 -0700 | [diff] [blame] | 1143 | def __get__(self, obj, cls=None): |
Ethan Smith | c651275 | 2018-05-26 16:38:33 -0400 | [diff] [blame] | 1144 | def _method(*args, **kwargs): |
| 1145 | method = self.dispatcher.dispatch(args[0].__class__) |
| 1146 | return method.__get__(obj, cls)(*args, **kwargs) |
| 1147 | |
| 1148 | _method.__isabstractmethod__ = self.__isabstractmethod__ |
| 1149 | _method.register = self.register |
| 1150 | update_wrapper(_method, self.func) |
| 1151 | return _method |
| 1152 | |
| 1153 | @property |
| 1154 | def __isabstractmethod__(self): |
| 1155 | return getattr(self.func, '__isabstractmethod__', False) |
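
# Illustrative usage sketch (hypothetical class): dispatch happens on the type
# of the first argument after *self*; plain @neg.register reads the type from
# the annotation, exactly like the function-based form above.
def _singledispatchmethod_demo():
    class Negator:
        @singledispatchmethod
        def neg(self, arg):
            raise NotImplementedError("Cannot negate a " + type(arg).__name__)

        @neg.register
        def _(self, arg: int):
            return -arg

        @neg.register
        def _(self, arg: bool):
            return not arg

    assert Negator().neg(5) == -5
    assert Negator().neg(True) is False
    return Negator
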
Carl Meyer | d658dea | 2018-08-28 01:11:56 -0600 | [diff] [blame] | 1156 | |
| 1157 | |
| 1158 | ################################################################################ |
| 1159 | ### cached_property() - computed once per instance, cached as attribute |
| 1160 | ################################################################################ |
| 1161 | |
| 1162 | _NOT_FOUND = object() |
| 1163 | |
| 1164 | |
| 1165 | class cached_property: |
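    """A property computed once per instance: the decorated method runs on the
    first attribute access and its result is cached in the instance's __dict__
    under the same name, so later lookups bypass this descriptor entirely.
    """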
| 1166 | def __init__(self, func): |
| 1167 | self.func = func |
| 1168 | self.attrname = None |
| 1169 | self.__doc__ = func.__doc__ |
| 1170 | self.lock = RLock() |
| 1171 | |
| 1172 | def __set_name__(self, owner, name): |
| 1173 | if self.attrname is None: |
| 1174 | self.attrname = name |
| 1175 | elif name != self.attrname: |
| 1176 | raise TypeError( |
| 1177 | "Cannot assign the same cached_property to two different names " |
| 1178 | f"({self.attrname!r} and {name!r})." |
| 1179 | ) |
| 1180 | |
Raymond Hettinger | 0dac68f | 2019-08-29 01:27:42 -0700 | [diff] [blame] | 1181 | def __get__(self, instance, owner=None): |
Carl Meyer | d658dea | 2018-08-28 01:11:56 -0600 | [diff] [blame] | 1182 | if instance is None: |
| 1183 | return self |
| 1184 | if self.attrname is None: |
| 1185 | raise TypeError( |
| 1186 | "Cannot use cached_property instance without calling __set_name__ on it.") |
| 1187 | try: |
| 1188 | cache = instance.__dict__ |
| 1189 | except AttributeError: # not all objects have __dict__ (e.g. class defines slots) |
| 1190 | msg = ( |
| 1191 | f"No '__dict__' attribute on {type(instance).__name__!r} " |
| 1192 | f"instance to cache {self.attrname!r} property." |
| 1193 | ) |
| 1194 | raise TypeError(msg) from None |
| 1195 | val = cache.get(self.attrname, _NOT_FOUND) |
| 1196 | if val is _NOT_FOUND: |
| 1197 | with self.lock: |
| 1198 |                 # check if another thread filled the cache while we awaited the lock |
| 1199 | val = cache.get(self.attrname, _NOT_FOUND) |
| 1200 | if val is _NOT_FOUND: |
| 1201 | val = self.func(instance) |
| 1202 | try: |
| 1203 | cache[self.attrname] = val |
| 1204 | except TypeError: |
| 1205 | msg = ( |
| 1206 | f"The '__dict__' attribute on {type(instance).__name__!r} instance " |
| 1207 | f"does not support item assignment for caching {self.attrname!r} property." |
| 1208 | ) |
| 1209 | raise TypeError(msg) from None |
| 1210 | return val |
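
# Illustrative usage sketch (hypothetical class): the wrapped method runs only
# on the first access; the result lands in the instance's __dict__ under the
# same name, so later reads (and del) act on a plain instance attribute.
def _cached_property_demo():
    class Dataset:
        def __init__(self, rows):
            self._rows = rows
            self.calls = 0

        @cached_property
        def total(self):
            self.calls += 1
            return sum(self._rows)

    d = Dataset([1, 2, 3])
    assert d.total == 6 and d.calls == 1   # computed and cached
    assert d.total == 6 and d.calls == 1   # served from __dict__, no recompute
    del d.total                            # drop the cache; next access recomputes
    assert d.total == 6 and d.calls == 2
    return d
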