/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/
#include "Python.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
#include "pydtrace.h"
#include "pytime.h"             /* for _PyTime_GetMonotonicClock() */

/*[clinic input]
module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))

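/* Layout note (summary): for GC-tracked objects, the PyGC_Head is allocated
 * immediately before the PyObject itself, so both macros are simple pointer
 * arithmetic:
 *
 *     +-----------+------------------+
 *     | PyGC_Head | PyObject ...     |
 *     +-----------+------------------+
 *     ^ AS_GC(op)  ^ op == FROM_GC(gc)
 */
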
/*** Global GC state ***/

struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold */
    int count; /* count of allocations or collections of younger
                  generations */
};

/* If we change this, we need to change the default value in the signature of
   gc.collect. */
#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head,                               threshold,      count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}},           700,            0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}},           10,             0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}},           10,             0},
};

PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);

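/* Illustrative sketch of how the counters drive collection (hedged: the
 * actual trigger lives with the allocator and collect_generations later in
 * this file, and this helper name is hypothetical).  Generation 0's count is
 * the number of GC-tracked allocations since its last collection; each older
 * generation counts collections of the generation below it. */
#if 0
static void
maybe_collect_sketch(void)
{
    if (enabled && !collecting &&
        generations[0].count > generations[0].threshold)
    {
        collect(0, NULL, NULL, 0);  /* collect the youngest generation */
    }
}
#endif
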
static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if an unhandled exception occurs */
static PyObject *gc_str = NULL;

/* a list of callbacks to be invoked when collection is performed */
static PyObject *callbacks = NULL;

/* This is the number of objects that survived the last full collection.  It
   approximates the number of long-lived objects tracked by the GC.

   (By "full collection", we mean a collection of the oldest generation.)
*/
static Py_ssize_t long_lived_total = 0;

/* This is the number of objects that survived all "non-full" collections,
   and are awaiting their first full collection.
*/
static Py_ssize_t long_lived_pending = 0;

/*
   NOTE: about the counting of long-lived objects.

   To limit the cost of garbage collection, there are two strategies:
   - make each collection faster, e.g. by scanning fewer objects
   - do fewer collections
   This heuristic is about the latter strategy.

   In addition to the various configurable thresholds, we only trigger a
   full collection if the ratio
    long_lived_pending / long_lived_total
   is above a given value (hardwired to 25%).

   The reason is that, while "non-full" collections (i.e., collections of
   the young and middle generations) will always examine roughly the same
   number of objects -- determined by the aforementioned thresholds --,
   the cost of a full collection is proportional to the total number of
   long-lived objects, which is virtually unbounded.

   Indeed, it has been remarked that doing a full collection every
   <constant number> of object creations entails a dramatic performance
   degradation in workloads which consist of creating and storing lots of
   long-lived objects (e.g. building a large list of GC-tracked objects would
   show quadratic performance, instead of linear as expected: see issue #4074).

   Using the above ratio instead yields amortized linear performance in
   the total number of objects (the effect of which can be summarized
   thusly: "each full garbage collection is more and more costly as the
   number of objects grows, but we do fewer and fewer of them").

   This heuristic was suggested by Martin von Löwis on python-dev in
   June 2008.  His original analysis and proposal can be found at:
    http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/

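/* A minimal sketch of the trigger described above (illustrative only: the
 * real check lives in collect_generations further down, and this helper
 * name is hypothetical):
 */
#if 0
static int
should_collect_full_sketch(void)
{
    /* Only pay for a full collection once pending long-lived objects
     * exceed 25% of the long-lived total. */
    return long_lived_pending > long_lived_total / 4;
}
#endif
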
/*
   NOTE: about untracking of mutable objects.

   Certain types of container cannot participate in a reference cycle, and
   so do not need to be tracked by the garbage collector.  Untracking these
   objects reduces the cost of garbage collections.  However, determining
   which objects may be untracked is not free, and the costs must be
   weighed against the benefits for garbage collection.

   There are two possible strategies for when to untrack a container:

   i) When the container is created.
   ii) When the container is examined by the garbage collector.

   Tuples containing only immutable objects (integers, strings, etc., and
   recursively, tuples of immutable objects) do not need to be tracked.
   The interpreter creates a large number of tuples, many of which will
   not survive until garbage collection.  It is therefore not worthwhile
   to untrack eligible tuples at creation time.

   Instead, all tuples except the empty tuple are tracked when created.
   During garbage collection it is determined whether any surviving tuples
   can be untracked.  A tuple can be untracked if all of its contents are
   already not tracked (a sketch of this check follows this comment).
   Tuples are examined for untracking in all garbage collection cycles.
   It may take more than one cycle to untrack a tuple.

   Dictionaries containing only immutable objects also do not need to be
   tracked.  Dictionaries are untracked when created.  If a tracked item is
   inserted into a dictionary (either as a key or value), the dictionary
   becomes tracked.  During a full garbage collection (all generations),
   the collector will untrack any dictionaries whose contents are not
   tracked.

   The module provides the Python function is_tracked(obj), which returns
   the CURRENT tracking status of the object.  Subsequent garbage
   collections may change the tracking status of the object.

   Untracking of certain containers was introduced in issue #4688, and
   the algorithm was refined in response to issue #14775.
*/
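
/* Hedged sketch of the tuple check described above (the real logic is
 * _PyTuple_MaybeUntrack, called during move_unreachable below; details
 * may differ):
 */
#if 0
static void
tuple_maybe_untrack_sketch(PyObject *op)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(op); i++) {
        PyObject *elt = PyTuple_GET_ITEM(op, i);
        /* A tracked element keeps the whole tuple tracked; try again
         * during a later collection cycle. */
        if (PyObject_IS_GC(elt) && IS_TRACKED(elt))
            return;
    }
    _PyObject_GC_UNTRACK(op);
}
#endif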

/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              (DEBUG_COLLECTABLE | \
                                 DEBUG_UNCOLLECTABLE | \
                                 DEBUG_SAVEALL)
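/* These flags are exposed to Python as gc.DEBUG_STATS, gc.DEBUG_LEAK, etc.,
 * and are set from Python via gc.set_debug(). */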
static int debug;

/* Running stats per generation */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

static struct gc_generation_stats generation_stats[NUM_GENERATIONS];

/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)

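/* State-transition summary of the comment above:
 *
 *   GC_UNTRACKED --PyObject_GC_Track()--> GC_REACHABLE
 *   GC_REACHABLE --update_refs()/subtract_refs()--> gc_refs >= 0
 *   gc_refs >= 0 --move_unreachable()--> GC_REACHABLE
 *                                     or GC_TENTATIVELY_UNREACHABLE
 *   GC_TENTATIVELY_UNREACHABLE --(resurrected or uncollectable)-->
 *                                        GC_REACHABLE
 */
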
/*** list functions ***/

static void
gc_list_init(PyGC_Head *list)
{
    list->gc.gc_prev = list;
    list->gc.gc_next = list;
}

static int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->gc.gc_next == list);
}

#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif

/* Remove `node` from the gc list it's currently in. */
static void
gc_list_remove(PyGC_Head *node)
{
    node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
    node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
    node->gc.gc_next = NULL; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *new_prev;
    PyGC_Head *current_prev = node->gc.gc_prev;
    PyGC_Head *current_next = node->gc.gc_next;
    /* Unlink from current list. */
    current_prev->gc.gc_next = current_next;
    current_next->gc.gc_prev = current_prev;
    /* Relink at end of new list. */
    new_prev = node->gc.gc_prev = list->gc.gc_prev;
    new_prev->gc.gc_next = list->gc.gc_prev = node;
    node->gc.gc_next = list;
}

/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    PyGC_Head *tail;
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        tail = to->gc.gc_prev;
        tail->gc.gc_next = from->gc.gc_next;
        tail->gc.gc_next->gc.gc_prev = tail;
        to->gc.gc_prev = from->gc.gc_prev;
        to->gc.gc_prev->gc.gc_next = to;
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        n++;
    }
    return n;
}

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

/*** end of list stuff ***/


/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(_PyGCHead_REFS(gc) != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
        if (_PyGCHead_REFS(gc) > 0)
            _PyGCHead_DECREF(gc);
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        NULL);
    }
}

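/* Worked example: suppose objects A and B form a cycle (A -> B -> A) and one
 * reference from outside the generation also points at A.  update_refs()
 * sets gc_refs(A) = 2 and gc_refs(B) = 1; subtract_refs() then removes the
 * two internal edges, leaving gc_refs(A) = 1 (externally reachable) and
 * gc_refs(B) = 0 (reachable only from inside the cycle). */
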
/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            _PyGCHead_SET_REFS(gc, 1);
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            _PyGCHead_SET_REFS(gc, 1);
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
        else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
        }
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}

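/* Continuing the worked example above: if B is scanned first (gc_refs = 0)
 * it is tentatively moved to `unreachable`; when A (gc_refs = 1) is later
 * traversed, visit_reachable() moves B back into `young`, and both end up
 * GC_REACHABLE.  Without the external reference to A, both would remain in
 * `unreachable` as GC_TENTATIVELY_UNREACHABLE. */
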
/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = head->gc.gc_next;
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = gc->gc.gc_next;
        if (PyDict_CheckExact(op))
            _PyDict_MaybeUntrack(op);
        gc = next;
    }
}

/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)
{
    return op->ob_type->tp_del != NULL;
}

/* Move the objects in unreachable with tp_del slots into `finalizers`.
 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc;
    PyGC_Head *next;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (has_legacy_finalizer(op)) {
            gc_list_move(gc, finalizers);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

/* A traversal callback for move_legacy_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        if (IS_TENTATIVELY_UNREACHABLE(op)) {
            PyGC_Head *gc = AS_GC(op);
            gc_list_move(gc, tolist);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable set
 * into the finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                     PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;       /* no callback */

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}

static void
debug_cycle(const char *msg, PyObject *op)
{
    PySys_FormatStderr("gc: %s <%s %p>\n",
                       msg, Py_TYPE(op)->tp_name, op);
}

/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
 * The finalizers list is made empty on a successful return.
 */
static int
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(garbage, op) < 0)
                return -1;
        }
    }

    gc_list_merge(finalizers, old);
    return 0;
}

/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        if (!_PyGCHead_FINALIZED(gc) &&
                PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc, 1);
            Py_INCREF(op);
            finalize(op);
            Py_DECREF(op);
        }
    }
    gc_list_merge(&seen, collectable);
}

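/* Note: tp_finalize is the PEP 442 finalizer.  Unlike legacy tp_del, it is
 * called at most once, and an object whose tp_finalize has run can still be
 * collected afterwards -- which is why check_garbage()/revive_garbage()
 * below only need to cope with objects the finalizers actually resurrected. */
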
/* Walk the collectable list and check that its objects are really
   unreachable from the outside (some objects could have been resurrected by
   a finalizer). */
static int
check_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        assert(_PyGCHead_REFS(gc) != 0);
    }
    subtract_refs(collectable);
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        assert(_PyGCHead_REFS(gc) >= 0);
        if (_PyGCHead_REFS(gc) != 0)
            return -1;
    }
    return 0;
}

static void
revive_garbage(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = collectable->gc.gc_next; gc != collectable;
         gc = gc->gc.gc_next) {
        _PyGCHead_SET_REFS(gc, GC_REACHABLE);
    }
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        if (debug & DEBUG_SAVEALL) {
            PyList_Append(garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        if (collectable->gc.gc_next == gc) {
            /* object is still alive; move it (it may die later) */
            gc_list_move(gc, old);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}

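/* For context, a hedged sketch of a typical container tp_clear (MyObject,
 * mytype_clear, and the `attr` field are hypothetical names):
 */
#if 0
static int
mytype_clear(MyObject *self)
{
    /* Drop owned references; objects whose refcounts then fall to zero
     * are freed, which is what breaks the cycle. */
    Py_CLEAR(self->attr);
    return 0;
}
#endif
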
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
}

Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 906 | /* This is the main function. Read this to understand how the |
| 907 | * collection process works. */ |
Neal Norwitz | 7b216c5 | 2006-03-04 20:01:53 +0000 | [diff] [blame] | 908 | static Py_ssize_t |
Antoine Pitrou | fef34e3 | 2013-05-19 01:11:58 +0200 | [diff] [blame] | 909 | collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, |
| 910 | int nofail) |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 911 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 912 | int i; |
| 913 | Py_ssize_t m = 0; /* # objects collected */ |
| 914 | Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */ |
| 915 | PyGC_Head *young; /* the generation we are examining */ |
| 916 | PyGC_Head *old; /* next older generation */ |
| 917 | PyGC_Head unreachable; /* non-problematic unreachable trash */ |
| 918 | PyGC_Head finalizers; /* objects with, & reachable from, __del__ */ |
| 919 | PyGC_Head *gc; |
Victor Stinner | 7181dec | 2015-03-27 17:47:53 +0100 | [diff] [blame] | 920 | _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */ |
Antoine Pitrou | 40f6b12 | 2014-05-24 19:21:53 +0200 | [diff] [blame] | 921 | |
Antoine Pitrou | d4156c1 | 2012-10-30 22:43:19 +0100 | [diff] [blame] | 922 | struct gc_generation_stats *stats = &generation_stats[generation]; |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 923 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 924 | if (debug & DEBUG_STATS) { |
| 925 | PySys_WriteStderr("gc: collecting generation %d...\n", |
| 926 | generation); |
| 927 | PySys_WriteStderr("gc: objects in each generation:"); |
| 928 | for (i = 0; i < NUM_GENERATIONS; i++) |
Antoine Pitrou | ded3c1b | 2014-05-24 19:24:40 +0200 | [diff] [blame] | 929 | PySys_FormatStderr(" %zd", |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 930 | gc_list_size(GEN_HEAD(i))); |
Victor Stinner | 7181dec | 2015-03-27 17:47:53 +0100 | [diff] [blame] | 931 | t1 = _PyTime_GetMonotonicClock(); |
Antoine Pitrou | 40f6b12 | 2014-05-24 19:21:53 +0200 | [diff] [blame] | 932 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 933 | PySys_WriteStderr("\n"); |
| 934 | } |
Neil Schemenauer | 2880ae5 | 2002-05-04 05:35:20 +0000 | [diff] [blame] | 935 | |
Łukasz Langa | a785c87 | 2016-09-09 17:37:37 -0700 | [diff] [blame] | 936 | if (PyDTrace_GC_START_ENABLED()) |
| 937 | PyDTrace_GC_START(generation); |
| 938 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 939 | /* update collection and allocation counters */ |
| 940 | if (generation+1 < NUM_GENERATIONS) |
| 941 | generations[generation+1].count += 1; |
| 942 | for (i = 0; i <= generation; i++) |
| 943 | generations[i].count = 0; |
Neil Schemenauer | 2880ae5 | 2002-05-04 05:35:20 +0000 | [diff] [blame] | 944 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 945 | /* merge younger generations with one we are currently collecting */ |
| 946 | for (i = 0; i < generation; i++) { |
| 947 | gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation)); |
| 948 | } |
Neil Schemenauer | 2880ae5 | 2002-05-04 05:35:20 +0000 | [diff] [blame] | 949 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 950 | /* handy references */ |
| 951 | young = GEN_HEAD(generation); |
| 952 | if (generation < NUM_GENERATIONS-1) |
| 953 | old = GEN_HEAD(generation+1); |
| 954 | else |
| 955 | old = young; |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 956 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 957 | /* Using ob_refcnt and gc_refs, calculate which objects in the |
| 958 | * container set are reachable from outside the set (i.e., have a |
| 959 | * refcount greater than 0 when all the references within the |
| 960 | * set are taken into account). |
| 961 | */ |
| 962 | update_refs(young); |
| 963 | subtract_refs(young); |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 964 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 965 | /* Leave everything reachable from outside young in young, and move |
| 966 | * everything else (in young) to unreachable. |
| 967 | * NOTE: This used to move the reachable objects into a reachable |
| 968 | * set instead. But most things usually turn out to be reachable, |
| 969 | * so it's more efficient to move the unreachable things. |
| 970 | */ |
| 971 | gc_list_init(&unreachable); |
| 972 | move_unreachable(young, &unreachable); |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 973 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 974 | /* Move reachable objects to next generation. */ |
| 975 | if (young != old) { |
| 976 | if (generation == NUM_GENERATIONS - 2) { |
| 977 | long_lived_pending += gc_list_size(young); |
| 978 | } |
| 979 | gc_list_merge(young, old); |
| 980 | } |
| 981 | else { |
Antoine Pitrou | e1ad3da | 2012-05-28 22:22:34 +0200 | [diff] [blame] | 982 | /* We only untrack dicts in full collections, to avoid quadratic |
| 983 | dict build-up. See issue #14775. */ |
| 984 | untrack_dicts(young); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 985 | long_lived_pending = 0; |
| 986 | long_lived_total = gc_list_size(young); |
| 987 | } |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 988 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 989 | /* All objects in unreachable are trash, but objects reachable from |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 990 | * legacy finalizers (e.g. tp_del) can't safely be deleted. |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 991 | */ |
| 992 | gc_list_init(&finalizers); |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 993 | move_legacy_finalizers(&unreachable, &finalizers); |
| 994 | /* finalizers contains the unreachable objects with a legacy finalizer; |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 995 | * unreachable objects reachable *from* those are also uncollectable, |
| 996 | * and we move those into the finalizers list too. |
| 997 | */ |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 998 | move_legacy_finalizer_reachable(&finalizers); |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 999 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1000 | /* Collect statistics on collectable objects found and print |
| 1001 | * debugging information. |
| 1002 | */ |
| 1003 | for (gc = unreachable.gc.gc_next; gc != &unreachable; |
| 1004 | gc = gc->gc.gc_next) { |
| 1005 | m++; |
| 1006 | if (debug & DEBUG_COLLECTABLE) { |
| 1007 | debug_cycle("collectable", FROM_GC(gc)); |
| 1008 | } |
| 1009 | } |
Tim Peters | ead8b7a | 2004-10-30 23:09:22 +0000 | [diff] [blame] | 1010 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1011 | /* Clear weakrefs and invoke callbacks as necessary. */ |
| 1012 | m += handle_weakrefs(&unreachable, old); |
Tim Peters | ead8b7a | 2004-10-30 23:09:22 +0000 | [diff] [blame] | 1013 | |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 1014 | /* Call tp_finalize on objects which have one. */ |
Tim Peters | 5fbc7b1 | 2014-05-08 17:42:19 -0500 | [diff] [blame] | 1015 | finalize_garbage(&unreachable); |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 1016 | |
| 1017 | if (check_garbage(&unreachable)) { |
| 1018 | revive_garbage(&unreachable); |
| 1019 | gc_list_merge(&unreachable, old); |
| 1020 | } |
| 1021 | else { |
| 1022 | /* Call tp_clear on objects in the unreachable set. This will cause |
| 1023 | * the reference cycles to be broken. It may also cause some objects |
| 1024 | * in finalizers to be freed. |
| 1025 | */ |
| 1026 | delete_garbage(&unreachable, old); |
| 1027 | } |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 1028 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1029 | /* Collect statistics on uncollectable objects found and print |
| 1030 | * debugging information. */ |
| 1031 | for (gc = finalizers.gc.gc_next; |
| 1032 | gc != &finalizers; |
| 1033 | gc = gc->gc.gc_next) { |
| 1034 | n++; |
| 1035 | if (debug & DEBUG_UNCOLLECTABLE) |
| 1036 | debug_cycle("uncollectable", FROM_GC(gc)); |
| 1037 | } |
| 1038 | if (debug & DEBUG_STATS) { |
Victor Stinner | 7181dec | 2015-03-27 17:47:53 +0100 | [diff] [blame] | 1039 | _PyTime_t t2 = _PyTime_GetMonotonicClock(); |
Antoine Pitrou | 40f6b12 | 2014-05-24 19:21:53 +0200 | [diff] [blame] | 1040 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1041 | if (m == 0 && n == 0) |
| 1042 | PySys_WriteStderr("gc: done"); |
| 1043 | else |
Antoine Pitrou | ded3c1b | 2014-05-24 19:24:40 +0200 | [diff] [blame] | 1044 | PySys_FormatStderr( |
| 1045 | "gc: done, %zd unreachable, %zd uncollectable", |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1046 | n+m, n); |
Victor Stinner | 7181dec | 2015-03-27 17:47:53 +0100 | [diff] [blame] | 1047 | PySys_WriteStderr(", %.4fs elapsed\n", |
| 1048 | _PyTime_AsSecondsDouble(t2 - t1)); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1049 | } |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 1050 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1051 | /* Append instances in the uncollectable set to a Python |
| 1052 | * reachable list of garbage. The programmer has to deal with |
| 1053 | * this if they insist on creating this type of structure. |
| 1054 | */ |
Antoine Pitrou | 796564c | 2013-07-30 19:59:21 +0200 | [diff] [blame] | 1055 | (void)handle_legacy_finalizers(&finalizers, old); |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 1056 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1057 | /* Clear free list only during the collection of the highest |
| 1058 | * generation */ |
| 1059 | if (generation == NUM_GENERATIONS-1) { |
| 1060 | clear_freelists(); |
| 1061 | } |
Christian Heimes | a156e09 | 2008-02-16 07:38:31 +0000 | [diff] [blame] | 1062 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1063 | if (PyErr_Occurred()) { |
Antoine Pitrou | fef34e3 | 2013-05-19 01:11:58 +0200 | [diff] [blame] | 1064 | if (nofail) { |
| 1065 | PyErr_Clear(); |
| 1066 | } |
| 1067 | else { |
| 1068 | if (gc_str == NULL) |
| 1069 | gc_str = PyUnicode_FromString("garbage collection"); |
| 1070 | PyErr_WriteUnraisable(gc_str); |
| 1071 | Py_FatalError("unexpected exception during garbage collection"); |
| 1072 | } |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1073 | } |
Kristján Valur Jónsson | 69c6352 | 2012-04-15 11:41:32 +0000 | [diff] [blame] | 1074 | |
Antoine Pitrou | d4156c1 | 2012-10-30 22:43:19 +0100 | [diff] [blame] | 1075 | /* Update stats */ |
Kristján Valur Jónsson | 69c6352 | 2012-04-15 11:41:32 +0000 | [diff] [blame] | 1076 | if (n_collected) |
| 1077 | *n_collected = m; |
| 1078 | if (n_uncollectable) |
| 1079 | *n_uncollectable = n; |
Antoine Pitrou | d4156c1 | 2012-10-30 22:43:19 +0100 | [diff] [blame] | 1080 | stats->collections++; |
| 1081 | stats->collected += m; |
| 1082 | stats->uncollectable += n; |
Łukasz Langa | a785c87 | 2016-09-09 17:37:37 -0700 | [diff] [blame] | 1083 | |
| 1084 | if (PyDTrace_GC_DONE_ENABLED()) |
| 1085 | PyDTrace_GC_DONE(n+m); |
| 1086 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1087 | return n+m; |
Jeremy Hylton | c5007aa | 2000-06-30 05:02:53 +0000 | [diff] [blame] | 1088 | } |
| 1089 | |
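/* Illustrative sketch of what the DEBUG_STATS branches in collect() print;
 * the object counts shown are made up:
 *
 *     import gc
 *     gc.set_debug(gc.DEBUG_STATS)
 *     gc.collect()
 *     # stderr then shows something like:
 *     #   gc: collecting generation 2...
 *     #   gc: objects in each generation: 12 0 4801
 *     #   gc: done, 0.0012s elapsed
 */
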
/* Invoke progress callbacks to notify clients that garbage collection
 * is starting or stopping.
 */
static void
invoke_gc_callback(const char *phase, int generation,
                   Py_ssize_t collected, Py_ssize_t uncollectable)
{
    Py_ssize_t i;
    PyObject *info = NULL;

    /* we may get called very early */
    if (callbacks == NULL)
        return;
    /* The 'callbacks' global cannot be rebound; check it for sanity. */
    assert(callbacks != NULL && PyList_CheckExact(callbacks));
    if (PyList_GET_SIZE(callbacks) != 0) {
        info = Py_BuildValue("{sisnsn}",
            "generation", generation,
            "collected", collected,
            "uncollectable", uncollectable);
        if (info == NULL) {
            PyErr_WriteUnraisable(NULL);
            return;
        }
    }
    for (i=0; i<PyList_GET_SIZE(callbacks); i++) {
        PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
        Py_INCREF(cb); /* make sure cb doesn't go away */
        r = PyObject_CallFunction(cb, "sO", phase, info);
        Py_XDECREF(r);
        if (r == NULL)
            PyErr_WriteUnraisable(cb);
        Py_DECREF(cb);
    }
    Py_XDECREF(info);
}

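/* Illustrative sketch, not part of this module: a Python-level progress
 * callback receives the phase string and the info dict built above
 * (on_gc is a hypothetical name):
 *
 *     import gc
 *
 *     def on_gc(phase, info):
 *         # phase is "start" or "stop"; info has the keys "generation",
 *         # "collected" and "uncollectable"
 *         print(phase, info)
 *
 *     gc.callbacks.append(on_gc)
 *     gc.collect()                  # invokes on_gc twice
 */
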
/* Perform garbage collection of a generation and invoke
 * progress callbacks.
 */
static Py_ssize_t
collect_with_callback(int generation)
{
    Py_ssize_t result, collected, uncollectable;
    invoke_gc_callback("start", generation, 0, 0);
    result = collect(generation, &collected, &uncollectable, 0);
    invoke_gc_callback("stop", generation, collected, uncollectable);
    return result;
}

static Py_ssize_t
collect_generations(void)
{
    int i;
    Py_ssize_t n = 0;

    /* Find the oldest generation (highest numbered) where the count
     * exceeds the threshold.  Objects in that generation and
     * generations younger than it will be collected. */
    for (i = NUM_GENERATIONS-1; i >= 0; i--) {
        if (generations[i].count > generations[i].threshold) {
            /* Avoid quadratic performance degradation in number
               of tracked objects. See comments at the beginning
               of this file, and issue #4074.
             */
            if (i == NUM_GENERATIONS - 1
                && long_lived_pending < long_lived_total / 4)
                continue;
            n = collect_with_callback(i);
            break;
        }
    }
    return n;
}

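/* Illustrative sketch of the trigger rule above, assuming the default
 * thresholds: generation 0 is collected once its allocation count exceeds
 * threshold0, and each such collection bumps generation 1's count, and so
 * on up the chain:
 *
 *     import gc
 *     print(gc.get_threshold())     # (700, 10, 10) by default
 *     print(gc.get_count())         # current per-generation counts
 */
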
#include "clinic/gcmodule.c.h"

/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    enabled = 1;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.disable

Disable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    enabled = 0;
    Py_RETURN_NONE;
}

/*[clinic input]
gc.isenabled -> bool

Returns true if automatic garbage collection is enabled.
[clinic start generated code]*/

static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
    return enabled;
}

/*[clinic input]
gc.collect -> Py_ssize_t

    generation: int(c_default="NUM_GENERATIONS - 1") = 2

Run the garbage collector.

With no arguments, run a full collection.  The optional argument
may be an integer specifying which generation to collect.  A ValueError
is raised if the generation number is invalid.

The number of unreachable objects is returned.
[clinic start generated code]*/

static Py_ssize_t
gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
    Py_ssize_t n;

    if (generation < 0 || generation >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return -1;
    }

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect_with_callback(generation);
        collecting = 0;
    }

    return n;
}

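/* Illustrative sketch: the generation argument maps directly onto the
 * range check above:
 *
 *     import gc
 *     gc.collect()      # full collection (generation 2)
 *     gc.collect(0)     # youngest generation only
 *     gc.collect(3)     # raises ValueError: invalid generation
 */
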
/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    debug = flags;

    Py_RETURN_NONE;
}

/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    return debug;
}

PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds.  Setting threshold0 to zero disables\n"
"collection.\n");

static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &generations[0].threshold,
                          &generations[1].threshold,
                          &generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        generations[i].threshold = generations[2].threshold;
    }

    Py_RETURN_NONE;
}

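/* Illustrative sketch: the thresholds set here feed collect_generations()
 * above:
 *
 *     import gc
 *     gc.set_threshold(700, 10, 10)   # the default configuration
 *     gc.set_threshold(0)             # threshold0 == 0 disables collection
 */
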
/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}

/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}

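/* Illustrative sketch: the counts returned here are the per-generation
 * 'count' fields, which collect() zeroes for every generation up to the
 * one collected, so a full collection resets all three:
 *
 *     import gc
 *     gc.collect()
 *     print(gc.get_count())   # close to (0, 0, 0) right after
 */
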
static int
referrersvisit(PyObject* obj, PyObject *objs)
{
    Py_ssize_t i;
    for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
        if (PyTuple_GET_ITEM(objs, i) == obj)
            return 1;
    return 0;
}

static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}

PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

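/* Illustrative sketch: get_referrers() walks every generation list via
 * gc_referrers_for() above, so only objects currently tracked by the
 * collector can show up as referrers:
 *
 *     import gc
 *     target = {"x": 1}
 *     holder = [target]
 *     assert holder in gc.get_referrers(target)
 */
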
/* Append obj to list; return true if error (out of memory), false if OK. */
static int
referentsvisit(PyObject *obj, PyObject *list)
{
    return PyList_Append(list, obj) < 0;
}

PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

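/* Illustrative sketch: get_referents() simply runs each argument's
 * tp_traverse and accumulates whatever it visits:
 *
 *     import gc
 *     d = {"key": [1, 2]}
 *     print(gc.get_referents(d))   # roughly: ['key', [1, 2]]
 */
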
/*[clinic input]
gc.get_objects

Return a list of objects tracked by the collector (excluding the list returned).
[clinic start generated code]*/

static PyObject *
gc_get_objects_impl(PyObject *module)
/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}

/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}

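/* Illustrative sketch: each returned dict mirrors one
 * gc_generation_stats struct:
 *
 *     import gc
 *     for i, st in enumerate(gc.get_stats()):
 *         print(i, st["collections"], st["collected"], st["uncollectable"])
 */
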
/*[clinic input]
gc.is_tracked

    obj: object
    /

Returns true if the object is tracked by the garbage collector.

Simple atomic objects will return false.
[clinic start generated code]*/

static PyObject *
gc_is_tracked(PyObject *module, PyObject *obj)
/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
{
    PyObject *result;

    if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
        result = Py_True;
    else
        result = Py_False;
    Py_INCREF(result);
    return result;
}

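/* Illustrative sketch: only gc-tracked containers return True; atomic
 * objects, and containers the collector has untracked, return False:
 *
 *     import gc
 *     gc.is_tracked(1)     # False: ints are not containers
 *     gc.is_tracked([])    # True: lists are always tracked
 *     gc.is_tracked({})    # False: empty dicts start out untracked
 */
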

PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n");

static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL,      NULL}           /* Sentinel */
};

static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",               /* m_name */
    gc__doc__,          /* m_doc */
    -1,                 /* m_size */
    GcMethods,          /* m_methods */
    NULL,               /* m_reload */
    NULL,               /* m_traverse */
    NULL,               /* m_clear */
    NULL                /* m_free */
};

PyMODINIT_FUNC
PyInit_gc(void)
{
    PyObject *m;

    m = PyModule_Create(&gcmodule);

    if (m == NULL)
        return NULL;

    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return NULL;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return NULL;

    if (callbacks == NULL) {
        callbacks = PyList_New(0);
        if (callbacks == NULL)
            return NULL;
    }
    Py_INCREF(callbacks);
    if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
        return NULL;

#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
    return m;
}

/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        collecting = 1;
        n = collect_with_callback(NUM_GENERATIONS - 1);
        collecting = 0;
    }

    return n;
}

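/* Illustrative sketch, embedding code rather than part of this module: a
 * host application can force a full collection through the public C API:
 *
 *     Py_Initialize();
 *     ...
 *     Py_ssize_t found = PyGC_Collect();  // number of unreachable objects
 *     ...
 *     Py_Finalize();
 */
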
Py_ssize_t
_PyGC_CollectIfEnabled(void)
{
    if (!enabled)
        return 0;

    return PyGC_Collect();
}

Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively.  Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (collecting)
        n = 0;
    else {
        collecting = 1;
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        collecting = 0;
    }
    return n;
}

void
_PyGC_DumpShutdownStats(void)
{
    if (!(debug & DEBUG_SAVEALL)
        && garbage != NULL && PyList_GET_SIZE(garbage) > 0) {
        char *message;
        if (debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things, and we are at shutdown;
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(garbage)))
            PyErr_WriteUnraisable(NULL);
        if (debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}

void
_PyGC_Fini(void)
{
    Py_CLEAR(callbacks);
}

/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}

/* extension modules might be compiled with GC support so these
   functions must always be available */

#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}

void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}

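/* Illustrative sketch (MyObject/MyType are hypothetical; the type's
 * tp_flags would include Py_TPFLAGS_HAVE_GC): allocation goes through the
 * GC variants, and tracking starts only once the object is fully
 * initialized:
 *
 *     MyObject *op = PyObject_GC_New(MyObject, &MyType);
 *     if (op == NULL)
 *         return NULL;
 *     op->payload = NULL;         // hypothetical field
 *     PyObject_GC_Track(op);      // now visible to the collector
 *     return (PyObject *)op;
 */
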
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = 0;
    _PyGCHead_SET_REFS(g, GC_UNTRACKED);
    generations[0].count++; /* number of allocated GC objects */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}

PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}

PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}

PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    size_t size;
    PyVarObject *op;

    if (nitems < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    size = _PyObject_VAR_SIZE(tp, nitems);
    op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}

PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}

void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}
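
/* Illustrative sketch (same hypothetical MyObject as above): a tp_dealloc
 * for a GC type untracks first, drops its references, then frees the
 * memory through PyObject_GC_Del:
 *
 *     static void
 *     mytype_dealloc(MyObject *op)
 *     {
 *         PyObject_GC_UnTrack(op);    // must happen before clearing refs
 *         Py_XDECREF(op->payload);
 *         PyObject_GC_Del(op);
 *     }
 */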