blob: 4e5acf305b9f4b0d89ec582af665122321f91dbc [file] [log] [blame]
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001/*
Tim Peters88396172002-06-30 17:56:40 +00002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00003 Reference Cycle Garbage Collection
4 ==================================
5
Neil Schemenauerb2c2c9e2000-10-04 16:34:09 +00006 Neil Schemenauer <nas@arctrix.com>
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00007
8 Based on a post on the python-dev list. Ideas from Guido van Rossum,
9 Eric Tiedemann, and various others.
10
Neil Schemenauer43411b52001-08-30 00:05:51 +000011 http://www.arctrix.com/nas/python/gc/
Neil Schemenauera7024e92008-07-15 19:24:01 +000012
13 The following mailing list threads provide a historical perspective on
14 the design of this module. Note that a fair amount of refinement has
15 occurred since those discussions.
16
17 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19 http://mail.python.org/pipermail/python-dev/2000-March/002497.html
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000020
21 For a highlevel view of the collection process, read the collect
22 function.
23
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000024*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000027#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070028#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010029#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000030
Serhiy Storchaka93260282017-02-04 11:19:59 +020031/*[clinic input]
32module gc
33[clinic start generated code]*/
34/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
35
/* Get an object's GC head (the PyGC_Head immediately precedes the object). */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head.  The argument is parenthesized so the
 * macro expands correctly when passed an expression (e.g. FROM_GC(p + 1)
 * would otherwise misparse as ((PyGC_Head *)p + 1) + 1). */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)(g))+1))
41
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000042/*** Global GC state ***/
43
Neil Schemenauer2880ae52002-05-04 05:35:20 +000044struct gc_generation {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000045 PyGC_Head head;
46 int threshold; /* collection threshold */
47 int count; /* count of allocations or collections of younger
48 generations */
Neil Schemenauer2880ae52002-05-04 05:35:20 +000049};
50
Serhiy Storchaka93260282017-02-04 11:19:59 +020051/* If we change this, we need to change the default value in the signature of
52 gc.collect. */
Neil Schemenauer2880ae52002-05-04 05:35:20 +000053#define NUM_GENERATIONS 3
54#define GEN_HEAD(n) (&generations[n].head)
55
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000056/* linked lists of container objects */
Neil Schemenauer2880ae52002-05-04 05:35:20 +000057static struct gc_generation generations[NUM_GENERATIONS] = {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000058 /* PyGC_Head, threshold, count */
59 {{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0},
60 {{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0},
61 {{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0},
Neil Schemenauer2880ae52002-05-04 05:35:20 +000062};
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000063
Neil Schemenauer2880ae52002-05-04 05:35:20 +000064PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);
65
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +000066static int enabled = 1; /* automatic collection enabled? */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000067
Neil Schemenauer43411b52001-08-30 00:05:51 +000068/* true if we are currently running the collector */
Tim Petersbf384c22003-04-06 00:11:39 +000069static int collecting = 0;
Neil Schemenauer43411b52001-08-30 00:05:51 +000070
Tim Peters6fc13d92002-07-02 18:12:35 +000071/* list of uncollectable objects */
Tim Petersbf384c22003-04-06 00:11:39 +000072static PyObject *garbage = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +000073
74/* Python string to use if unhandled exception occurs */
Tim Petersbf384c22003-04-06 00:11:39 +000075static PyObject *gc_str = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +000076
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +000077/* a list of callbacks to be invoked when collection is performed */
78static PyObject *callbacks = NULL;
79
80/* This is the number of objects that survived the last full collection. It
Antoine Pitrou14b78f52009-01-09 22:27:08 +000081 approximates the number of long lived objects tracked by the GC.
82
83 (by "full collection", we mean a collection of the oldest generation).
84*/
85static Py_ssize_t long_lived_total = 0;
86
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +000087/* This is the number of objects that survived all "non-full" collections,
Antoine Pitrou14b78f52009-01-09 22:27:08 +000088 and are awaiting to undergo a full collection for the first time.
89
90*/
91static Py_ssize_t long_lived_pending = 0;
92
93/*
94 NOTE: about the counting of long-lived objects.
95
96 To limit the cost of garbage collection, there are two strategies;
97 - make each collection faster, e.g. by scanning fewer objects
98 - do less collections
99 This heuristic is about the latter strategy.
100
101 In addition to the various configurable thresholds, we only trigger a
102 full collection if the ratio
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000103 long_lived_pending / long_lived_total
Antoine Pitrou14b78f52009-01-09 22:27:08 +0000104 is above a given value (hardwired to 25%).
105
106 The reason is that, while "non-full" collections (i.e., collections of
107 the young and middle generations) will always examine roughly the same
108 number of objects -- determined by the aforementioned thresholds --,
109 the cost of a full collection is proportional to the total number of
110 long-lived objects, which is virtually unbounded.
111
112 Indeed, it has been remarked that doing a full collection every
113 <constant number> of object creations entails a dramatic performance
114 degradation in workloads which consist in creating and storing lots of
115 long-lived objects (e.g. building a large list of GC-tracked objects would
116 show quadratic performance, instead of linear as expected: see issue #4074).
117
118 Using the above ratio, instead, yields amortized linear performance in
119 the total number of objects (the effect of which can be summarized
120 thusly: "each full garbage collection is more and more costly as the
121 number of objects grows, but we do fewer and fewer of them").
122
123 This heuristic was suggested by Martin von Löwis on python-dev in
124 June 2008. His original analysis and proposal can be found at:
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000125 http://mail.python.org/pipermail/python-dev/2008-June/080579.html
Antoine Pitrou14b78f52009-01-09 22:27:08 +0000126*/
127
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200128/*
129 NOTE: about untracking of mutable objects.
Victor Stinnerc1eb26c2013-07-08 22:15:05 +0200130
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200131 Certain types of container cannot participate in a reference cycle, and
132 so do not need to be tracked by the garbage collector. Untracking these
133 objects reduces the cost of garbage collections. However, determining
134 which objects may be untracked is not free, and the costs must be
135 weighed against the benefits for garbage collection.
136
137 There are two possible strategies for when to untrack a container:
138
139 i) When the container is created.
140 ii) When the container is examined by the garbage collector.
141
142 Tuples containing only immutable objects (integers, strings etc, and
143 recursively, tuples of immutable objects) do not need to be tracked.
144 The interpreter creates a large number of tuples, many of which will
145 not survive until garbage collection. It is therefore not worthwhile
146 to untrack eligible tuples at creation time.
147
Victor Stinnerc1eb26c2013-07-08 22:15:05 +0200148 Instead, all tuples except the empty tuple are tracked when created.
149 During garbage collection it is determined whether any surviving tuples
150 can be untracked. A tuple can be untracked if all of its contents are
151 already not tracked. Tuples are examined for untracking in all garbage
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200152 collection cycles. It may take more than one cycle to untrack a tuple.
153
154 Dictionaries containing only immutable objects also do not need to be
155 tracked. Dictionaries are untracked when created. If a tracked item is
156 inserted into a dictionary (either as a key or value), the dictionary
157 becomes tracked. During a full garbage collection (all generations),
158 the collector will untrack any dictionaries whose contents are not
159 tracked.
160
161 The module provides the python function is_tracked(obj), which returns
162 the CURRENT tracking status of the object. Subsequent garbage
163 collections may change the tracking status of the object.
Victor Stinnerc1eb26c2013-07-08 22:15:05 +0200164
165 Untracking of certain containers was introduced in issue #4688, and
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200166 the algorithm was refined in response to issue #14775.
167*/
Antoine Pitrou14b78f52009-01-09 22:27:08 +0000168
/* Flags set for debugging information (gc.set_debug). */
#define DEBUG_STATS         (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE   (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL       (1<<5) /* save all garbage in gc.garbage */
/* Parenthesized so the macro is safe in any expression context:
 * `x & DEBUG_LEAK` would otherwise expand to
 * `x & DEBUG_COLLECTABLE | DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL`,
 * which binds `&` before `|` and yields the wrong value. */
#define DEBUG_LEAK          (DEBUG_COLLECTABLE | \
                             DEBUG_UNCOLLECTABLE | \
                             DEBUG_SAVEALL)
static int debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000178
Antoine Pitroud4156c12012-10-30 22:43:19 +0100179/* Running stats per generation */
180struct gc_generation_stats {
181 /* total number of collections */
182 Py_ssize_t collections;
183 /* total number of collected objects */
184 Py_ssize_t collected;
185 /* total number of uncollectable objects (put into gc.garbage) */
186 Py_ssize_t uncollectable;
187};
188
189static struct gc_generation_stats generation_stats[NUM_GENERATIONS];
190
Tim Peters6fc13d92002-07-02 18:12:35 +0000191/*--------------------------------------------------------------------------
192gc_refs values.
Neil Schemenauer43411b52001-08-30 00:05:51 +0000193
Tim Peters6fc13d92002-07-02 18:12:35 +0000194Between collections, every gc'ed object has one of two gc_refs values:
195
196GC_UNTRACKED
197 The initial state; objects returned by PyObject_GC_Malloc are in this
198 state. The object doesn't live in any generation list, and its
199 tp_traverse slot must not be called.
200
201GC_REACHABLE
202 The object lives in some generation list, and its tp_traverse is safe to
203 call. An object transitions to GC_REACHABLE when PyObject_GC_Track
204 is called.
205
206During a collection, gc_refs can temporarily take on other states:
207
208>= 0
209 At the start of a collection, update_refs() copies the true refcount
210 to gc_refs, for each object in the generation being collected.
211 subtract_refs() then adjusts gc_refs so that it equals the number of
212 times an object is referenced directly from outside the generation
213 being collected.
Martin v. Löwis774348c2002-11-09 19:54:06 +0000214 gc_refs remains >= 0 throughout these steps.
Tim Peters6fc13d92002-07-02 18:12:35 +0000215
216GC_TENTATIVELY_UNREACHABLE
217 move_unreachable() then moves objects not reachable (whether directly or
218 indirectly) from outside the generation into an "unreachable" set.
219 Objects that are found to be reachable have gc_refs set to GC_REACHABLE
220 again. Objects that are found to be unreachable have gc_refs set to
221 GC_TENTATIVELY_UNREACHABLE. It's "tentatively" because the pass doing
222 this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
223 transition back to GC_REACHABLE.
224
225 Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
226 for collection. If it's decided not to collect such an object (e.g.,
227 it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
228----------------------------------------------------------------------------
229*/
/* Shorthands for the special gc_refs states described above. */
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

/* Predicates on an object's current gc_refs state. */
#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000238
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000239/*** list functions ***/
240
241static void
242gc_list_init(PyGC_Head *list)
243{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000244 list->gc.gc_prev = list;
245 list->gc.gc_next = list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000246}
247
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000248static int
249gc_list_is_empty(PyGC_Head *list)
250{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000251 return (list->gc.gc_next == list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000252}
253
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to the tail of `list`. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *tail = list->gc.gc_prev;
    node->gc.gc_next = list;
    node->gc.gc_prev = tail;
    tail->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000266
Tim Peterse2d59182004-11-01 01:39:08 +0000267/* Remove `node` from the gc list it's currently in. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000268static void
269gc_list_remove(PyGC_Head *node)
270{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000271 node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
272 node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
273 node->gc.gc_next = NULL; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000274}
275
Tim Peterse2d59182004-11-01 01:39:08 +0000276/* Move `node` from the gc list it's currently in (which is not explicitly
277 * named here) to the end of `list`. This is semantically the same as
278 * gc_list_remove(node) followed by gc_list_append(node, list).
279 */
280static void
281gc_list_move(PyGC_Head *node, PyGC_Head *list)
282{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000283 PyGC_Head *new_prev;
284 PyGC_Head *current_prev = node->gc.gc_prev;
285 PyGC_Head *current_next = node->gc.gc_next;
286 /* Unlink from current list. */
287 current_prev->gc.gc_next = current_next;
288 current_next->gc.gc_prev = current_prev;
289 /* Relink at end of new list. */
290 new_prev = node->gc.gc_prev = list->gc.gc_prev;
291 new_prev->gc.gc_next = list->gc.gc_prev = node;
292 node->gc.gc_next = list;
Tim Peterse2d59182004-11-01 01:39:08 +0000293}
294
295/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000296static void
297gc_list_merge(PyGC_Head *from, PyGC_Head *to)
298{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000299 PyGC_Head *tail;
300 assert(from != to);
301 if (!gc_list_is_empty(from)) {
302 tail = to->gc.gc_prev;
303 tail->gc.gc_next = from->gc.gc_next;
304 tail->gc.gc_next->gc.gc_prev = tail;
305 to->gc.gc_prev = from->gc.gc_prev;
306 to->gc.gc_prev->gc.gc_next = to;
307 }
308 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000309}
310
Neal Norwitz7b216c52006-03-04 20:01:53 +0000311static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000312gc_list_size(PyGC_Head *list)
313{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000314 PyGC_Head *gc;
315 Py_ssize_t n = 0;
316 for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
317 n++;
318 }
319 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000320}
321
Tim Peters259272b2003-04-06 19:41:39 +0000322/* Append objects in a GC list to a Python list.
323 * Return 0 if all OK, < 0 if error (out of memory for list).
324 */
325static int
326append_objects(PyObject *py_list, PyGC_Head *gc_list)
327{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000328 PyGC_Head *gc;
329 for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
330 PyObject *op = FROM_GC(gc);
331 if (op != py_list) {
332 if (PyList_Append(py_list, op)) {
333 return -1; /* exception */
334 }
335 }
336 }
337 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000338}
339
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000340/*** end of list stuff ***/
341
342
Tim Peters19b74c72002-07-01 03:52:19 +0000343/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 for all objects
344 * in containers, and is GC_REACHABLE for all tracked gc objects not in
345 * containers.
Tim Peters88396172002-06-30 17:56:40 +0000346 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000347static void
348update_refs(PyGC_Head *containers)
349{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000350 PyGC_Head *gc = containers->gc.gc_next;
351 for (; gc != containers; gc = gc->gc.gc_next) {
Antoine Pitrou796564c2013-07-30 19:59:21 +0200352 assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
353 _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000354 /* Python's cyclic gc should never see an incoming refcount
355 * of 0: if something decref'ed to 0, it should have been
356 * deallocated immediately at that time.
357 * Possible cause (if the assert triggers): a tp_dealloc
358 * routine left a gc-aware object tracked during its teardown
359 * phase, and did something-- or allowed something to happen --
360 * that called back into Python. gc can trigger then, and may
361 * see the still-tracked dying object. Before this assert
362 * was added, such mistakes went on to allow gc to try to
363 * delete the object again. In a debug build, that caused
364 * a mysterious segfault, when _Py_ForgetReference tried
365 * to remove the object from the doubly-linked list of all
366 * objects a second time. In a release build, an actual
367 * double deallocation occurred, which leads to corruption
368 * of the allocator's internal bookkeeping pointers. That's
369 * so serious that maybe this should be a release-build
370 * check instead of an assert?
371 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200372 assert(_PyGCHead_REFS(gc) != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000373 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000374}
375
Tim Peters19b74c72002-07-01 03:52:19 +0000376/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000377static int
378visit_decref(PyObject *op, void *data)
379{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000380 assert(op != NULL);
381 if (PyObject_IS_GC(op)) {
382 PyGC_Head *gc = AS_GC(op);
383 /* We're only interested in gc_refs for objects in the
384 * generation being collected, which can be recognized
385 * because only they have positive gc_refs.
386 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200387 assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
388 if (_PyGCHead_REFS(gc) > 0)
389 _PyGCHead_DECREF(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000390 }
391 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000392}
393
Tim Peters19b74c72002-07-01 03:52:19 +0000394/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
395 * for all objects in containers, and is GC_REACHABLE for all tracked gc
396 * objects not in containers. The ones with gc_refs > 0 are directly
397 * reachable from outside containers, and so can't be collected.
398 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000399static void
400subtract_refs(PyGC_Head *containers)
401{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000402 traverseproc traverse;
403 PyGC_Head *gc = containers->gc.gc_next;
404 for (; gc != containers; gc=gc->gc.gc_next) {
405 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
406 (void) traverse(FROM_GC(gc),
407 (visitproc)visit_decref,
408 NULL);
409 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000410}
411
Tim Peters19b74c72002-07-01 03:52:19 +0000412/* A traversal callback for move_unreachable. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000413static int
Tim Peters19b74c72002-07-01 03:52:19 +0000414visit_reachable(PyObject *op, PyGC_Head *reachable)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000415{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000416 if (PyObject_IS_GC(op)) {
417 PyGC_Head *gc = AS_GC(op);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200418 const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);
Tim Peters19b74c72002-07-01 03:52:19 +0000419
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000420 if (gc_refs == 0) {
421 /* This is in move_unreachable's 'young' list, but
422 * the traversal hasn't yet gotten to it. All
423 * we need to do is tell move_unreachable that it's
424 * reachable.
425 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200426 _PyGCHead_SET_REFS(gc, 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000427 }
428 else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
429 /* This had gc_refs = 0 when move_unreachable got
430 * to it, but turns out it's reachable after all.
431 * Move it back to move_unreachable's 'young' list,
432 * and move_unreachable will eventually get to it
433 * again.
434 */
435 gc_list_move(gc, reachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200436 _PyGCHead_SET_REFS(gc, 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000437 }
438 /* Else there's nothing to do.
439 * If gc_refs > 0, it must be in move_unreachable's 'young'
440 * list, and move_unreachable will eventually get to it.
441 * If gc_refs == GC_REACHABLE, it's either in some other
442 * generation so we don't care about it, or move_unreachable
443 * already dealt with it.
444 * If gc_refs == GC_UNTRACKED, it must be ignored.
445 */
446 else {
447 assert(gc_refs > 0
448 || gc_refs == GC_REACHABLE
449 || gc_refs == GC_UNTRACKED);
450 }
451 }
452 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000453}
454
Tim Peters19b74c72002-07-01 03:52:19 +0000455/* Move the unreachable objects from young to unreachable. After this,
456 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
457 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE. All tracked
458 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
459 * All objects in young after this are directly or indirectly reachable
460 * from outside the original young; and all objects in unreachable are
461 * not.
Tim Peters88396172002-06-30 17:56:40 +0000462 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000463static void
Tim Peters19b74c72002-07-01 03:52:19 +0000464move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000465{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000466 PyGC_Head *gc = young->gc.gc_next;
Tim Peters19b74c72002-07-01 03:52:19 +0000467
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000468 /* Invariants: all objects "to the left" of us in young have gc_refs
469 * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
470 * from outside the young list as it was at entry. All other objects
471 * from the original young "to the left" of us are in unreachable now,
472 * and have gc_refs = GC_TENTATIVELY_UNREACHABLE. All objects to the
473 * left of us in 'young' now have been scanned, and no objects here
474 * or to the right have been scanned yet.
475 */
Tim Peters19b74c72002-07-01 03:52:19 +0000476
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000477 while (gc != young) {
478 PyGC_Head *next;
Tim Peters19b74c72002-07-01 03:52:19 +0000479
Antoine Pitrou796564c2013-07-30 19:59:21 +0200480 if (_PyGCHead_REFS(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000481 /* gc is definitely reachable from outside the
482 * original 'young'. Mark it as such, and traverse
483 * its pointers to find any other objects that may
484 * be directly reachable from it. Note that the
485 * call to tp_traverse may append objects to young,
486 * so we have to wait until it returns to determine
487 * the next object to visit.
488 */
489 PyObject *op = FROM_GC(gc);
490 traverseproc traverse = Py_TYPE(op)->tp_traverse;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200491 assert(_PyGCHead_REFS(gc) > 0);
492 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000493 (void) traverse(op,
494 (visitproc)visit_reachable,
495 (void *)young);
496 next = gc->gc.gc_next;
497 if (PyTuple_CheckExact(op)) {
498 _PyTuple_MaybeUntrack(op);
499 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000500 }
501 else {
502 /* This *may* be unreachable. To make progress,
503 * assume it is. gc isn't directly reachable from
504 * any object we've already traversed, but may be
505 * reachable from an object we haven't gotten to yet.
506 * visit_reachable will eventually move gc back into
507 * young if that's so, and we'll see it again.
508 */
509 next = gc->gc.gc_next;
510 gc_list_move(gc, unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200511 _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000512 }
513 gc = next;
514 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000515}
516
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200517/* Try to untrack all currently tracked dictionaries */
518static void
519untrack_dicts(PyGC_Head *head)
520{
521 PyGC_Head *next, *gc = head->gc.gc_next;
522 while (gc != head) {
523 PyObject *op = FROM_GC(gc);
524 next = gc->gc.gc_next;
525 if (PyDict_CheckExact(op))
526 _PyDict_MaybeUntrack(op);
527 gc = next;
528 }
529}
530
Antoine Pitrou796564c2013-07-30 19:59:21 +0200531/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000532static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200533has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000534{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200535 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000536}
537
Antoine Pitrou796564c2013-07-30 19:59:21 +0200538/* Move the objects in unreachable with tp_del slots into `finalizers`.
Tim Petersead8b7a2004-10-30 23:09:22 +0000539 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
540 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000541 */
Neil Schemenauera765c122001-11-01 17:35:23 +0000542static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200543move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
Neil Schemenauera765c122001-11-01 17:35:23 +0000544{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000545 PyGC_Head *gc;
546 PyGC_Head *next;
Tim Petersf6b80452003-04-07 19:21:15 +0000547
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000548 /* March over unreachable. Move objects with finalizers into
549 * `finalizers`.
550 */
551 for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
552 PyObject *op = FROM_GC(gc);
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000553
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000554 assert(IS_TENTATIVELY_UNREACHABLE(op));
555 next = gc->gc.gc_next;
Tim Petersf6ae7a42003-04-05 18:40:50 +0000556
Antoine Pitrou796564c2013-07-30 19:59:21 +0200557 if (has_legacy_finalizer(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000558 gc_list_move(gc, finalizers);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200559 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000560 }
561 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000562}
563
Antoine Pitrou796564c2013-07-30 19:59:21 +0200564/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000565static int
566visit_move(PyObject *op, PyGC_Head *tolist)
567{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000568 if (PyObject_IS_GC(op)) {
569 if (IS_TENTATIVELY_UNREACHABLE(op)) {
570 PyGC_Head *gc = AS_GC(op);
571 gc_list_move(gc, tolist);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200572 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000573 }
574 }
575 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000576}
577
578/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000579 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000580 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000581static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200582move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000583{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000584 traverseproc traverse;
585 PyGC_Head *gc = finalizers->gc.gc_next;
586 for (; gc != finalizers; gc = gc->gc.gc_next) {
587 /* Note that the finalizers list may grow during this. */
588 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
589 (void) traverse(FROM_GC(gc),
590 (visitproc)visit_move,
591 (void *)finalizers);
592 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000593}
594
Tim Petersead8b7a2004-10-30 23:09:22 +0000595/* Clear all weakrefs to unreachable objects, and if such a weakref has a
596 * callback, invoke it if necessary. Note that it's possible for such
597 * weakrefs to be outside the unreachable set -- indeed, those are precisely
598 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
599 * overview & some details. Some weakrefs with callbacks may be reclaimed
600 * directly by this routine; the number reclaimed is the return value. Other
601 * weakrefs with callbacks may be moved into the `old` generation. Objects
602 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
603 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
604 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000605 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    /* Two passes:  (1) clear every weakref to an unreachable object,
     * setting aside the weakrefs whose callbacks must still run;
     * (2) invoke those callbacks.  See the comment above this function.
     */
    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;                       /* no callback */

    /* Headache time.  `op` is going away, and is weakly referenced by
     * `wr`, which has a callback.  Should the callback be invoked?  If wr
     * is also trash, no:
     *
     * 1. There's no need to call it.  The object and the weakref are
     *    both going away, so it's legitimate to pretend the weakref is
     *    going away first.  The user has to ensure a weakref outlives its
     *    referent if they want a guarantee that the wr callback will get
     *    invoked.
     *
     * 2. It may be catastrophic to call it.  If the callback is also in
     *    cyclic trash (CT), then although the CT is unreachable from
     *    outside the current generation, CT may be reachable from the
     *    callback.  Then the callback could resurrect insane objects.
     *
     * Since the callback is never needed and may be unsafe in this case,
     * wr is simply left in the unreachable set.  Note that because we
     * already called _PyWeakref_ClearRef(wr), its callback will never
     * trigger.
     *
     * OTOH, if wr isn't part of CT, we should invoke the callback:  the
     * weakref outlived the trash.  Note that since wr isn't CT in this
     * case, its callback can't be CT either -- wr acted as an external
     * root to this generation, and therefore its callback did too.  So
     * nothing in CT is reachable from the callback either, so it's hard
     * to imagine how calling it later could create a problem for us.  wr
     * is moved to wrcb_to_call in this case.
     */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}
747
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000748static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200749debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000750{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100751 PySys_FormatStderr("gc: %s <%s %p>\n",
752 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000753}
754
Antoine Pitrou796564c2013-07-30 19:59:21 +0200755/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000756 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000757 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
758 * garbage list (a Python list), else only the objects in finalizers with
759 * __del__ methods are appended to garbage. All objects in finalizers are
760 * merged into the old list regardless.
Tim Peters259272b2003-04-06 19:41:39 +0000761 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
762 * The finalizers list is made empty on a successful return.
Tim Petersbf384c22003-04-06 00:11:39 +0000763 */
Tim Peters259272b2003-04-06 19:41:39 +0000764static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200765handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000766{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000767 PyGC_Head *gc = finalizers->gc.gc_next;
Tim Petersf6b80452003-04-07 19:21:15 +0000768
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000769 if (garbage == NULL) {
770 garbage = PyList_New(0);
771 if (garbage == NULL)
772 Py_FatalError("gc couldn't create gc.garbage list");
773 }
774 for (; gc != finalizers; gc = gc->gc.gc_next) {
775 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000776
Antoine Pitrou796564c2013-07-30 19:59:21 +0200777 if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000778 if (PyList_Append(garbage, op) < 0)
779 return -1;
780 }
781 }
Tim Petersf6b80452003-04-07 19:21:15 +0000782
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000783 gc_list_merge(finalizers, old);
784 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000785}
786
Tim Peters5fbc7b12014-05-08 17:42:19 -0500787/* Run first-time finalizers (if any) on all the objects in collectable.
788 * Note that this may remove some (or even all) of the objects from the
789 * list, due to refcounts falling to 0.
790 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200791static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500792finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200793{
794 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500795 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200796
Tim Peters5fbc7b12014-05-08 17:42:19 -0500797 /* While we're going through the loop, `finalize(op)` may cause op, or
798 * other objects, to be reclaimed via refcounts falling to zero. So
799 * there's little we can rely on about the structure of the input
800 * `collectable` list across iterations. For safety, we always take the
801 * first object in that list and move it to a temporary `seen` list.
802 * If objects vanish from the `collectable` and `seen` lists we don't
803 * care.
804 */
805 gc_list_init(&seen);
806
807 while (!gc_list_is_empty(collectable)) {
808 PyGC_Head *gc = collectable->gc.gc_next;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200809 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500810 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200811 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500812 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
813 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
Antoine Pitrou796564c2013-07-30 19:59:21 +0200814 _PyGCHead_SET_FINALIZED(gc, 1);
815 Py_INCREF(op);
816 finalize(op);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200817 Py_DECREF(op);
818 }
819 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500820 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200821}
822
823/* Walk the collectable list and check that they are really unreachable
824 from the outside (some objects could have been resurrected by a
825 finalizer). */
826static int
827check_garbage(PyGC_Head *collectable)
828{
829 PyGC_Head *gc;
830 for (gc = collectable->gc.gc_next; gc != collectable;
831 gc = gc->gc.gc_next) {
832 _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
833 assert(_PyGCHead_REFS(gc) != 0);
834 }
835 subtract_refs(collectable);
836 for (gc = collectable->gc.gc_next; gc != collectable;
837 gc = gc->gc.gc_next) {
838 assert(_PyGCHead_REFS(gc) >= 0);
839 if (_PyGCHead_REFS(gc) != 0)
840 return -1;
841 }
842 return 0;
843}
844
845static void
846revive_garbage(PyGC_Head *collectable)
847{
848 PyGC_Head *gc;
849 for (gc = collectable->gc.gc_next; gc != collectable;
850 gc = gc->gc.gc_next) {
851 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
852 }
853}
854
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000855/* Break reference cycles by clearing the containers involved. This is
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000856 * tricky business as the lists can be changing and we don't know which
Tim Peters19b74c72002-07-01 03:52:19 +0000857 * objects may be freed. It is possible I screwed something up here.
858 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000859static void
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000860delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000861{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000862 inquiry clear;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000863
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000864 while (!gc_list_is_empty(collectable)) {
865 PyGC_Head *gc = collectable->gc.gc_next;
866 PyObject *op = FROM_GC(gc);
Tim Peters88396172002-06-30 17:56:40 +0000867
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000868 if (debug & DEBUG_SAVEALL) {
869 PyList_Append(garbage, op);
870 }
871 else {
872 if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
873 Py_INCREF(op);
874 clear(op);
875 Py_DECREF(op);
876 }
877 }
878 if (collectable->gc.gc_next == gc) {
879 /* object is still alive, move it, it may die later */
880 gc_list_move(gc, old);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200881 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000882 }
883 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000884}
885
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    /* Each call releases the per-type freelist of the corresponding
     * built-in type; return values (number of items freed) are ignored. */
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
}
905
/* This is the main function.  Read this to understand how the
 * collection process works.
 *
 * `generation` is the oldest generation to collect; younger generations
 * are merged into it first.  On return, *n_collected (if non-NULL) holds
 * the number of collectable objects found and *n_uncollectable (if
 * non-NULL) the number of uncollectable ones (cycles with legacy
 * finalizers); the sum of the two is the return value.  If `nofail` is
 * true, an exception pending at the end is cleared instead of being
 * treated as fatal. */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &generation_stats[generation];

    if (debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                              gc_list_size(GEN_HEAD(i)));
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;    /* oldest generation: survivors stay put */

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        long_lived_pending = 0;
        long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
                    gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    /* A finalizer may have resurrected objects; if so, revive the whole
     * set and try again on a later collection. */
    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    return n+m;
}
1089
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001090/* Invoke progress callbacks to notify clients that garbage collection
1091 * is starting or stopping
1092 */
1093static void
1094invoke_gc_callback(const char *phase, int generation,
1095 Py_ssize_t collected, Py_ssize_t uncollectable)
1096{
1097 Py_ssize_t i;
1098 PyObject *info = NULL;
1099
1100 /* we may get called very early */
1101 if (callbacks == NULL)
1102 return;
1103 /* The local variable cannot be rebound, check it for sanity */
1104 assert(callbacks != NULL && PyList_CheckExact(callbacks));
1105 if (PyList_GET_SIZE(callbacks) != 0) {
1106 info = Py_BuildValue("{sisnsn}",
1107 "generation", generation,
1108 "collected", collected,
1109 "uncollectable", uncollectable);
1110 if (info == NULL) {
1111 PyErr_WriteUnraisable(NULL);
1112 return;
1113 }
1114 }
1115 for (i=0; i<PyList_GET_SIZE(callbacks); i++) {
1116 PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
1117 Py_INCREF(cb); /* make sure cb doesn't go away */
1118 r = PyObject_CallFunction(cb, "sO", phase, info);
1119 Py_XDECREF(r);
1120 if (r == NULL)
1121 PyErr_WriteUnraisable(cb);
1122 Py_DECREF(cb);
1123 }
1124 Py_XDECREF(info);
1125}
1126
1127/* Perform garbage collection of a generation and invoke
1128 * progress callbacks.
1129 */
1130static Py_ssize_t
1131collect_with_callback(int generation)
1132{
1133 Py_ssize_t result, collected, uncollectable;
1134 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001135 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001136 invoke_gc_callback("stop", generation, collected, uncollectable);
1137 return result;
1138}
1139
Neal Norwitz7b216c52006-03-04 20:01:53 +00001140static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001141collect_generations(void)
1142{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001143 int i;
1144 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001145
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001146 /* Find the oldest generation (highest numbered) where the count
1147 * exceeds the threshold. Objects in the that generation and
1148 * generations younger than it will be collected. */
1149 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
1150 if (generations[i].count > generations[i].threshold) {
1151 /* Avoid quadratic performance degradation in number
1152 of tracked objects. See comments at the beginning
1153 of this file, and issue #4074.
1154 */
1155 if (i == NUM_GENERATIONS - 1
1156 && long_lived_pending < long_lived_total / 4)
1157 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001158 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001159 break;
1160 }
1161 }
1162 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001163}
1164
Serhiy Storchaka93260282017-02-04 11:19:59 +02001165#include "clinic/gcmodule.c.h"
1166
/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    /* Turn the module-level `enabled` flag on; automatic collection
     * resumes from now on. */
    enabled = 1;
    Py_RETURN_NONE;
}
1180
/*[clinic input]
gc.disable

Disable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    /* Turn the module-level `enabled` flag off; explicit gc.collect()
     * calls still work. */
    enabled = 0;
    Py_RETURN_NONE;
}
1194
/*[clinic input]
gc.isenabled -> bool

Returns true if automatic garbage collection is enabled.
[clinic start generated code]*/

static int
gc_isenabled_impl(PyObject *module)
/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
{
    /* The `-> bool` clinic return converter maps this int to True/False. */
    return enabled;
}
1207
Serhiy Storchaka93260282017-02-04 11:19:59 +02001208/*[clinic input]
1209gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001210
Serhiy Storchaka93260282017-02-04 11:19:59 +02001211 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1212
1213Run the garbage collector.
1214
1215With no arguments, run a full collection. The optional argument
1216may be an integer specifying which generation to collect. A ValueError
1217is raised if the generation number is invalid.
1218
1219The number of unreachable objects is returned.
1220[clinic start generated code]*/
1221
1222static Py_ssize_t
1223gc_collect_impl(PyObject *module, int generation)
1224/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001225{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001226 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001227
Serhiy Storchaka93260282017-02-04 11:19:59 +02001228 if (generation < 0 || generation >= NUM_GENERATIONS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001229 PyErr_SetString(PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001230 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001231 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001232
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001233 if (collecting)
1234 n = 0; /* already collecting, don't do anything */
1235 else {
1236 collecting = 1;
Serhiy Storchaka93260282017-02-04 11:19:59 +02001237 n = collect_with_callback(generation);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001238 collecting = 0;
1239 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001240
Serhiy Storchaka93260282017-02-04 11:19:59 +02001241 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001242}
1243
/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    /* Replace (not OR into) the module-level debug flags. */
    debug = flags;

    Py_RETURN_NONE;
}
1270
/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    /* The flags most recently set via gc.set_debug(). */
    return debug;
}
1283
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001284PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001285"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001286"\n"
1287"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001288"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001289
1290static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001291gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001292{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001293 int i;
1294 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
1295 &generations[0].threshold,
1296 &generations[1].threshold,
1297 &generations[2].threshold))
1298 return NULL;
1299 for (i = 2; i < NUM_GENERATIONS; i++) {
1300 /* generations higher than 2 get the same threshold */
1301 generations[i].threshold = generations[2].threshold;
1302 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001303
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001304 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001305}
1306
/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    /* A new (threshold0, threshold1, threshold2) tuple. */
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}
1322
/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    /* A new (count0, count1, count2) tuple of the per-generation counters. */
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}
1338
Neil Schemenauer48c70342001-08-09 15:38:31 +00001339static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001340referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001341{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001342 Py_ssize_t i;
1343 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1344 if (PyTuple_GET_ITEM(objs, i) == obj)
1345 return 1;
1346 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001347}
1348
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001349static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001350gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001351{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001352 PyGC_Head *gc;
1353 PyObject *obj;
1354 traverseproc traverse;
1355 for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
1356 obj = FROM_GC(gc);
1357 traverse = Py_TYPE(obj)->tp_traverse;
1358 if (obj == objs || obj == resultlist)
1359 continue;
1360 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1361 if (PyList_Append(resultlist, obj) < 0)
1362 return 0; /* error */
1363 }
1364 }
1365 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001366}
1367
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001368PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001369"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001370Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001371
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001372static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001373gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001374{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001375 int i;
1376 PyObject *result = PyList_New(0);
1377 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001378
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001379 for (i = 0; i < NUM_GENERATIONS; i++) {
1380 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1381 Py_DECREF(result);
1382 return NULL;
1383 }
1384 }
1385 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001386}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001387
Tim Peters0f81ab62003-04-08 16:39:48 +00001388/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001389static int
Tim Peters730f5532003-04-08 17:17:17 +00001390referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001391{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001392 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001393}
1394
Tim Peters730f5532003-04-08 17:17:17 +00001395PyDoc_STRVAR(gc_get_referents__doc__,
1396"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001397Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001398
1399static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001400gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001401{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001402 Py_ssize_t i;
1403 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001404
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001405 if (result == NULL)
1406 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001407
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001408 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1409 traverseproc traverse;
1410 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001411
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001412 if (! PyObject_IS_GC(obj))
1413 continue;
1414 traverse = Py_TYPE(obj)->tp_traverse;
1415 if (! traverse)
1416 continue;
1417 if (traverse(obj, (visitproc)referentsvisit, result)) {
1418 Py_DECREF(result);
1419 return NULL;
1420 }
1421 }
1422 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001423}
1424
/*[clinic input]
gc.get_objects

Return a list of objects tracked by the collector (excluding the list returned).
[clinic start generated code]*/

static PyObject *
gc_get_objects_impl(PyObject *module)
/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    /* Walk every generation list; append_objects() returns non-zero on
       failure (out of memory while appending). */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
1449
/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        /* "{snsnsn}" builds a dict of three C-string keys mapped to
           Py_ssize_t ("n") values. */
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        /* PyList_Append does not steal the reference, so drop ours on
           both the success and failure paths. */
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1496
1497
/*[clinic input]
gc.is_tracked

    obj: object
    /

Returns true if the object is tracked by the garbage collector.

Simple atomic objects will return false.
[clinic start generated code]*/

static PyObject *
gc_is_tracked(PyObject *module, PyObject *obj)
/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
{
    PyObject *result;

    /* "Tracked" means the type participates in GC *and* this particular
       object is currently linked into a collector list (IS_TRACKED). */
    if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
        result = Py_True;
    else
        result = Py_False;
    Py_INCREF(result);
    return result;
}
1522
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001523
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001524PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001525"This module provides access to the garbage collector for reference cycles.\n"
1526"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001527"enable() -- Enable automatic garbage collection.\n"
1528"disable() -- Disable automatic garbage collection.\n"
1529"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001530"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001531"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001532"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001533"set_debug() -- Set debugging flags.\n"
1534"get_debug() -- Get debugging flags.\n"
1535"set_threshold() -- Set the collection thresholds.\n"
1536"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001537"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001538"is_tracked() -- Returns true if a given object is tracked.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001539"get_referrers() -- Return the list of objects that refer to an object.\n"
Tim Peters730f5532003-04-08 17:17:17 +00001540"get_referents() -- Return the list of objects that an object refers to.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001541
/* Module method table.  Most entries are Argument Clinic METHODDEF
   macros; set_threshold, get_referrers and get_referents are still
   hand-written METH_VARARGS functions. */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL,      NULL}           /* Sentinel */
};
1561
/* Module definition.  m_size is -1: the module keeps per-process state
   in C globals (generations, garbage, callbacks) and therefore does not
   support multiple instances. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",              /* m_name */
    gc__doc__,         /* m_doc */
    -1,                /* m_size */
    GcMethods,         /* m_methods */
    NULL,              /* m_reload */
    NULL,              /* m_traverse */
    NULL,              /* m_clear */
    NULL               /* m_free */
};
1573
/* Module initialization: create the module object and publish the
   `garbage` and `callbacks` lists plus the DEBUG_* flag constants. */
PyMODINIT_FUNC
PyInit_gc(void)
{
    PyObject *m;

    m = PyModule_Create(&gcmodule);

    if (m == NULL)
        return NULL;

    /* The `garbage` list is created only once and reused if the module
       is initialized again; the INCREF below compensates for the
       reference PyModule_AddObject steals on success. */
    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return NULL;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return NULL;

    /* Same create-once pattern for the gc.callbacks list. */
    if (callbacks == NULL) {
        callbacks = PyList_New(0);
        if (callbacks == NULL)
            return NULL;
    }
    Py_INCREF(callbacks);
    if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
        return NULL;

    /* Expose the DEBUG_* bit-flag constants on the module. */
#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
    return m;
}
1611
Guido van Rossume13ddc92003-04-17 17:29:22 +00001612/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001613Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001614PyGC_Collect(void)
1615{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001616 Py_ssize_t n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001617
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001618 if (collecting)
1619 n = 0; /* already collecting, don't do anything */
1620 else {
1621 collecting = 1;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001622 n = collect_with_callback(NUM_GENERATIONS - 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001623 collecting = 0;
1624 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001625
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001626 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001627}
1628
Antoine Pitroufef34e32013-05-19 01:11:58 +02001629Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001630_PyGC_CollectIfEnabled(void)
1631{
1632 if (!enabled)
1633 return 0;
1634
1635 return PyGC_Collect();
1636}
1637
1638Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02001639_PyGC_CollectNoFail(void)
1640{
1641 Py_ssize_t n;
1642
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001643 /* Ideally, this function is only called on interpreter shutdown,
1644 and therefore not recursively. Unfortunately, when there are daemon
1645 threads, a daemon thread can start a cyclic garbage collection
1646 during interpreter shutdown (and then never finish it).
1647 See http://bugs.python.org/issue8713#msg195178 for an example.
1648 */
1649 if (collecting)
1650 n = 0;
1651 else {
1652 collecting = 1;
1653 n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
1654 collecting = 0;
1655 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001656 return n;
1657}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001658
Antoine Pitrou696e0352010-08-08 22:18:46 +00001659void
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001660_PyGC_DumpShutdownStats(void)
Antoine Pitrou696e0352010-08-08 22:18:46 +00001661{
Antoine Pitrou2ed94eb2010-09-14 09:48:39 +00001662 if (!(debug & DEBUG_SAVEALL)
1663 && garbage != NULL && PyList_GET_SIZE(garbage) > 0) {
Georg Brandl08be72d2010-10-24 15:11:22 +00001664 char *message;
1665 if (debug & DEBUG_UNCOLLECTABLE)
Antoine Pitroub5d82042010-11-05 00:05:25 +00001666 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001667 "shutdown";
1668 else
Antoine Pitroub5d82042010-11-05 00:05:25 +00001669 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001670 "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001671 /* PyErr_WarnFormat does too many things and we are at shutdown,
1672 the warnings module's dependencies (e.g. linecache) may be gone
1673 already. */
1674 if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
1675 "gc", NULL, message,
1676 PyList_GET_SIZE(garbage)))
Georg Brandl08be72d2010-10-24 15:11:22 +00001677 PyErr_WriteUnraisable(NULL);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001678 if (debug & DEBUG_UNCOLLECTABLE) {
1679 PyObject *repr = NULL, *bytes = NULL;
1680 repr = PyObject_Repr(garbage);
1681 if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
1682 PyErr_WriteUnraisable(garbage);
1683 else {
1684 PySys_WriteStderr(
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001685 " %s\n",
Antoine Pitrou696e0352010-08-08 22:18:46 +00001686 PyBytes_AS_STRING(bytes)
1687 );
1688 }
1689 Py_XDECREF(repr);
1690 Py_XDECREF(bytes);
1691 }
Antoine Pitrou696e0352010-08-08 22:18:46 +00001692 }
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001693}
1694
/* Collector finalization: release the module's reference to the
   gc.callbacks list.  Only `callbacks` is cleared here; `garbage` is
   handled separately during interpreter teardown. */
void
_PyGC_Fini(void)
{
    Py_CLEAR(callbacks);
}
1700
/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    /* Dump the object that owns GC header `g` via _PyObject_Dump(). */
    _PyObject_Dump(FROM_GC(g));
}
1707
/* extension modules might be compiled with GC support so these
   functions must always be available */

/* Drop any macro definitions of these names so the definitions below
   produce real, externally-linkable functions. */
#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc
1715
/* Public out-of-line wrapper: begin tracking `op` via the
   _PyObject_GC_TRACK() macro. */
void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}
1721
/* Public out-of-line wrapper: stop tracking `op`, tolerating objects
   that are already untracked. */
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}
1731
/* Allocate a GC-enabled object of `basicsize` bytes, preceded by the
   hidden PyGC_Head header.  If `use_calloc` is non-zero the memory is
   zero-filled.  May trigger a collection when the generation-0 count
   exceeds its threshold.  Returns NULL with MemoryError set on size
   overflow or allocation failure. */
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    /* Guard the sizeof(PyGC_Head) + basicsize addition below. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    /* Clear the whole refs word before marking the object untracked.
       NOTE(review): this looks redundant, but _PyGCHead_SET_REFS
       presumably preserves flag bits in gc_refs, so the explicit zero
       keeps stale heap contents from leaking in — confirm against the
       macro's definition before removing. */
    g->gc.gc_refs = 0;
    _PyGCHead_SET_REFS(g, GC_UNTRACKED);
    generations[0].count++; /* number of allocated GC objects */
    /* Kick off a collection when generation 0 is over threshold, GC is
       enabled and not already running, and no exception is pending. */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
1762
/* malloc-style (uninitialized) GC allocation. */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1768
/* calloc-style (zero-filled) GC allocation. */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1774
1775PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001776_PyObject_GC_New(PyTypeObject *tp)
1777{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001778 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1779 if (op != NULL)
1780 op = PyObject_INIT(op, tp);
1781 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001782}
1783
1784PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001785_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001786{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001787 size_t size;
1788 PyVarObject *op;
1789
1790 if (nitems < 0) {
1791 PyErr_BadInternalCall();
1792 return NULL;
1793 }
1794 size = _PyObject_VAR_SIZE(tp, nitems);
1795 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001796 if (op != NULL)
1797 op = PyObject_INIT_VAR(op, tp, nitems);
1798 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001799}
1800
/* Resize the storage of a variable-size GC object to hold `nitems`
   items.  On success returns the (possibly relocated) object with
   Py_SIZE updated; on failure returns NULL with MemoryError set (per
   realloc semantics the original storage is then still valid). */
PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    /* The realloc covers the hidden PyGC_Head header too, so convert to
       the header pointer first and back afterwards. */
    PyGC_Head *g = AS_GC(op);
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g,  sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
1815
/* Free a GC-allocated object: unlink it from its generation list if it
   is still tracked, decrement the generation-0 allocation count, and
   release the memory including the hidden PyGC_Head header. */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}