blob: 754348e20a92d0b9e08c35ace72e784d53eebcda [file] [log] [blame]
/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000027#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070028#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010029#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000030
/* Get an object's GC head.  The PyGC_Head is allocated immediately
 * before the PyObject proper, so converting is pure pointer math. */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head (inverse of AS_GC). */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
36
/*** Global GC state ***/

/* One GC generation: the circular doubly-linked list of tracked container
 * objects in that generation, plus the counters that decide when to
 * collect it. */
struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold */
    int count; /* count of allocations or collections of younger
                  generations */
};

#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head,                     threshold, count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700,       0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10,        0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10,        0},
};

/* Exported pointer to the youngest generation's list header; presumably
 * where newly tracked objects are linked -- confirm against
 * PyObject_GC_Track in the object allocator. */
PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);
58
static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector (guards reentrancy) */
static int collecting = 0;

/* list of uncollectable objects (Python-visible as gc.garbage) */
static PyObject *garbage = NULL;

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* a list of callbacks to be invoked when collection is performed */
static PyObject *callbacks = NULL;

/* This is the number of objects that survived the last full collection. It
   approximates the number of long lived objects tracked by the GC.

   (by "full collection", we mean a collection of the oldest generation).
*/
static Py_ssize_t long_lived_total = 0;

/* This is the number of objects that survived all "non-full" collections,
   and are awaiting to undergo a full collection for the first time.
*/
static Py_ssize_t long_lived_pending = 0;
85
/*
   NOTE: about the counting of long-lived objects.

   To limit the cost of garbage collection, there are two strategies:
   - make each collection faster, e.g. by scanning fewer objects
   - do fewer collections
   This heuristic is about the latter strategy.

   In addition to the various configurable thresholds, we only trigger a
   full collection if the ratio

    long_lived_pending / long_lived_total

   is above a given value (hardwired to 25%).

   The reason is that, while "non-full" collections (i.e., collections of
   the young and middle generations) will always examine roughly the same
   number of objects -- determined by the aforementioned thresholds --,
   the cost of a full collection is proportional to the total number of
   long-lived objects, which is virtually unbounded.

   Indeed, it has been remarked that doing a full collection every
   <constant number> of object creations entails a dramatic performance
   degradation in workloads which consist of creating and storing lots of
   long-lived objects (e.g. building a large list of GC-tracked objects would
   show quadratic performance, instead of linear as expected: see issue #4074).

   Using the above ratio, instead, yields amortized linear performance in
   the total number of objects (the effect of which can be summarized
   thusly: "each full garbage collection is more and more costly as the
   number of objects grows, but we do fewer and fewer of them").

   This heuristic was suggested by Martin von Löwis on python-dev in
   June 2008.  His original analysis and proposal can be found at:
    http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
120
/*
   NOTE: about untracking of mutable objects.

   Certain types of container cannot participate in a reference cycle, and
   so do not need to be tracked by the garbage collector.  Untracking these
   objects reduces the cost of garbage collections.  However, determining
   which objects may be untracked is not free, and the costs must be
   weighed against the benefits for garbage collection.

   There are two possible strategies for when to untrack a container:

   i)  When the container is created.
   ii) When the container is examined by the garbage collector.

   Tuples containing only immutable objects (integers, strings etc, and
   recursively, tuples of immutable objects) do not need to be tracked.
   The interpreter creates a large number of tuples, many of which will
   not survive until garbage collection.  It is therefore not worthwhile
   to untrack eligible tuples at creation time.

   Instead, all tuples except the empty tuple are tracked when created.
   During garbage collection it is determined whether any surviving tuples
   can be untracked.  A tuple can be untracked if all of its contents are
   already not tracked.  Tuples are examined for untracking in all garbage
   collection cycles.  It may take more than one cycle to untrack a tuple.

   Dictionaries containing only immutable objects also do not need to be
   tracked.  Dictionaries are untracked when created.  If a tracked item is
   inserted into a dictionary (either as a key or value), the dictionary
   becomes tracked.  During a full garbage collection (all generations),
   the collector will untrack any dictionaries whose contents are not
   tracked.

   The module provides the python function is_tracked(obj), which returns
   the CURRENT tracking status of the object.  Subsequent garbage
   collections may change the tracking status of the object.

   Untracking of certain containers was introduced in issue #4688, and
   the algorithm was refined in response to issue #14775.
*/
Antoine Pitrou14b78f52009-01-09 22:27:08 +0000161
/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
/* Parenthesize the expansion: without the parentheses, an expression such
 * as `debug & DEBUG_LEAK` would parse as
 * `(debug & DEBUG_COLLECTABLE) | DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL`,
 * which is always non-zero regardless of `debug`. */
#define DEBUG_LEAK              (DEBUG_COLLECTABLE | \
                                 DEBUG_UNCOLLECTABLE | \
                                 DEBUG_SAVEALL)
static int debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000171
/* Running stats per generation */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

/* One cumulative stats record per generation; presumably surfaced through
 * the gc module's stats API -- confirm in the module code below. */
static struct gc_generation_stats generation_stats[NUM_GENERATIONS];
183
/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
/* Aliases for the gc_refs sentinel states described in the table above. */
#define GC_UNTRACKED                    _PyGC_REFS_UNTRACKED
#define GC_REACHABLE                    _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE      _PyGC_REFS_TENTATIVELY_UNREACHABLE

/* Predicates over an object's current gc_refs state. */
#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000231
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000232/*** list functions ***/
233
234static void
235gc_list_init(PyGC_Head *list)
236{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000237 list->gc.gc_prev = list;
238 list->gc.gc_next = list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000239}
240
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000241static int
242gc_list_is_empty(PyGC_Head *list)
243{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000244 return (list->gc.gc_next == list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000245}
246
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`.  Kept (disabled) for reference only. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    node->gc.gc_next = list;
    node->gc.gc_prev = list->gc.gc_prev;
    node->gc.gc_prev->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000259
Tim Peterse2d59182004-11-01 01:39:08 +0000260/* Remove `node` from the gc list it's currently in. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000261static void
262gc_list_remove(PyGC_Head *node)
263{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000264 node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
265 node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
266 node->gc.gc_next = NULL; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000267}
268
Tim Peterse2d59182004-11-01 01:39:08 +0000269/* Move `node` from the gc list it's currently in (which is not explicitly
270 * named here) to the end of `list`. This is semantically the same as
271 * gc_list_remove(node) followed by gc_list_append(node, list).
272 */
273static void
274gc_list_move(PyGC_Head *node, PyGC_Head *list)
275{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000276 PyGC_Head *new_prev;
277 PyGC_Head *current_prev = node->gc.gc_prev;
278 PyGC_Head *current_next = node->gc.gc_next;
279 /* Unlink from current list. */
280 current_prev->gc.gc_next = current_next;
281 current_next->gc.gc_prev = current_prev;
282 /* Relink at end of new list. */
283 new_prev = node->gc.gc_prev = list->gc.gc_prev;
284 new_prev->gc.gc_next = list->gc.gc_prev = node;
285 node->gc.gc_next = list;
Tim Peterse2d59182004-11-01 01:39:08 +0000286}
287
288/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000289static void
290gc_list_merge(PyGC_Head *from, PyGC_Head *to)
291{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000292 PyGC_Head *tail;
293 assert(from != to);
294 if (!gc_list_is_empty(from)) {
295 tail = to->gc.gc_prev;
296 tail->gc.gc_next = from->gc.gc_next;
297 tail->gc.gc_next->gc.gc_prev = tail;
298 to->gc.gc_prev = from->gc.gc_prev;
299 to->gc.gc_prev->gc.gc_next = to;
300 }
301 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000302}
303
Neal Norwitz7b216c52006-03-04 20:01:53 +0000304static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000305gc_list_size(PyGC_Head *list)
306{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000307 PyGC_Head *gc;
308 Py_ssize_t n = 0;
309 for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
310 n++;
311 }
312 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000313}
314
Tim Peters259272b2003-04-06 19:41:39 +0000315/* Append objects in a GC list to a Python list.
316 * Return 0 if all OK, < 0 if error (out of memory for list).
317 */
318static int
319append_objects(PyObject *py_list, PyGC_Head *gc_list)
320{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000321 PyGC_Head *gc;
322 for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
323 PyObject *op = FROM_GC(gc);
324 if (op != py_list) {
325 if (PyList_Append(py_list, op)) {
326 return -1; /* exception */
327 }
328 }
329 }
330 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000331}
332
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000333/*** end of list stuff ***/
334
335
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 for all objects
 * in containers, and is GC_REACHABLE for all tracked gc objects not in
 * containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = containers->gc.gc_next;
    for (; gc != containers; gc = gc->gc.gc_next) {
        /* Between collections every tracked object is GC_REACHABLE. */
        assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
        _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something-- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(_PyGCHead_REFS(gc) != 0);
    }
}
368
/* A traversal callback for subtract_refs: for each gc-tracked object the
 * parent refers to, remove one unit from its copied refcount. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
        if (_PyGCHead_REFS(gc) > 0)
            _PyGCHead_DECREF(gc);
    }
    return 0;
}
386
Tim Peters19b74c72002-07-01 03:52:19 +0000387/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
388 * for all objects in containers, and is GC_REACHABLE for all tracked gc
389 * objects not in containers. The ones with gc_refs > 0 are directly
390 * reachable from outside containers, and so can't be collected.
391 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000392static void
393subtract_refs(PyGC_Head *containers)
394{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000395 traverseproc traverse;
396 PyGC_Head *gc = containers->gc.gc_next;
397 for (; gc != containers; gc=gc->gc.gc_next) {
398 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
399 (void) traverse(FROM_GC(gc),
400 (visitproc)visit_decref,
401 NULL);
402 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000403}
404
/* A traversal callback for move_unreachable: `op` is referenced by an
 * object already proven reachable, so make sure it is (or will be)
 * scanned as reachable too. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);

        if (gc_refs == 0) {
            /* This is in move_unreachable's 'young' list, but
             * the traversal hasn't yet gotten to it.  All
             * we need to do is tell move_unreachable that it's
             * reachable.
             */
            _PyGCHead_SET_REFS(gc, 1);
        }
        else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
            /* This had gc_refs = 0 when move_unreachable got
             * to it, but turns out it's reachable after all.
             * Move it back to move_unreachable's 'young' list,
             * and move_unreachable will eventually get to it
             * again.
             */
            gc_list_move(gc, reachable);
            _PyGCHead_SET_REFS(gc, 1);
        }
        /* Else there's nothing to do.
         * If gc_refs > 0, it must be in move_unreachable's 'young'
         * list, and move_unreachable will eventually get to it.
         * If gc_refs == GC_REACHABLE, it's either in some other
         * generation so we don't care about it, or move_unreachable
         * already dealt with it.
         * If gc_refs == GC_UNTRACKED, it must be ignored.
         */
         else {
            assert(gc_refs > 0
                   || gc_refs == GC_REACHABLE
                   || gc_refs == GC_UNTRACKED);
         }
    }
    return 0;
}
447
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            /* Fetch the successor before untracking, which would
             * unlink gc from this list. */
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}
509
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200510/* Try to untrack all currently tracked dictionaries */
511static void
512untrack_dicts(PyGC_Head *head)
513{
514 PyGC_Head *next, *gc = head->gc.gc_next;
515 while (gc != head) {
516 PyObject *op = FROM_GC(gc);
517 next = gc->gc.gc_next;
518 if (PyDict_CheckExact(op))
519 _PyDict_MaybeUntrack(op);
520 gc = next;
521 }
522}
523
Antoine Pitrou796564c2013-07-30 19:59:21 +0200524/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000525static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200526has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000527{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200528 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000529}
530
Antoine Pitrou796564c2013-07-30 19:59:21 +0200531/* Move the objects in unreachable with tp_del slots into `finalizers`.
Tim Petersead8b7a2004-10-30 23:09:22 +0000532 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
533 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000534 */
Neil Schemenauera765c122001-11-01 17:35:23 +0000535static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200536move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
Neil Schemenauera765c122001-11-01 17:35:23 +0000537{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000538 PyGC_Head *gc;
539 PyGC_Head *next;
Tim Petersf6b80452003-04-07 19:21:15 +0000540
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000541 /* March over unreachable. Move objects with finalizers into
542 * `finalizers`.
543 */
544 for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
545 PyObject *op = FROM_GC(gc);
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000546
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000547 assert(IS_TENTATIVELY_UNREACHABLE(op));
548 next = gc->gc.gc_next;
Tim Petersf6ae7a42003-04-05 18:40:50 +0000549
Antoine Pitrou796564c2013-07-30 19:59:21 +0200550 if (has_legacy_finalizer(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000551 gc_list_move(gc, finalizers);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200552 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000553 }
554 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000555}
556
Antoine Pitrou796564c2013-07-30 19:59:21 +0200557/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000558static int
559visit_move(PyObject *op, PyGC_Head *tolist)
560{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000561 if (PyObject_IS_GC(op)) {
562 if (IS_TENTATIVELY_UNREACHABLE(op)) {
563 PyGC_Head *gc = AS_GC(op);
564 gc_list_move(gc, tolist);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200565 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000566 }
567 }
568 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000569}
570
/* Move objects that are reachable from finalizers, from the unreachable set
 * into finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = finalizers->gc.gc_next;
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        /* Note that the finalizers list may grow during this;
         * visit_move() appends to it, and the loop picks up the
         * newly appended entries. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}
587
Tim Petersead8b7a2004-10-30 23:09:22 +0000588/* Clear all weakrefs to unreachable objects, and if such a weakref has a
589 * callback, invoke it if necessary. Note that it's possible for such
590 * weakrefs to be outside the unreachable set -- indeed, those are precisely
591 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
592 * overview & some details. Some weakrefs with callbacks may be reclaimed
593 * directly by this routine; the number reclaimed is the return value. Other
594 * weakrefs with callbacks may be moved into the `old` generation. Objects
595 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
596 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
597 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000598 */
599static int
Tim Petersead8b7a2004-10-30 23:09:22 +0000600handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
Tim Peters403a2032003-11-20 21:21:46 +0000601{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000602 PyGC_Head *gc;
603 PyObject *op; /* generally FROM_GC(gc) */
604 PyWeakReference *wr; /* generally a cast of op */
605 PyGC_Head wrcb_to_call; /* weakrefs with callbacks to call */
606 PyGC_Head *next;
607 int num_freed = 0;
Tim Peters403a2032003-11-20 21:21:46 +0000608
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000609 gc_list_init(&wrcb_to_call);
Tim Peters403a2032003-11-20 21:21:46 +0000610
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000611 /* Clear all weakrefs to the objects in unreachable. If such a weakref
612 * also has a callback, move it into `wrcb_to_call` if the callback
613 * needs to be invoked. Note that we cannot invoke any callbacks until
614 * all weakrefs to unreachable objects are cleared, lest the callback
615 * resurrect an unreachable object via a still-active weakref. We
616 * make another pass over wrcb_to_call, invoking callbacks, after this
617 * pass completes.
618 */
619 for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
620 PyWeakReference **wrlist;
Tim Petersead8b7a2004-10-30 23:09:22 +0000621
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000622 op = FROM_GC(gc);
623 assert(IS_TENTATIVELY_UNREACHABLE(op));
624 next = gc->gc.gc_next;
Tim Petersead8b7a2004-10-30 23:09:22 +0000625
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000626 if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
627 continue;
Tim Petersead8b7a2004-10-30 23:09:22 +0000628
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000629 /* It supports weakrefs. Does it have any? */
630 wrlist = (PyWeakReference **)
631 PyObject_GET_WEAKREFS_LISTPTR(op);
Tim Petersead8b7a2004-10-30 23:09:22 +0000632
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000633 /* `op` may have some weakrefs. March over the list, clear
634 * all the weakrefs, and move the weakrefs with callbacks
635 * that must be called into wrcb_to_call.
636 */
637 for (wr = *wrlist; wr != NULL; wr = *wrlist) {
638 PyGC_Head *wrasgc; /* AS_GC(wr) */
Tim Petersead8b7a2004-10-30 23:09:22 +0000639
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000640 /* _PyWeakref_ClearRef clears the weakref but leaves
641 * the callback pointer intact. Obscure: it also
642 * changes *wrlist.
643 */
644 assert(wr->wr_object == op);
645 _PyWeakref_ClearRef(wr);
646 assert(wr->wr_object == Py_None);
647 if (wr->wr_callback == NULL)
648 continue; /* no callback */
Tim Petersead8b7a2004-10-30 23:09:22 +0000649
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000650 /* Headache time. `op` is going away, and is weakly referenced by
651 * `wr`, which has a callback. Should the callback be invoked? If wr
652 * is also trash, no:
653 *
654 * 1. There's no need to call it. The object and the weakref are
655 * both going away, so it's legitimate to pretend the weakref is
656 * going away first. The user has to ensure a weakref outlives its
657 * referent if they want a guarantee that the wr callback will get
658 * invoked.
659 *
660 * 2. It may be catastrophic to call it. If the callback is also in
661 * cyclic trash (CT), then although the CT is unreachable from
662 * outside the current generation, CT may be reachable from the
663 * callback. Then the callback could resurrect insane objects.
664 *
665 * Since the callback is never needed and may be unsafe in this case,
666 * wr is simply left in the unreachable set. Note that because we
667 * already called _PyWeakref_ClearRef(wr), its callback will never
668 * trigger.
669 *
670 * OTOH, if wr isn't part of CT, we should invoke the callback: the
671 * weakref outlived the trash. Note that since wr isn't CT in this
672 * case, its callback can't be CT either -- wr acted as an external
673 * root to this generation, and therefore its callback did too. So
674 * nothing in CT is reachable from the callback either, so it's hard
675 * to imagine how calling it later could create a problem for us. wr
676 * is moved to wrcb_to_call in this case.
677 */
678 if (IS_TENTATIVELY_UNREACHABLE(wr))
679 continue;
680 assert(IS_REACHABLE(wr));
Tim Peterscc2a8662004-10-31 22:12:43 +0000681
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000682 /* Create a new reference so that wr can't go away
683 * before we can process it again.
684 */
685 Py_INCREF(wr);
Tim Petersead8b7a2004-10-30 23:09:22 +0000686
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000687 /* Move wr to wrcb_to_call, for the next pass. */
688 wrasgc = AS_GC(wr);
689 assert(wrasgc != next); /* wrasgc is reachable, but
690 next isn't, so they can't
691 be the same */
692 gc_list_move(wrasgc, &wrcb_to_call);
693 }
694 }
Tim Petersead8b7a2004-10-30 23:09:22 +0000695
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000696 /* Invoke the callbacks we decided to honor. It's safe to invoke them
697 * because they can't reference unreachable objects.
698 */
699 while (! gc_list_is_empty(&wrcb_to_call)) {
700 PyObject *temp;
701 PyObject *callback;
Tim Petersead8b7a2004-10-30 23:09:22 +0000702
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000703 gc = wrcb_to_call.gc.gc_next;
704 op = FROM_GC(gc);
705 assert(IS_REACHABLE(op));
706 assert(PyWeakref_Check(op));
707 wr = (PyWeakReference *)op;
708 callback = wr->wr_callback;
709 assert(callback != NULL);
Tim Petersead8b7a2004-10-30 23:09:22 +0000710
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000711 /* copy-paste of weakrefobject.c's handle_callback() */
712 temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
713 if (temp == NULL)
714 PyErr_WriteUnraisable(callback);
715 else
716 Py_DECREF(temp);
Tim Petersead8b7a2004-10-30 23:09:22 +0000717
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000718 /* Give up the reference we created in the first pass. When
719 * op's refcount hits 0 (which it may or may not do right now),
720 * op's tp_dealloc will decref op->wr_callback too. Note
721 * that the refcount probably will hit 0 now, and because this
722 * weakref was reachable to begin with, gc didn't already
723 * add it to its count of freed objects. Example: a reachable
724 * weak value dict maps some key to this reachable weakref.
725 * The callback removes this key->weakref mapping from the
726 * dict, leaving no other references to the weakref (excepting
727 * ours).
728 */
729 Py_DECREF(op);
730 if (wrcb_to_call.gc.gc_next == gc) {
731 /* object is still alive -- move it */
732 gc_list_move(gc, old);
733 }
734 else
735 ++num_freed;
736 }
Tim Petersead8b7a2004-10-30 23:09:22 +0000737
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000738 return num_freed;
Tim Peters403a2032003-11-20 21:21:46 +0000739}
740
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000741static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200742debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000743{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100744 PySys_FormatStderr("gc: %s <%s %p>\n",
745 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000746}
747
Antoine Pitrou796564c2013-07-30 19:59:21 +0200748/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000749 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000750 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
751 * garbage list (a Python list), else only the objects in finalizers with
752 * __del__ methods are appended to garbage. All objects in finalizers are
753 * merged into the old list regardless.
Tim Peters259272b2003-04-06 19:41:39 +0000754 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
755 * The finalizers list is made empty on a successful return.
Tim Petersbf384c22003-04-06 00:11:39 +0000756 */
Tim Peters259272b2003-04-06 19:41:39 +0000757static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200758handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000759{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000760 PyGC_Head *gc = finalizers->gc.gc_next;
Tim Petersf6b80452003-04-07 19:21:15 +0000761
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000762 if (garbage == NULL) {
763 garbage = PyList_New(0);
764 if (garbage == NULL)
765 Py_FatalError("gc couldn't create gc.garbage list");
766 }
767 for (; gc != finalizers; gc = gc->gc.gc_next) {
768 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000769
Antoine Pitrou796564c2013-07-30 19:59:21 +0200770 if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000771 if (PyList_Append(garbage, op) < 0)
772 return -1;
773 }
774 }
Tim Petersf6b80452003-04-07 19:21:15 +0000775
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000776 gc_list_merge(finalizers, old);
777 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000778}
779
Tim Peters5fbc7b12014-05-08 17:42:19 -0500780/* Run first-time finalizers (if any) on all the objects in collectable.
781 * Note that this may remove some (or even all) of the objects from the
782 * list, due to refcounts falling to 0.
783 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200784static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500785finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200786{
787 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500788 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200789
Tim Peters5fbc7b12014-05-08 17:42:19 -0500790 /* While we're going through the loop, `finalize(op)` may cause op, or
791 * other objects, to be reclaimed via refcounts falling to zero. So
792 * there's little we can rely on about the structure of the input
793 * `collectable` list across iterations. For safety, we always take the
794 * first object in that list and move it to a temporary `seen` list.
795 * If objects vanish from the `collectable` and `seen` lists we don't
796 * care.
797 */
798 gc_list_init(&seen);
799
800 while (!gc_list_is_empty(collectable)) {
801 PyGC_Head *gc = collectable->gc.gc_next;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200802 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500803 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200804 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500805 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
806 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
Antoine Pitrou796564c2013-07-30 19:59:21 +0200807 _PyGCHead_SET_FINALIZED(gc, 1);
808 Py_INCREF(op);
809 finalize(op);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200810 Py_DECREF(op);
811 }
812 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500813 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200814}
815
816/* Walk the collectable list and check that they are really unreachable
817 from the outside (some objects could have been resurrected by a
818 finalizer). */
819static int
820check_garbage(PyGC_Head *collectable)
821{
822 PyGC_Head *gc;
823 for (gc = collectable->gc.gc_next; gc != collectable;
824 gc = gc->gc.gc_next) {
825 _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
826 assert(_PyGCHead_REFS(gc) != 0);
827 }
828 subtract_refs(collectable);
829 for (gc = collectable->gc.gc_next; gc != collectable;
830 gc = gc->gc.gc_next) {
831 assert(_PyGCHead_REFS(gc) >= 0);
832 if (_PyGCHead_REFS(gc) != 0)
833 return -1;
834 }
835 return 0;
836}
837
838static void
839revive_garbage(PyGC_Head *collectable)
840{
841 PyGC_Head *gc;
842 for (gc = collectable->gc.gc_next; gc != collectable;
843 gc = gc->gc.gc_next) {
844 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
845 }
846}
847
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000848/* Break reference cycles by clearing the containers involved. This is
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000849 * tricky business as the lists can be changing and we don't know which
Tim Peters19b74c72002-07-01 03:52:19 +0000850 * objects may be freed. It is possible I screwed something up here.
851 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000852static void
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000853delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000854{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000855 inquiry clear;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000856
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000857 while (!gc_list_is_empty(collectable)) {
858 PyGC_Head *gc = collectable->gc.gc_next;
859 PyObject *op = FROM_GC(gc);
Tim Peters88396172002-06-30 17:56:40 +0000860
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000861 if (debug & DEBUG_SAVEALL) {
862 PyList_Append(garbage, op);
863 }
864 else {
865 if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
866 Py_INCREF(op);
867 clear(op);
868 Py_DECREF(op);
869 }
870 }
871 if (collectable->gc.gc_next == gc) {
872 /* object is still alive, move it, it may die later */
873 gc_list_move(gc, old);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200874 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000875 }
876 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000877}
878
/* Clear all free lists.
 * All free lists are cleared during the collection of the highest
 * generation.  Allocated items in a free list may keep a pymalloc arena
 * occupied; clearing the free lists may give back memory to the OS
 * earlier.
 */
static void
clear_freelists(void)
{
    /* Each call releases one per-type free list; the return values
     * (counts of freed items) are deliberately ignored.
     * NOTE(review): this list must stay in sync with the set of types
     * that maintain free lists elsewhere in the interpreter. */
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
}
898
/* This is the main function.  Read this to understand how the
 * collection process works. */
/* Perform a collection of generation `generation` and all younger ones.
 *
 * generation       index of the oldest generation to examine
 * n_collected      out-param (may be NULL): receives # of objects collected
 * n_uncollectable  out-param (may be NULL): receives # of uncollectable
 *                  objects (moved to gc.garbage)
 * nofail           if non-zero, a pending exception at the end is cleared
 *                  instead of aborting via Py_FatalError
 *
 * Returns the total number of unreachable objects found (collected plus
 * uncollectable).
 */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &generation_stats[generation];

    if (debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                              gc_list_size(GEN_HEAD(i)));
        /* start timestamp for the DEBUG_STATS elapsed-time report below */
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    /* fire the DTrace gc-start probe, if tracing is active */
    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE: This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        long_lived_pending = 0;
        long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
            gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    /* A finalizer may have resurrected objects; if so, give the whole
     * unreachable set back to `old` instead of clearing it. */
    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    /* fire the DTrace gc-done probe with the total object count */
    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    return n+m;
}
1082
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001083/* Invoke progress callbacks to notify clients that garbage collection
1084 * is starting or stopping
1085 */
1086static void
1087invoke_gc_callback(const char *phase, int generation,
1088 Py_ssize_t collected, Py_ssize_t uncollectable)
1089{
1090 Py_ssize_t i;
1091 PyObject *info = NULL;
1092
1093 /* we may get called very early */
1094 if (callbacks == NULL)
1095 return;
1096 /* The local variable cannot be rebound, check it for sanity */
1097 assert(callbacks != NULL && PyList_CheckExact(callbacks));
1098 if (PyList_GET_SIZE(callbacks) != 0) {
1099 info = Py_BuildValue("{sisnsn}",
1100 "generation", generation,
1101 "collected", collected,
1102 "uncollectable", uncollectable);
1103 if (info == NULL) {
1104 PyErr_WriteUnraisable(NULL);
1105 return;
1106 }
1107 }
1108 for (i=0; i<PyList_GET_SIZE(callbacks); i++) {
1109 PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
1110 Py_INCREF(cb); /* make sure cb doesn't go away */
1111 r = PyObject_CallFunction(cb, "sO", phase, info);
1112 Py_XDECREF(r);
1113 if (r == NULL)
1114 PyErr_WriteUnraisable(cb);
1115 Py_DECREF(cb);
1116 }
1117 Py_XDECREF(info);
1118}
1119
1120/* Perform garbage collection of a generation and invoke
1121 * progress callbacks.
1122 */
1123static Py_ssize_t
1124collect_with_callback(int generation)
1125{
1126 Py_ssize_t result, collected, uncollectable;
1127 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001128 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001129 invoke_gc_callback("stop", generation, collected, uncollectable);
1130 return result;
1131}
1132
Neal Norwitz7b216c52006-03-04 20:01:53 +00001133static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001134collect_generations(void)
1135{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001136 int i;
1137 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001138
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001139 /* Find the oldest generation (highest numbered) where the count
1140 * exceeds the threshold. Objects in the that generation and
1141 * generations younger than it will be collected. */
1142 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
1143 if (generations[i].count > generations[i].threshold) {
1144 /* Avoid quadratic performance degradation in number
1145 of tracked objects. See comments at the beginning
1146 of this file, and issue #4074.
1147 */
1148 if (i == NUM_GENERATIONS - 1
1149 && long_lived_pending < long_lived_total / 4)
1150 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001151 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001152 break;
1153 }
1154 }
1155 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001156}
1157
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001158PyDoc_STRVAR(gc_enable__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001159"enable() -> None\n"
1160"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001161"Enable automatic garbage collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001162
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001163static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001164gc_enable(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001165{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001166 enabled = 1;
1167 Py_INCREF(Py_None);
1168 return Py_None;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001169}
1170
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001171PyDoc_STRVAR(gc_disable__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001172"disable() -> None\n"
1173"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001174"Disable automatic garbage collection.\n");
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001175
1176static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001177gc_disable(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001178{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001179 enabled = 0;
1180 Py_INCREF(Py_None);
1181 return Py_None;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001182}
1183
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001184PyDoc_STRVAR(gc_isenabled__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001185"isenabled() -> status\n"
1186"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001187"Returns true if automatic garbage collection is enabled.\n");
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001188
1189static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001190gc_isenabled(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001191{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001192 return PyBool_FromLong((long)enabled);
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001193}
1194
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001195PyDoc_STRVAR(gc_collect__doc__,
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001196"collect([generation]) -> n\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001197"\n"
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001198"With no arguments, run a full collection. The optional argument\n"
1199"may be an integer specifying which generation to collect. A ValueError\n"
1200"is raised if the generation number is invalid.\n\n"
1201"The number of unreachable objects is returned.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001202
1203static PyObject *
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001204gc_collect(PyObject *self, PyObject *args, PyObject *kws)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001205{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001206 static char *keywords[] = {"generation", NULL};
1207 int genarg = NUM_GENERATIONS - 1;
1208 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001209
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001210 if (!PyArg_ParseTupleAndKeywords(args, kws, "|i", keywords, &genarg))
1211 return NULL;
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001212
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001213 else if (genarg < 0 || genarg >= NUM_GENERATIONS) {
1214 PyErr_SetString(PyExc_ValueError, "invalid generation");
1215 return NULL;
1216 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001217
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001218 if (collecting)
1219 n = 0; /* already collecting, don't do anything */
1220 else {
1221 collecting = 1;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001222 n = collect_with_callback(genarg);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001223 collecting = 0;
1224 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001225
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001226 return PyLong_FromSsize_t(n);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001227}
1228
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001229PyDoc_STRVAR(gc_set_debug__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001230"set_debug(flags) -> None\n"
1231"\n"
1232"Set the garbage collection debugging flags. Debugging information is\n"
1233"written to sys.stderr.\n"
1234"\n"
1235"flags is an integer and can have the following bits turned on:\n"
1236"\n"
1237" DEBUG_STATS - Print statistics during collection.\n"
1238" DEBUG_COLLECTABLE - Print collectable objects found.\n"
1239" DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.\n"
Neil Schemenauer544de1e2000-09-22 15:22:38 +00001240" DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001241" DEBUG_LEAK - Debug leaking programs (everything but STATS).\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001242
1243static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001244gc_set_debug(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001245{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001246 if (!PyArg_ParseTuple(args, "i:set_debug", &debug))
1247 return NULL;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001248
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001249 Py_INCREF(Py_None);
1250 return Py_None;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001251}
1252
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001253PyDoc_STRVAR(gc_get_debug__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001254"get_debug() -> flags\n"
1255"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001256"Get the garbage collection debugging flags.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001257
1258static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001259gc_get_debug(PyObject *self, PyObject *noargs)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001260{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001261 return Py_BuildValue("i", debug);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001262}
1263
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001264PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001265"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001266"\n"
1267"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001268"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001269
1270static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001271gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001272{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001273 int i;
1274 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
1275 &generations[0].threshold,
1276 &generations[1].threshold,
1277 &generations[2].threshold))
1278 return NULL;
1279 for (i = 2; i < NUM_GENERATIONS; i++) {
1280 /* generations higher than 2 get the same threshold */
1281 generations[i].threshold = generations[2].threshold;
1282 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001283
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001284 Py_INCREF(Py_None);
1285 return Py_None;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001286}
1287
PyDoc_STRVAR(gc_get_thresh__doc__,
"get_threshold() -> (threshold0, threshold1, threshold2)\n"
"\n"
"Return the current collection thresholds\n");

/* gc.get_threshold(): return the three generation thresholds as a tuple
 * of ints.  Generations above 2 share generation 2's threshold (see
 * gc_set_thresh), so only three values are reported. */
static PyObject *
gc_get_thresh(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}
1301
PyDoc_STRVAR(gc_get_count__doc__,
"get_count() -> (count0, count1, count2)\n"
"\n"
"Return the current collection counts\n");

/* gc.get_count(): return the current allocation/collection counters of
 * the first three generations as a tuple of ints. */
static PyObject *
gc_get_count(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}
1315
Neil Schemenauer48c70342001-08-09 15:38:31 +00001316static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001317referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001318{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001319 Py_ssize_t i;
1320 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1321 if (PyTuple_GET_ITEM(objs, i) == obj)
1322 return 1;
1323 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001324}
1325
/* Walk one generation list and append to `resultlist` every container whose
 * tp_traverse reports a reference to some element of the tuple `objs`
 * (tested by identity in referrersvisit).  `objs` and `resultlist`
 * themselves are skipped so the query machinery does not report itself.
 *
 * Return convention is inverted from the usual C-API style:
 * returns 1 on success, 0 on error (PyList_Append failed).
 */
static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        /* A nonzero visitor result means referrersvisit found a match. */
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}
1344
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001345PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001346"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001347Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001348
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001349static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001350gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001351{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001352 int i;
1353 PyObject *result = PyList_New(0);
1354 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001355
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001356 for (i = 0; i < NUM_GENERATIONS; i++) {
1357 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1358 Py_DECREF(result);
1359 return NULL;
1360 }
1361 }
1362 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001363}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001364
Tim Peters0f81ab62003-04-08 16:39:48 +00001365/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001366static int
Tim Peters730f5532003-04-08 17:17:17 +00001367referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001368{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001369 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001370}
1371
Tim Peters730f5532003-04-08 17:17:17 +00001372PyDoc_STRVAR(gc_get_referents__doc__,
1373"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001374Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001375
1376static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001377gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001378{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001379 Py_ssize_t i;
1380 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001381
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001382 if (result == NULL)
1383 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001384
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001385 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1386 traverseproc traverse;
1387 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001388
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001389 if (! PyObject_IS_GC(obj))
1390 continue;
1391 traverse = Py_TYPE(obj)->tp_traverse;
1392 if (! traverse)
1393 continue;
1394 if (traverse(obj, (visitproc)referentsvisit, result)) {
1395 Py_DECREF(result);
1396 return NULL;
1397 }
1398 }
1399 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001400}
1401
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001402PyDoc_STRVAR(gc_get_objects__doc__,
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001403"get_objects() -> [...]\n"
1404"\n"
1405"Return a list of objects tracked by the collector (excluding the list\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001406"returned).\n");
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001407
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001408static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001409gc_get_objects(PyObject *self, PyObject *noargs)
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001410{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001411 int i;
1412 PyObject* result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001413
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001414 result = PyList_New(0);
1415 if (result == NULL)
1416 return NULL;
1417 for (i = 0; i < NUM_GENERATIONS; i++) {
1418 if (append_objects(result, GEN_HEAD(i))) {
1419 Py_DECREF(result);
1420 return NULL;
1421 }
1422 }
1423 return result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001424}
1425
PyDoc_STRVAR(gc_get_stats__doc__,
"get_stats() -> [...]\n"
"\n"
"Return a list of dictionaries containing per-generation statistics.\n");

/* gc.get_stats(): one dict per generation with keys "collections",
 * "collected" and "uncollectable", built from a stack copy of the global
 * generation_stats table.  Returns the list, or NULL with an exception.
 */
static PyObject *
gc_get_stats(PyObject *self, PyObject *noargs)
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        /* "sn" format pairs: C string key, Py_ssize_t value. */
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        /* The list now holds its own reference to the dict. */
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1470
1471
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001472PyDoc_STRVAR(gc_is_tracked__doc__,
1473"is_tracked(obj) -> bool\n"
1474"\n"
1475"Returns true if the object is tracked by the garbage collector.\n"
1476"Simple atomic objects will return false.\n"
1477);
1478
1479static PyObject *
1480gc_is_tracked(PyObject *self, PyObject *obj)
1481{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001482 PyObject *result;
1483
1484 if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
1485 result = Py_True;
1486 else
1487 result = Py_False;
1488 Py_INCREF(result);
1489 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001490}
1491
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001492
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001493PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001494"This module provides access to the garbage collector for reference cycles.\n"
1495"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001496"enable() -- Enable automatic garbage collection.\n"
1497"disable() -- Disable automatic garbage collection.\n"
1498"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001499"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001500"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001501"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001502"set_debug() -- Set debugging flags.\n"
1503"get_debug() -- Get debugging flags.\n"
1504"set_threshold() -- Set the collection thresholds.\n"
1505"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001506"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001507"is_tracked() -- Returns true if a given object is tracked.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001508"get_referrers() -- Return the list of objects that refer to an object.\n"
Tim Peters730f5532003-04-08 17:17:17 +00001509"get_referents() -- Return the list of objects that an object refers to.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001510
/* Method table for the gc module.  Docstrings are defined next to each
   implementation; gc_collect takes keywords, everything else is
   METH_NOARGS/METH_VARARGS/METH_O. */
static PyMethodDef GcMethods[] = {
    {"enable", gc_enable, METH_NOARGS, gc_enable__doc__},
    {"disable", gc_disable, METH_NOARGS, gc_disable__doc__},
    {"isenabled", gc_isenabled, METH_NOARGS, gc_isenabled__doc__},
    {"set_debug", gc_set_debug, METH_VARARGS, gc_set_debug__doc__},
    {"get_debug", gc_get_debug, METH_NOARGS, gc_get_debug__doc__},
    {"get_count", gc_get_count, METH_NOARGS, gc_get_count__doc__},
    {"set_threshold", gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    {"get_threshold", gc_get_thresh, METH_NOARGS, gc_get_thresh__doc__},
    {"collect", (PyCFunction)gc_collect,
        METH_VARARGS | METH_KEYWORDS, gc_collect__doc__},
    {"get_objects", gc_get_objects,METH_NOARGS, gc_get_objects__doc__},
    {"get_stats", gc_get_stats, METH_NOARGS, gc_get_stats__doc__},
    {"is_tracked", gc_is_tracked, METH_O, gc_is_tracked__doc__},
    {"get_referrers", gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents", gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL, NULL} /* Sentinel */
};
1531
/* Module definition for gc.  m_size is -1: the module keeps global state
   (generations, garbage, callbacks) and does not support sub-interpreter
   copies of that state. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc", /* m_name */
    gc__doc__, /* m_doc */
    -1, /* m_size */
    GcMethods, /* m_methods */
    NULL, /* m_reload */
    NULL, /* m_traverse */
    NULL, /* m_clear */
    NULL /* m_free */
};
1543
/* Module initialization: create the gc module object, attach the
 * persistent `garbage` and `callbacks` lists, and export the DEBUG_*
 * flag constants.  Returns the new module, or NULL on error.
 */
PyMODINIT_FUNC
PyInit_gc(void)
{
    PyObject *m;

    m = PyModule_Create(&gcmodule);

    if (m == NULL)
        return NULL;

    /* `garbage` is module-global state; create it only on first init so
       it survives across re-initialization. */
    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            return NULL;
    }
    Py_INCREF(garbage);
    if (PyModule_AddObject(m, "garbage", garbage) < 0)
        return NULL;

    /* Same pattern for the list of collection callbacks. */
    if (callbacks == NULL) {
        callbacks = PyList_New(0);
        if (callbacks == NULL)
            return NULL;
    }
    Py_INCREF(callbacks);
    if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
        return NULL;

    /* NOTE(review): the error paths above return without releasing `m`
       (and the extra INCREF when PyModule_AddObject fails) -- confirm
       whether that leak is acceptable during interpreter startup. */
#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
    ADD_INT(DEBUG_STATS);
    ADD_INT(DEBUG_COLLECTABLE);
    ADD_INT(DEBUG_UNCOLLECTABLE);
    ADD_INT(DEBUG_SAVEALL);
    ADD_INT(DEBUG_LEAK);
#undef ADD_INT
    return m;
}
1581
Guido van Rossume13ddc92003-04-17 17:29:22 +00001582/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001583Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001584PyGC_Collect(void)
1585{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001586 Py_ssize_t n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001587
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001588 if (collecting)
1589 n = 0; /* already collecting, don't do anything */
1590 else {
1591 collecting = 1;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001592 n = collect_with_callback(NUM_GENERATIONS - 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001593 collecting = 0;
1594 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001595
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001596 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001597}
1598
Antoine Pitroufef34e32013-05-19 01:11:58 +02001599Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001600_PyGC_CollectIfEnabled(void)
1601{
1602 if (!enabled)
1603 return 0;
1604
1605 return PyGC_Collect();
1606}
1607
1608Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02001609_PyGC_CollectNoFail(void)
1610{
1611 Py_ssize_t n;
1612
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001613 /* Ideally, this function is only called on interpreter shutdown,
1614 and therefore not recursively. Unfortunately, when there are daemon
1615 threads, a daemon thread can start a cyclic garbage collection
1616 during interpreter shutdown (and then never finish it).
1617 See http://bugs.python.org/issue8713#msg195178 for an example.
1618 */
1619 if (collecting)
1620 n = 0;
1621 else {
1622 collecting = 1;
1623 n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
1624 collecting = 0;
1625 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001626 return n;
1627}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001628
/* At interpreter shutdown, warn about objects left in gc.garbage (unless
 * DEBUG_SAVEALL is set, in which case garbage is intentionally populated).
 * With DEBUG_UNCOLLECTABLE also dump the repr of the garbage list to
 * stderr.  Errors while warning/printing are routed through
 * PyErr_WriteUnraisable rather than raised.
 */
void
_PyGC_DumpShutdownStats(void)
{
    if (!(debug & DEBUG_SAVEALL)
        && garbage != NULL && PyList_GET_SIZE(garbage) > 0) {
        char *message;
        if (debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(garbage)))
            PyErr_WriteUnraisable(NULL);
        if (debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(garbage);
            /* Encode with the filesystem encoding so raw bytes can be
               written to stderr without the io stack. */
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(garbage);
            else {
                PySys_WriteStderr(
                    " %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}
1664
/* Finalize this module's global state: drop the callbacks list.
   (gc.garbage is deliberately left alone here.) */
void
_PyGC_Fini(void)
{
    Py_CLEAR(callbacks);
}
1670
/* for debugging */
/* Dump the object owning the given GC header via _PyObject_Dump. */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}
1677
/* extension modules might be compiled with GC support so these
   functions must always be available */

/* Drop any macro definitions of these names so the real, exported
   functions can be defined below. */
#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc
1685
Neil Schemenauer43411b52001-08-30 00:05:51 +00001686void
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001687PyObject_GC_Track(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001688{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001689 _PyObject_GC_TRACK(op);
Neil Schemenauer43411b52001-08-30 00:05:51 +00001690}
1691
/* Public (non-macro) entry point: stop tracking `op`.  Unlike the
   _PyObject_GC_UNTRACK macro, this is safe to call on an already
   untracked object. */
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (IS_TRACKED(op))
        _PyObject_GC_UNTRACK(op);
}
1701
/* Common allocator for GC-aware objects: reserve room for a PyGC_Head in
 * front of the object, mark it untracked, bump the generation-0 count and
 * possibly trigger an automatic collection.
 *
 * use_calloc selects zeroed (calloc) vs. uninitialized (malloc) memory.
 * Returns the object pointer (just past the GC head), or NULL with
 * MemoryError set.
 */
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    /* Guard the size addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    g->gc.gc_refs = 0;
    /* New objects start untracked; types track them once initialized. */
    _PyGCHead_SET_REFS(g, GC_UNTRACKED);
    generations[0].count++; /* number of allocated GC objects */
    /* Trigger automatic collection only when enabled, the (non-zero)
       gen-0 threshold is exceeded, no collection is already running, and
       no exception is pending. */
    if (generations[0].count > generations[0].threshold &&
        enabled &&
        generations[0].threshold &&
        !collecting &&
        !PyErr_Occurred()) {
        collecting = 1;
        collect_generations();
        collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
1732
/* Allocate an uninitialized GC-aware object of `basicsize` bytes. */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1738
/* Allocate a zero-initialized GC-aware object of `basicsize` bytes. */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1744
1745PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001746_PyObject_GC_New(PyTypeObject *tp)
1747{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001748 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1749 if (op != NULL)
1750 op = PyObject_INIT(op, tp);
1751 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001752}
1753
1754PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001755_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001756{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001757 size_t size;
1758 PyVarObject *op;
1759
1760 if (nitems < 0) {
1761 PyErr_BadInternalCall();
1762 return NULL;
1763 }
1764 size = _PyObject_VAR_SIZE(tp, nitems);
1765 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001766 if (op != NULL)
1767 op = PyObject_INIT_VAR(op, tp, nitems);
1768 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001769}
1770
1771PyVarObject *
Martin v. Löwis41290682006-02-16 14:56:14 +00001772_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001773{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001774 const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
1775 PyGC_Head *g = AS_GC(op);
1776 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1777 return (PyVarObject *)PyErr_NoMemory();
1778 g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
1779 if (g == NULL)
1780 return (PyVarObject *)PyErr_NoMemory();
1781 op = (PyVarObject *) FROM_GC(g);
1782 Py_SIZE(op) = nitems;
1783 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001784}
1785
/* Free a GC-aware object: unlink it from its generation list if it is
 * still tracked, decrement the generation-0 allocation count, and release
 * the memory including the PyGC_Head prefix.
 */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}