blob: 07950a6228c35c970cfaa1c224891f3a463baefb [file] [log] [blame]
/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000027#include "frameobject.h" /* for PyFrame_ClearFreeList */
Victor Stinner7181dec2015-03-27 17:47:53 +010028#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000029
/* Get an object's GC head: the PyGC_Head immediately precedes the object
 * in memory, so step one PyGC_Head backwards. */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head.
 * The argument is parenthesized so that expression arguments
 * (e.g. FROM_GC(cond ? a : b)) bind correctly under the cast. */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)(g))+1))
35
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000036/*** Global GC state ***/
37
/* One GC generation: a circular doubly-linked list of tracked container
 * objects (headed at `head`), plus its collection bookkeeping. */
struct gc_generation {
    PyGC_Head head;
    int threshold; /* collection threshold */
    int count; /* count of allocations or collections of younger
                  generations */
};

#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)

/* linked lists of container objects; each generation's list header links
 * to itself initially, i.e. every generation starts empty */
static struct gc_generation generations[NUM_GENERATIONS] = {
    /* PyGC_Head, threshold, count */
    {{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0},
    {{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0},
    {{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0},
};

/* Exported alias for the youngest generation's list header. */
PyGC_Head *_PyGC_generation0 = GEN_HEAD(0);
static int enabled = 1; /* automatic collection enabled? */

/* true if we are currently running the collector */
static int collecting = 0;

/* list of uncollectable objects */
static PyObject *garbage = NULL;

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* a list of callbacks to be invoked when collection is performed */
static PyObject *callbacks = NULL;

/* This is the number of objects that survived the last full collection.  It
   approximates the number of long-lived objects tracked by the GC.

   (By "full collection", we mean a collection of the oldest generation.)
*/
static Py_ssize_t long_lived_total = 0;

/* This is the number of objects that survived all "non-full" collections,
   and are awaiting to undergo a full collection for the first time.
*/
static Py_ssize_t long_lived_pending = 0;
84
/*
   NOTE: about the counting of long-lived objects.

   To limit the cost of garbage collection, there are two strategies:
   - make each collection faster, e.g. by scanning fewer objects;
   - do fewer collections.
   This heuristic is about the latter strategy.

   In addition to the various configurable thresholds, we only trigger a
   full collection if the ratio
       long_lived_pending / long_lived_total
   is above a given value (hardwired to 25%).

   The reason is that, while "non-full" collections (i.e., collections of
   the young and middle generations) will always examine roughly the same
   number of objects -- determined by the aforementioned thresholds --,
   the cost of a full collection is proportional to the total number of
   long-lived objects, which is virtually unbounded.

   Indeed, it has been remarked that doing a full collection every
   <constant number> of object creations entails a dramatic performance
   degradation in workloads which consist of creating and storing lots of
   long-lived objects (e.g. building a large list of GC-tracked objects would
   show quadratic performance, instead of linear as expected: see issue #4074).

   Using the above ratio, instead, yields amortized linear performance in
   the total number of objects (the effect of which can be summarized
   thusly: "each full garbage collection is more and more costly as the
   number of objects grows, but we do fewer and fewer of them").

   This heuristic was suggested by Martin von Löwis on python-dev in
   June 2008.  His original analysis and proposal can be found at:
       http://mail.python.org/pipermail/python-dev/2008-June/080579.html
*/
119
/*
   NOTE: about untracking of mutable objects.

   Certain types of container cannot participate in a reference cycle, and
   so do not need to be tracked by the garbage collector.  Untracking these
   objects reduces the cost of garbage collections.  However, determining
   which objects may be untracked is not free, and the costs must be
   weighed against the benefits for garbage collection.

   There are two possible strategies for when to untrack a container:

   i) When the container is created.
   ii) When the container is examined by the garbage collector.

   Tuples containing only immutable objects (integers, strings etc., and
   recursively, tuples of immutable objects) do not need to be tracked.
   The interpreter creates a large number of tuples, many of which will
   not survive until garbage collection.  It is therefore not worthwhile
   to untrack eligible tuples at creation time.

   Instead, all tuples except the empty tuple are tracked when created.
   During garbage collection it is determined whether any surviving tuples
   can be untracked.  A tuple can be untracked if all of its contents are
   already not tracked.  Tuples are examined for untracking in all garbage
   collection cycles.  It may take more than one cycle to untrack a tuple.

   Dictionaries containing only immutable objects also do not need to be
   tracked.  Dictionaries are untracked when created.  If a tracked item is
   inserted into a dictionary (either as a key or value), the dictionary
   becomes tracked.  During a full garbage collection (all generations),
   the collector will untrack any dictionaries whose contents are not
   tracked.

   The module provides the python function is_tracked(obj), which returns
   the CURRENT tracking status of the object.  Subsequent garbage
   collections may change the tracking status of the object.

   Untracking of certain containers was introduced in issue #4688, and
   the algorithm was refined in response to issue #14775.
*/
Antoine Pitrou14b78f52009-01-09 22:27:08 +0000160
/* set for debugging information */
#define DEBUG_STATS (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
/* Parenthesize the expansion: without the outer parentheses, a use such as
 * `debug & DEBUG_LEAK` would parse as `(debug & DEBUG_COLLECTABLE) |
 * DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL` because `&` binds tighter than `|`. */
#define DEBUG_LEAK (DEBUG_COLLECTABLE | \
                    DEBUG_UNCOLLECTABLE | \
                    DEBUG_SAVEALL)
static int debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000170
/* Running stats per generation, exposed via gc.get_stats(). */
struct gc_generation_stats {
    /* total number of collections */
    Py_ssize_t collections;
    /* total number of collected objects */
    Py_ssize_t collected;
    /* total number of uncollectable objects (put into gc.garbage) */
    Py_ssize_t uncollectable;
};

static struct gc_generation_stats generation_stats[NUM_GENERATIONS];
182
/*--------------------------------------------------------------------------
gc_refs values.

Between collections, every gc'ed object has one of two gc_refs values:

GC_UNTRACKED
    The initial state; objects returned by PyObject_GC_Malloc are in this
    state.  The object doesn't live in any generation list, and its
    tp_traverse slot must not be called.

GC_REACHABLE
    The object lives in some generation list, and its tp_traverse is safe to
    call.  An object transitions to GC_REACHABLE when PyObject_GC_Track
    is called.

During a collection, gc_refs can temporarily take on other states:

>= 0
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.
    gc_refs remains >= 0 throughout these steps.

GC_TENTATIVELY_UNREACHABLE
    move_unreachable() then moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set.
    Objects that are found to be reachable have gc_refs set to GC_REACHABLE
    again.  Objects that are found to be unreachable have gc_refs set to
    GC_TENTATIVELY_UNREACHABLE.  It's "tentatively" because the pass doing
    this can't be sure until it ends, and GC_TENTATIVELY_UNREACHABLE may
    transition back to GC_REACHABLE.

    Only objects with GC_TENTATIVELY_UNREACHABLE still set are candidates
    for collection.  If it's decided not to collect such an object (e.g.,
    it has a __del__ method), its gc_refs is restored to GC_REACHABLE again.
----------------------------------------------------------------------------
*/
/* Local aliases for the gc_refs sentinel states described in the comment
 * block above. */
#define GC_UNTRACKED _PyGC_REFS_UNTRACKED
#define GC_REACHABLE _PyGC_REFS_REACHABLE
#define GC_TENTATIVELY_UNREACHABLE _PyGC_REFS_TENTATIVELY_UNREACHABLE

/* State predicates on a PyObject* (not a PyGC_Head*). */
#define IS_TRACKED(o) (_PyGC_REFS(o) != GC_UNTRACKED)
#define IS_REACHABLE(o) (_PyGC_REFS(o) == GC_REACHABLE)
#define IS_TENTATIVELY_UNREACHABLE(o) ( \
    _PyGC_REFS(o) == GC_TENTATIVELY_UNREACHABLE)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000231/*** list functions ***/
232
233static void
234gc_list_init(PyGC_Head *list)
235{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000236 list->gc.gc_prev = list;
237 list->gc.gc_next = list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000238}
239
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000240static int
241gc_list_is_empty(PyGC_Head *list)
242{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000243 return (list->gc.gc_next == list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000244}
245
#if 0
/* This became unused after gc_list_move() was introduced. */
/* Append `node` to `list`, i.e. insert it just before the header. */
static void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *tail = list->gc.gc_prev;
    node->gc.gc_next = list;
    node->gc.gc_prev = tail;
    tail->gc.gc_next = node;
    list->gc.gc_prev = node;
}
#endif
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000258
Tim Peterse2d59182004-11-01 01:39:08 +0000259/* Remove `node` from the gc list it's currently in. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000260static void
261gc_list_remove(PyGC_Head *node)
262{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000263 node->gc.gc_prev->gc.gc_next = node->gc.gc_next;
264 node->gc.gc_next->gc.gc_prev = node->gc.gc_prev;
265 node->gc.gc_next = NULL; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000266}
267
Tim Peterse2d59182004-11-01 01:39:08 +0000268/* Move `node` from the gc list it's currently in (which is not explicitly
269 * named here) to the end of `list`. This is semantically the same as
270 * gc_list_remove(node) followed by gc_list_append(node, list).
271 */
272static void
273gc_list_move(PyGC_Head *node, PyGC_Head *list)
274{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000275 PyGC_Head *new_prev;
276 PyGC_Head *current_prev = node->gc.gc_prev;
277 PyGC_Head *current_next = node->gc.gc_next;
278 /* Unlink from current list. */
279 current_prev->gc.gc_next = current_next;
280 current_next->gc.gc_prev = current_prev;
281 /* Relink at end of new list. */
282 new_prev = node->gc.gc_prev = list->gc.gc_prev;
283 new_prev->gc.gc_next = list->gc.gc_prev = node;
284 node->gc.gc_next = list;
Tim Peterse2d59182004-11-01 01:39:08 +0000285}
286
287/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000288static void
289gc_list_merge(PyGC_Head *from, PyGC_Head *to)
290{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000291 PyGC_Head *tail;
292 assert(from != to);
293 if (!gc_list_is_empty(from)) {
294 tail = to->gc.gc_prev;
295 tail->gc.gc_next = from->gc.gc_next;
296 tail->gc.gc_next->gc.gc_prev = tail;
297 to->gc.gc_prev = from->gc.gc_prev;
298 to->gc.gc_prev->gc.gc_next = to;
299 }
300 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000301}
302
Neal Norwitz7b216c52006-03-04 20:01:53 +0000303static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000304gc_list_size(PyGC_Head *list)
305{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000306 PyGC_Head *gc;
307 Py_ssize_t n = 0;
308 for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
309 n++;
310 }
311 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000312}
313
Tim Peters259272b2003-04-06 19:41:39 +0000314/* Append objects in a GC list to a Python list.
315 * Return 0 if all OK, < 0 if error (out of memory for list).
316 */
317static int
318append_objects(PyObject *py_list, PyGC_Head *gc_list)
319{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000320 PyGC_Head *gc;
321 for (gc = gc_list->gc.gc_next; gc != gc_list; gc = gc->gc.gc_next) {
322 PyObject *op = FROM_GC(gc);
323 if (op != py_list) {
324 if (PyList_Append(py_list, op)) {
325 return -1; /* exception */
326 }
327 }
328 }
329 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000330}
331
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000332/*** end of list stuff ***/
333
334
Tim Peters19b74c72002-07-01 03:52:19 +0000335/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 for all objects
336 * in containers, and is GC_REACHABLE for all tracked gc objects not in
337 * containers.
Tim Peters88396172002-06-30 17:56:40 +0000338 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000339static void
340update_refs(PyGC_Head *containers)
341{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000342 PyGC_Head *gc = containers->gc.gc_next;
343 for (; gc != containers; gc = gc->gc.gc_next) {
Antoine Pitrou796564c2013-07-30 19:59:21 +0200344 assert(_PyGCHead_REFS(gc) == GC_REACHABLE);
345 _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000346 /* Python's cyclic gc should never see an incoming refcount
347 * of 0: if something decref'ed to 0, it should have been
348 * deallocated immediately at that time.
349 * Possible cause (if the assert triggers): a tp_dealloc
350 * routine left a gc-aware object tracked during its teardown
351 * phase, and did something-- or allowed something to happen --
352 * that called back into Python. gc can trigger then, and may
353 * see the still-tracked dying object. Before this assert
354 * was added, such mistakes went on to allow gc to try to
355 * delete the object again. In a debug build, that caused
356 * a mysterious segfault, when _Py_ForgetReference tried
357 * to remove the object from the doubly-linked list of all
358 * objects a second time. In a release build, an actual
359 * double deallocation occurred, which leads to corruption
360 * of the allocator's internal bookkeeping pointers. That's
361 * so serious that maybe this should be a release-build
362 * check instead of an assert?
363 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200364 assert(_PyGCHead_REFS(gc) != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000365 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000366}
367
Tim Peters19b74c72002-07-01 03:52:19 +0000368/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000369static int
370visit_decref(PyObject *op, void *data)
371{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000372 assert(op != NULL);
373 if (PyObject_IS_GC(op)) {
374 PyGC_Head *gc = AS_GC(op);
375 /* We're only interested in gc_refs for objects in the
376 * generation being collected, which can be recognized
377 * because only they have positive gc_refs.
378 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200379 assert(_PyGCHead_REFS(gc) != 0); /* else refcount was too small */
380 if (_PyGCHead_REFS(gc) > 0)
381 _PyGCHead_DECREF(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000382 }
383 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000384}
385
Tim Peters19b74c72002-07-01 03:52:19 +0000386/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
387 * for all objects in containers, and is GC_REACHABLE for all tracked gc
388 * objects not in containers. The ones with gc_refs > 0 are directly
389 * reachable from outside containers, and so can't be collected.
390 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000391static void
392subtract_refs(PyGC_Head *containers)
393{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000394 traverseproc traverse;
395 PyGC_Head *gc = containers->gc.gc_next;
396 for (; gc != containers; gc=gc->gc.gc_next) {
397 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
398 (void) traverse(FROM_GC(gc),
399 (visitproc)visit_decref,
400 NULL);
401 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000402}
403
Tim Peters19b74c72002-07-01 03:52:19 +0000404/* A traversal callback for move_unreachable. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000405static int
Tim Peters19b74c72002-07-01 03:52:19 +0000406visit_reachable(PyObject *op, PyGC_Head *reachable)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000407{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000408 if (PyObject_IS_GC(op)) {
409 PyGC_Head *gc = AS_GC(op);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200410 const Py_ssize_t gc_refs = _PyGCHead_REFS(gc);
Tim Peters19b74c72002-07-01 03:52:19 +0000411
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000412 if (gc_refs == 0) {
413 /* This is in move_unreachable's 'young' list, but
414 * the traversal hasn't yet gotten to it. All
415 * we need to do is tell move_unreachable that it's
416 * reachable.
417 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200418 _PyGCHead_SET_REFS(gc, 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000419 }
420 else if (gc_refs == GC_TENTATIVELY_UNREACHABLE) {
421 /* This had gc_refs = 0 when move_unreachable got
422 * to it, but turns out it's reachable after all.
423 * Move it back to move_unreachable's 'young' list,
424 * and move_unreachable will eventually get to it
425 * again.
426 */
427 gc_list_move(gc, reachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200428 _PyGCHead_SET_REFS(gc, 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000429 }
430 /* Else there's nothing to do.
431 * If gc_refs > 0, it must be in move_unreachable's 'young'
432 * list, and move_unreachable will eventually get to it.
433 * If gc_refs == GC_REACHABLE, it's either in some other
434 * generation so we don't care about it, or move_unreachable
435 * already dealt with it.
436 * If gc_refs == GC_UNTRACKED, it must be ignored.
437 */
438 else {
439 assert(gc_refs > 0
440 || gc_refs == GC_REACHABLE
441 || gc_refs == GC_UNTRACKED);
442 }
443 }
444 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000445}
446
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young have gc_refs = GC_REACHABLE, and all objects in
 * unreachable have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All tracked
 * gc objects not in young or unreachable still have gc_refs = GC_REACHABLE.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    PyGC_Head *gc = young->gc.gc_next;

    /* Invariants:  all objects "to the left" of us in young have gc_refs
     * = GC_REACHABLE, and are indeed reachable (directly or indirectly)
     * from outside the young list as it was at entry.  All other objects
     * from the original young "to the left" of us are in unreachable now,
     * and have gc_refs = GC_TENTATIVELY_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        PyGC_Head *next;

        if (_PyGCHead_REFS(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(_PyGCHead_REFS(gc) > 0);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            /* `next` must be fetched only after tp_traverse returns;
             * see the comment above. */
            next = gc->gc.gc_next;
            if (PyTuple_CheckExact(op)) {
                /* Surviving tuples whose contents are all untracked may
                 * themselves be untracked (see the untracking NOTE near
                 * the top of this file). */
                _PyTuple_MaybeUntrack(op);
            }
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            next = gc->gc.gc_next;
            gc_list_move(gc, unreachable);
            _PyGCHead_SET_REFS(gc, GC_TENTATIVELY_UNREACHABLE);
        }
        gc = next;
    }
}
508
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200509/* Try to untrack all currently tracked dictionaries */
510static void
511untrack_dicts(PyGC_Head *head)
512{
513 PyGC_Head *next, *gc = head->gc.gc_next;
514 while (gc != head) {
515 PyObject *op = FROM_GC(gc);
516 next = gc->gc.gc_next;
517 if (PyDict_CheckExact(op))
518 _PyDict_MaybeUntrack(op);
519 gc = next;
520 }
521}
522
Antoine Pitrou796564c2013-07-30 19:59:21 +0200523/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000524static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200525has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000526{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200527 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000528}
529
Antoine Pitrou796564c2013-07-30 19:59:21 +0200530/* Move the objects in unreachable with tp_del slots into `finalizers`.
Tim Petersead8b7a2004-10-30 23:09:22 +0000531 * Objects moved into `finalizers` have gc_refs set to GC_REACHABLE; the
532 * objects remaining in unreachable are left at GC_TENTATIVELY_UNREACHABLE.
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000533 */
Neil Schemenauera765c122001-11-01 17:35:23 +0000534static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200535move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
Neil Schemenauera765c122001-11-01 17:35:23 +0000536{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000537 PyGC_Head *gc;
538 PyGC_Head *next;
Tim Petersf6b80452003-04-07 19:21:15 +0000539
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000540 /* March over unreachable. Move objects with finalizers into
541 * `finalizers`.
542 */
543 for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
544 PyObject *op = FROM_GC(gc);
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000545
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000546 assert(IS_TENTATIVELY_UNREACHABLE(op));
547 next = gc->gc.gc_next;
Tim Petersf6ae7a42003-04-05 18:40:50 +0000548
Antoine Pitrou796564c2013-07-30 19:59:21 +0200549 if (has_legacy_finalizer(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000550 gc_list_move(gc, finalizers);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200551 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000552 }
553 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000554}
555
Antoine Pitrou796564c2013-07-30 19:59:21 +0200556/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000557static int
558visit_move(PyObject *op, PyGC_Head *tolist)
559{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000560 if (PyObject_IS_GC(op)) {
561 if (IS_TENTATIVELY_UNREACHABLE(op)) {
562 PyGC_Head *gc = AS_GC(op);
563 gc_list_move(gc, tolist);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200564 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000565 }
566 }
567 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000568}
569
570/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000571 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000572 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000573static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200574move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000575{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000576 traverseproc traverse;
577 PyGC_Head *gc = finalizers->gc.gc_next;
578 for (; gc != finalizers; gc = gc->gc.gc_next) {
579 /* Note that the finalizers list may grow during this. */
580 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
581 (void) traverse(FROM_GC(gc),
582 (visitproc)visit_move,
583 (void *)finalizers);
584 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000585}
586
Tim Petersead8b7a2004-10-30 23:09:22 +0000587/* Clear all weakrefs to unreachable objects, and if such a weakref has a
588 * callback, invoke it if necessary. Note that it's possible for such
589 * weakrefs to be outside the unreachable set -- indeed, those are precisely
590 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
591 * overview & some details. Some weakrefs with callbacks may be reclaimed
592 * directly by this routine; the number reclaimed is the return value. Other
593 * weakrefs with callbacks may be moved into the `old` generation. Objects
594 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
595 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
596 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000597 */
598static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    /* Clear weakrefs to the objects in `unreachable`, and invoke any weakref
     * callbacks that are safe to run (those whose weakref object is not
     * itself trash).  Weakref objects that survive callback invocation are
     * moved to `old`.  Returns the number of weakref objects freed as a
     * side effect of invoking their callbacks. */
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = unreachable->gc.gc_next; gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        assert(IS_TENTATIVELY_UNREACHABLE(op));
        /* Grab the successor now: gc may be unlinked below. */
        next = gc->gc.gc_next;

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                            PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;                       /* no callback */

    /* Headache time.  `op` is going away, and is weakly referenced by
     * `wr`, which has a callback.  Should the callback be invoked?  If wr
     * is also trash, no:
     *
     * 1. There's no need to call it.  The object and the weakref are
     *    both going away, so it's legitimate to pretend the weakref is
     *    going away first.  The user has to ensure a weakref outlives its
     *    referent if they want a guarantee that the wr callback will get
     *    invoked.
     *
     * 2. It may be catastrophic to call it.  If the callback is also in
     *    cyclic trash (CT), then although the CT is unreachable from
     *    outside the current generation, CT may be reachable from the
     *    callback.  Then the callback could resurrect insane objects.
     *
     * Since the callback is never needed and may be unsafe in this case,
     * wr is simply left in the unreachable set.  Note that because we
     * already called _PyWeakref_ClearRef(wr), its callback will never
     * trigger.
     *
     * OTOH, if wr isn't part of CT, we should invoke the callback:  the
     * weakref outlived the trash.  Note that since wr isn't CT in this
     * case, its callback can't be CT either -- wr acted as an external
     * root to this generation, and therefore its callback did too.  So
     * nothing in CT is reachable from the callback either, so it's hard
     * to imagine how calling it later could create a problem for us.  wr
     * is moved to wrcb_to_call in this case.
     */
            if (IS_TENTATIVELY_UNREACHABLE(wr))
                continue;
            assert(IS_REACHABLE(wr));

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = wrcb_to_call.gc.gc_next;
        op = FROM_GC(gc);
        assert(IS_REACHABLE(op));
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call.gc.gc_next == gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else
            ++num_freed;
    }

    return num_freed;
}
739
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000740static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200741debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000742{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100743 PySys_FormatStderr("gc: %s <%s %p>\n",
744 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000745}
746
/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 * Returns 0 if all OK, <0 on error (out of memory to grow the garbage list).
 * The finalizers list is made empty on a successful return.
 */
static int
handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
{
    PyGC_Head *gc = finalizers->gc.gc_next;

    /* Lazily create the module-level gc.garbage list on first use;
     * failing to create it is treated as fatal. */
    if (garbage == NULL) {
        garbage = PyList_New(0);
        if (garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = gc->gc.gc_next) {
        PyObject *op = FROM_GC(gc);

        if ((debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(garbage, op) < 0)
                return -1;
        }
    }

    /* Keep the objects alive by moving them all into the older generation. */
    gc_list_merge(finalizers, old);
    return 0;
}
778
/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        /* Run tp_finalize at most once per object, and only for types
         * that declare Py_TPFLAGS_HAVE_FINALIZE and supply a finalizer. */
        if (!_PyGCHead_FINALIZED(gc) &&
                PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc, 1);
            /* Hold a strong reference so op can't be freed while its
             * finalizer is running. */
            Py_INCREF(op);
            finalize(op);
            Py_DECREF(op);
        }
    }
    /* Put the survivors back where the caller expects them. */
    gc_list_merge(&seen, collectable);
}
814
815/* Walk the collectable list and check that they are really unreachable
816 from the outside (some objects could have been resurrected by a
817 finalizer). */
818static int
819check_garbage(PyGC_Head *collectable)
820{
821 PyGC_Head *gc;
822 for (gc = collectable->gc.gc_next; gc != collectable;
823 gc = gc->gc.gc_next) {
824 _PyGCHead_SET_REFS(gc, Py_REFCNT(FROM_GC(gc)));
825 assert(_PyGCHead_REFS(gc) != 0);
826 }
827 subtract_refs(collectable);
828 for (gc = collectable->gc.gc_next; gc != collectable;
829 gc = gc->gc.gc_next) {
830 assert(_PyGCHead_REFS(gc) >= 0);
831 if (_PyGCHead_REFS(gc) != 0)
832 return -1;
833 }
834 return 0;
835}
836
837static void
838revive_garbage(PyGC_Head *collectable)
839{
840 PyGC_Head *gc;
841 for (gc = collectable->gc.gc_next; gc != collectable;
842 gc = gc->gc.gc_next) {
843 _PyGCHead_SET_REFS(gc, GC_REACHABLE);
844 }
845}
846
/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = collectable->gc.gc_next;
        PyObject *op = FROM_GC(gc);

        if (debug & DEBUG_SAVEALL) {
            /* DEBUG_SAVEALL: park the trash in gc.garbage instead of
             * clearing it. */
            PyList_Append(garbage, op);
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                /* Hold a reference: tp_clear may drop the last refcount
                 * and deallocate op. */
                Py_INCREF(op);
                clear(op);
                Py_DECREF(op);
            }
        }
        /* If op is still at the list head, clearing didn't kill it. */
        if (collectable->gc.gc_next == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
            _PyGCHead_SET_REFS(gc, GC_REACHABLE);
        }
    }
}
877
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    /* The (void) casts discard the per-type counts of freed items. */
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
}
897
/* This is the main function.  Read this to understand how the
 * collection process works.
 *
 * `generation` is the oldest generation to examine; all younger
 * generations are merged into it first.  On return, *n_collected and
 * *n_uncollectable (when non-NULL) receive the number of objects
 * collected and the number left uncollectable.  If `nofail` is true,
 * a pending exception at the end is cleared instead of being fatal.
 * Returns the total number of unreachable objects found
 * (collected + uncollectable). */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers;  /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &generation_stats[generation];

    if (debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                               gc_list_size(GEN_HEAD(i)));
        /* Start timing only when stats are requested. */
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;

    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE: This used to move the reachable objects into a reachable
     * set instead. But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);

    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        long_lived_pending = 0;
        long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = unreachable.gc.gc_next; gc != &unreachable;
            gc = gc->gc.gc_next) {
        m++;
        if (debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    /* A finalizer may have resurrected objects; if so, give the whole
     * unreachable set back to the older generation instead of deleting. */
    if (check_garbage(&unreachable)) {
        revive_garbage(&unreachable);
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = finalizers.gc.gc_next;
         gc != &finalizers;
         gc = gc->gc.gc_next) {
        n++;
        if (debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    (void)handle_legacy_finalizers(&finalizers, old);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;
    return n+m;
}
1074
/* Invoke progress callbacks to notify clients that garbage collection
 * is starting or stopping.  `phase` is "start" or "stop"; the three
 * counters are passed to each callback in an info dict. */
static void
invoke_gc_callback(const char *phase, int generation,
                   Py_ssize_t collected, Py_ssize_t uncollectable)
{
    Py_ssize_t i;
    PyObject *info = NULL;

    /* we may get called very early */
    if (callbacks == NULL)
        return;
    /* The local variable cannot be rebound, check it for sanity */
    assert(callbacks != NULL && PyList_CheckExact(callbacks));
    if (PyList_GET_SIZE(callbacks) != 0) {
        /* Build the info dict shared by all callbacks for this event. */
        info = Py_BuildValue("{sisnsn}",
            "generation", generation,
            "collected", collected,
            "uncollectable", uncollectable);
        if (info == NULL) {
            PyErr_WriteUnraisable(NULL);
            return;
        }
    }
    for (i=0; i<PyList_GET_SIZE(callbacks); i++) {
        PyObject *r, *cb = PyList_GET_ITEM(callbacks, i);
        Py_INCREF(cb); /* make sure cb doesn't go away */
        r = PyObject_CallFunction(cb, "sO", phase, info);
        Py_XDECREF(r);
        /* A failing callback is reported but doesn't stop the others. */
        if (r == NULL)
            PyErr_WriteUnraisable(cb);
        Py_DECREF(cb);
    }
    Py_XDECREF(info);
}
1111
1112/* Perform garbage collection of a generation and invoke
1113 * progress callbacks.
1114 */
1115static Py_ssize_t
1116collect_with_callback(int generation)
1117{
1118 Py_ssize_t result, collected, uncollectable;
1119 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001120 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001121 invoke_gc_callback("stop", generation, collected, uncollectable);
1122 return result;
1123}
1124
Neal Norwitz7b216c52006-03-04 20:01:53 +00001125static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001126collect_generations(void)
1127{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001128 int i;
1129 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001130
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001131 /* Find the oldest generation (highest numbered) where the count
1132 * exceeds the threshold. Objects in the that generation and
1133 * generations younger than it will be collected. */
1134 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
1135 if (generations[i].count > generations[i].threshold) {
1136 /* Avoid quadratic performance degradation in number
1137 of tracked objects. See comments at the beginning
1138 of this file, and issue #4074.
1139 */
1140 if (i == NUM_GENERATIONS - 1
1141 && long_lived_pending < long_lived_total / 4)
1142 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001143 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001144 break;
1145 }
1146 }
1147 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001148}
1149
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001150PyDoc_STRVAR(gc_enable__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001151"enable() -> None\n"
1152"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001153"Enable automatic garbage collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001154
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001155static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001156gc_enable(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001157{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001158 enabled = 1;
1159 Py_INCREF(Py_None);
1160 return Py_None;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001161}
1162
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001163PyDoc_STRVAR(gc_disable__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001164"disable() -> None\n"
1165"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001166"Disable automatic garbage collection.\n");
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001167
1168static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001169gc_disable(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001170{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001171 enabled = 0;
1172 Py_INCREF(Py_None);
1173 return Py_None;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001174}
1175
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001176PyDoc_STRVAR(gc_isenabled__doc__,
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001177"isenabled() -> status\n"
1178"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001179"Returns true if automatic garbage collection is enabled.\n");
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001180
1181static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001182gc_isenabled(PyObject *self, PyObject *noargs)
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001183{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001184 return PyBool_FromLong((long)enabled);
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001185}
1186
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001187PyDoc_STRVAR(gc_collect__doc__,
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001188"collect([generation]) -> n\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001189"\n"
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001190"With no arguments, run a full collection. The optional argument\n"
1191"may be an integer specifying which generation to collect. A ValueError\n"
1192"is raised if the generation number is invalid.\n\n"
1193"The number of unreachable objects is returned.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001194
1195static PyObject *
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001196gc_collect(PyObject *self, PyObject *args, PyObject *kws)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001197{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001198 static char *keywords[] = {"generation", NULL};
1199 int genarg = NUM_GENERATIONS - 1;
1200 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001201
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001202 if (!PyArg_ParseTupleAndKeywords(args, kws, "|i", keywords, &genarg))
1203 return NULL;
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001204
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001205 else if (genarg < 0 || genarg >= NUM_GENERATIONS) {
1206 PyErr_SetString(PyExc_ValueError, "invalid generation");
1207 return NULL;
1208 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001209
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001210 if (collecting)
1211 n = 0; /* already collecting, don't do anything */
1212 else {
1213 collecting = 1;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001214 n = collect_with_callback(genarg);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001215 collecting = 0;
1216 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001217
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001218 return PyLong_FromSsize_t(n);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001219}
1220
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001221PyDoc_STRVAR(gc_set_debug__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001222"set_debug(flags) -> None\n"
1223"\n"
1224"Set the garbage collection debugging flags. Debugging information is\n"
1225"written to sys.stderr.\n"
1226"\n"
1227"flags is an integer and can have the following bits turned on:\n"
1228"\n"
1229" DEBUG_STATS - Print statistics during collection.\n"
1230" DEBUG_COLLECTABLE - Print collectable objects found.\n"
1231" DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects found.\n"
Neil Schemenauer544de1e2000-09-22 15:22:38 +00001232" DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001233" DEBUG_LEAK - Debug leaking programs (everything but STATS).\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001234
1235static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001236gc_set_debug(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001237{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001238 if (!PyArg_ParseTuple(args, "i:set_debug", &debug))
1239 return NULL;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001240
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001241 Py_INCREF(Py_None);
1242 return Py_None;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001243}
1244
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001245PyDoc_STRVAR(gc_get_debug__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001246"get_debug() -> flags\n"
1247"\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001248"Get the garbage collection debugging flags.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001249
1250static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001251gc_get_debug(PyObject *self, PyObject *noargs)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001252{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001253 return Py_BuildValue("i", debug);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001254}
1255
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001256PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001257"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001258"\n"
1259"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001260"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001261
1262static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001263gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001264{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001265 int i;
1266 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
1267 &generations[0].threshold,
1268 &generations[1].threshold,
1269 &generations[2].threshold))
1270 return NULL;
1271 for (i = 2; i < NUM_GENERATIONS; i++) {
1272 /* generations higher than 2 get the same threshold */
1273 generations[i].threshold = generations[2].threshold;
1274 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001275
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001276 Py_INCREF(Py_None);
1277 return Py_None;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001278}
1279
PyDoc_STRVAR(gc_get_thresh__doc__,
"get_threshold() -> (threshold0, threshold1, threshold2)\n"
"\n"
"Return the current collection thresholds\n");

/* gc.get_threshold(): return the three generation thresholds as a tuple. */
static PyObject *
gc_get_thresh(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].threshold,
                         generations[1].threshold,
                         generations[2].threshold);
}
1293
PyDoc_STRVAR(gc_get_count__doc__,
"get_count() -> (count0, count1, count2)\n"
"\n"
"Return the current collection counts\n");

/* gc.get_count(): return the three generation allocation counts as a tuple. */
static PyObject *
gc_get_count(PyObject *self, PyObject *noargs)
{
    return Py_BuildValue("(iii)",
                         generations[0].count,
                         generations[1].count,
                         generations[2].count);
}
1307
Neil Schemenauer48c70342001-08-09 15:38:31 +00001308static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001309referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001310{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001311 Py_ssize_t i;
1312 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1313 if (PyTuple_GET_ITEM(objs, i) == obj)
1314 return 1;
1315 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001316}
1317
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001318static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001319gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001320{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001321 PyGC_Head *gc;
1322 PyObject *obj;
1323 traverseproc traverse;
1324 for (gc = list->gc.gc_next; gc != list; gc = gc->gc.gc_next) {
1325 obj = FROM_GC(gc);
1326 traverse = Py_TYPE(obj)->tp_traverse;
1327 if (obj == objs || obj == resultlist)
1328 continue;
1329 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1330 if (PyList_Append(resultlist, obj) < 0)
1331 return 0; /* error */
1332 }
1333 }
1334 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001335}
1336
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001337PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001338"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001339Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001340
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001341static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001342gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001343{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001344 int i;
1345 PyObject *result = PyList_New(0);
1346 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001347
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001348 for (i = 0; i < NUM_GENERATIONS; i++) {
1349 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1350 Py_DECREF(result);
1351 return NULL;
1352 }
1353 }
1354 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001355}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001356
Tim Peters0f81ab62003-04-08 16:39:48 +00001357/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001358static int
Tim Peters730f5532003-04-08 17:17:17 +00001359referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001360{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001361 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001362}
1363
Tim Peters730f5532003-04-08 17:17:17 +00001364PyDoc_STRVAR(gc_get_referents__doc__,
1365"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001366Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001367
1368static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001369gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001370{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001371 Py_ssize_t i;
1372 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001373
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001374 if (result == NULL)
1375 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001376
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001377 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1378 traverseproc traverse;
1379 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001380
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001381 if (! PyObject_IS_GC(obj))
1382 continue;
1383 traverse = Py_TYPE(obj)->tp_traverse;
1384 if (! traverse)
1385 continue;
1386 if (traverse(obj, (visitproc)referentsvisit, result)) {
1387 Py_DECREF(result);
1388 return NULL;
1389 }
1390 }
1391 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001392}
1393
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001394PyDoc_STRVAR(gc_get_objects__doc__,
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001395"get_objects() -> [...]\n"
1396"\n"
1397"Return a list of objects tracked by the collector (excluding the list\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001398"returned).\n");
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001399
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001400static PyObject *
Tim Peters50c61d52003-04-06 01:50:50 +00001401gc_get_objects(PyObject *self, PyObject *noargs)
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001402{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001403 int i;
1404 PyObject* result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001405
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001406 result = PyList_New(0);
1407 if (result == NULL)
1408 return NULL;
1409 for (i = 0; i < NUM_GENERATIONS; i++) {
1410 if (append_objects(result, GEN_HEAD(i))) {
1411 Py_DECREF(result);
1412 return NULL;
1413 }
1414 }
1415 return result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001416}
1417
Antoine Pitroud4156c12012-10-30 22:43:19 +01001418PyDoc_STRVAR(gc_get_stats__doc__,
1419"get_stats() -> [...]\n"
1420"\n"
1421"Return a list of dictionaries containing per-generation statistics.\n");
1422
1423static PyObject *
1424gc_get_stats(PyObject *self, PyObject *noargs)
1425{
1426 int i;
1427 PyObject *result;
1428 struct gc_generation_stats stats[NUM_GENERATIONS], *st;
1429
1430 /* To get consistent values despite allocations while constructing
1431 the result list, we use a snapshot of the running stats. */
1432 for (i = 0; i < NUM_GENERATIONS; i++) {
1433 stats[i] = generation_stats[i];
1434 }
1435
1436 result = PyList_New(0);
1437 if (result == NULL)
1438 return NULL;
1439
1440 for (i = 0; i < NUM_GENERATIONS; i++) {
1441 PyObject *dict;
1442 st = &stats[i];
1443 dict = Py_BuildValue("{snsnsn}",
1444 "collections", st->collections,
1445 "collected", st->collected,
1446 "uncollectable", st->uncollectable
1447 );
1448 if (dict == NULL)
1449 goto error;
1450 if (PyList_Append(result, dict)) {
1451 Py_DECREF(dict);
1452 goto error;
1453 }
1454 Py_DECREF(dict);
1455 }
1456 return result;
1457
1458error:
1459 Py_XDECREF(result);
1460 return NULL;
1461}
1462
1463
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001464PyDoc_STRVAR(gc_is_tracked__doc__,
1465"is_tracked(obj) -> bool\n"
1466"\n"
1467"Returns true if the object is tracked by the garbage collector.\n"
1468"Simple atomic objects will return false.\n"
1469);
1470
1471static PyObject *
1472gc_is_tracked(PyObject *self, PyObject *obj)
1473{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001474 PyObject *result;
1475
1476 if (PyObject_IS_GC(obj) && IS_TRACKED(obj))
1477 result = Py_True;
1478 else
1479 result = Py_False;
1480 Py_INCREF(result);
1481 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001482}
1483
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001484
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001485PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001486"This module provides access to the garbage collector for reference cycles.\n"
1487"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001488"enable() -- Enable automatic garbage collection.\n"
1489"disable() -- Disable automatic garbage collection.\n"
1490"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001491"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001492"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001493"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001494"set_debug() -- Set debugging flags.\n"
1495"get_debug() -- Get debugging flags.\n"
1496"set_threshold() -- Set the collection thresholds.\n"
1497"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001498"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001499"is_tracked() -- Returns true if a given object is tracked.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001500"get_referrers() -- Return the list of objects that refer to an object.\n"
Tim Peters730f5532003-04-08 17:17:17 +00001501"get_referents() -- Return the list of objects that an object refers to.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001502
/* Method table for the gc module.  Every entry's docstring is the
   matching PyDoc_STRVAR defined above; gc.collect is the only entry
   that accepts keyword arguments. */
static PyMethodDef GcMethods[] = {
    {"enable", gc_enable, METH_NOARGS, gc_enable__doc__},
    {"disable", gc_disable, METH_NOARGS, gc_disable__doc__},
    {"isenabled", gc_isenabled, METH_NOARGS, gc_isenabled__doc__},
    {"set_debug", gc_set_debug, METH_VARARGS, gc_set_debug__doc__},
    {"get_debug", gc_get_debug, METH_NOARGS, gc_get_debug__doc__},
    {"get_count", gc_get_count, METH_NOARGS, gc_get_count__doc__},
    {"set_threshold", gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    {"get_threshold", gc_get_thresh, METH_NOARGS, gc_get_thresh__doc__},
    {"collect", (PyCFunction)gc_collect,
        METH_VARARGS | METH_KEYWORDS, gc_collect__doc__},
    {"get_objects", gc_get_objects,METH_NOARGS, gc_get_objects__doc__},
    {"get_stats", gc_get_stats, METH_NOARGS, gc_get_stats__doc__},
    {"is_tracked", gc_is_tracked, METH_O, gc_is_tracked__doc__},
    {"get_referrers", gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents", gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    {NULL, NULL} /* Sentinel */
};
1523
Martin v. Löwis1a214512008-06-11 05:26:20 +00001524static struct PyModuleDef gcmodule = {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001525 PyModuleDef_HEAD_INIT,
Antoine Pitrou696e0352010-08-08 22:18:46 +00001526 "gc", /* m_name */
1527 gc__doc__, /* m_doc */
1528 -1, /* m_size */
1529 GcMethods, /* m_methods */
1530 NULL, /* m_reload */
1531 NULL, /* m_traverse */
1532 NULL, /* m_clear */
1533 NULL /* m_free */
Martin v. Löwis1a214512008-06-11 05:26:20 +00001534};
1535
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001536PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001537PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001538{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001539 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001540
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001541 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001542
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001543 if (m == NULL)
1544 return NULL;
Tim Peters11558872003-04-06 23:30:52 +00001545
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001546 if (garbage == NULL) {
1547 garbage = PyList_New(0);
1548 if (garbage == NULL)
1549 return NULL;
1550 }
1551 Py_INCREF(garbage);
1552 if (PyModule_AddObject(m, "garbage", garbage) < 0)
1553 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001554
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001555 if (callbacks == NULL) {
1556 callbacks = PyList_New(0);
1557 if (callbacks == NULL)
1558 return NULL;
1559 }
1560 Py_INCREF(callbacks);
1561 if (PyModule_AddObject(m, "callbacks", callbacks) < 0)
1562 return NULL;
1563
Martin v. Löwis1a214512008-06-11 05:26:20 +00001564#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001565 ADD_INT(DEBUG_STATS);
1566 ADD_INT(DEBUG_COLLECTABLE);
1567 ADD_INT(DEBUG_UNCOLLECTABLE);
1568 ADD_INT(DEBUG_SAVEALL);
1569 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001570#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001571 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001572}
1573
Guido van Rossume13ddc92003-04-17 17:29:22 +00001574/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001575Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001576PyGC_Collect(void)
1577{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001578 Py_ssize_t n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001579
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001580 if (collecting)
1581 n = 0; /* already collecting, don't do anything */
1582 else {
1583 collecting = 1;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001584 n = collect_with_callback(NUM_GENERATIONS - 1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001585 collecting = 0;
1586 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001587
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001588 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001589}
1590
Antoine Pitroufef34e32013-05-19 01:11:58 +02001591Py_ssize_t
1592_PyGC_CollectNoFail(void)
1593{
1594 Py_ssize_t n;
1595
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001596 /* Ideally, this function is only called on interpreter shutdown,
1597 and therefore not recursively. Unfortunately, when there are daemon
1598 threads, a daemon thread can start a cyclic garbage collection
1599 during interpreter shutdown (and then never finish it).
1600 See http://bugs.python.org/issue8713#msg195178 for an example.
1601 */
1602 if (collecting)
1603 n = 0;
1604 else {
1605 collecting = 1;
1606 n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
1607 collecting = 0;
1608 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001609 return n;
1610}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001611
Antoine Pitrou696e0352010-08-08 22:18:46 +00001612void
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001613_PyGC_DumpShutdownStats(void)
Antoine Pitrou696e0352010-08-08 22:18:46 +00001614{
Antoine Pitrou2ed94eb2010-09-14 09:48:39 +00001615 if (!(debug & DEBUG_SAVEALL)
1616 && garbage != NULL && PyList_GET_SIZE(garbage) > 0) {
Georg Brandl08be72d2010-10-24 15:11:22 +00001617 char *message;
1618 if (debug & DEBUG_UNCOLLECTABLE)
Antoine Pitroub5d82042010-11-05 00:05:25 +00001619 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001620 "shutdown";
1621 else
Antoine Pitroub5d82042010-11-05 00:05:25 +00001622 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001623 "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001624 /* PyErr_WarnFormat does too many things and we are at shutdown,
1625 the warnings module's dependencies (e.g. linecache) may be gone
1626 already. */
1627 if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
1628 "gc", NULL, message,
1629 PyList_GET_SIZE(garbage)))
Georg Brandl08be72d2010-10-24 15:11:22 +00001630 PyErr_WriteUnraisable(NULL);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001631 if (debug & DEBUG_UNCOLLECTABLE) {
1632 PyObject *repr = NULL, *bytes = NULL;
1633 repr = PyObject_Repr(garbage);
1634 if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
1635 PyErr_WriteUnraisable(garbage);
1636 else {
1637 PySys_WriteStderr(
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001638 " %s\n",
Antoine Pitrou696e0352010-08-08 22:18:46 +00001639 PyBytes_AS_STRING(bytes)
1640 );
1641 }
1642 Py_XDECREF(repr);
1643 Py_XDECREF(bytes);
1644 }
Antoine Pitrou696e0352010-08-08 22:18:46 +00001645 }
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001646}
1647
/* Finalize the collector's module-level state: drop the C-level
   reference to the callbacks list. */
void
_PyGC_Fini(void)
{
    Py_CLEAR(callbacks);
}
1653
Neil Schemenauer43411b52001-08-30 00:05:51 +00001654/* for debugging */
Guido van Rossume13ddc92003-04-17 17:29:22 +00001655void
1656_PyGC_Dump(PyGC_Head *g)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001657{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001658 _PyObject_Dump(FROM_GC(g));
Neil Schemenauer43411b52001-08-30 00:05:51 +00001659}
1660
Neil Schemenauer43411b52001-08-30 00:05:51 +00001661/* extension modules might be compiled with GC support so these
1662 functions must always be available */
1663
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001664#undef PyObject_GC_Track
1665#undef PyObject_GC_UnTrack
1666#undef PyObject_GC_Del
1667#undef _PyObject_GC_Malloc
1668
Neil Schemenauer43411b52001-08-30 00:05:51 +00001669void
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001670PyObject_GC_Track(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001671{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001672 _PyObject_GC_TRACK(op);
Neil Schemenauer43411b52001-08-30 00:05:51 +00001673}
1674
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001675void
1676PyObject_GC_UnTrack(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001677{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001678 /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
1679 * call PyObject_GC_UnTrack twice on an object.
1680 */
1681 if (IS_TRACKED(op))
1682 _PyObject_GC_UNTRACK(op);
Neil Schemenauer43411b52001-08-30 00:05:51 +00001683}
1684
Victor Stinnerdb067af2014-05-02 22:31:14 +02001685static PyObject *
1686_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001687{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001688 PyObject *op;
1689 PyGC_Head *g;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001690 size_t size;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001691 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1692 return PyErr_NoMemory();
Victor Stinnerdb067af2014-05-02 22:31:14 +02001693 size = sizeof(PyGC_Head) + basicsize;
1694 if (use_calloc)
1695 g = (PyGC_Head *)PyObject_Calloc(1, size);
1696 else
1697 g = (PyGC_Head *)PyObject_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001698 if (g == NULL)
1699 return PyErr_NoMemory();
Antoine Pitrou796564c2013-07-30 19:59:21 +02001700 g->gc.gc_refs = 0;
1701 _PyGCHead_SET_REFS(g, GC_UNTRACKED);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001702 generations[0].count++; /* number of allocated GC objects */
1703 if (generations[0].count > generations[0].threshold &&
1704 enabled &&
1705 generations[0].threshold &&
1706 !collecting &&
1707 !PyErr_Occurred()) {
1708 collecting = 1;
1709 collect_generations();
1710 collecting = 0;
1711 }
1712 op = FROM_GC(g);
1713 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001714}
1715
/* Allocate a GC-aware object without zero-filling the memory. */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1721
/* Allocate a GC-aware object with the memory zero-filled. */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1727
1728PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001729_PyObject_GC_New(PyTypeObject *tp)
1730{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001731 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1732 if (op != NULL)
1733 op = PyObject_INIT(op, tp);
1734 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001735}
1736
1737PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001738_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001739{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001740 size_t size;
1741 PyVarObject *op;
1742
1743 if (nitems < 0) {
1744 PyErr_BadInternalCall();
1745 return NULL;
1746 }
1747 size = _PyObject_VAR_SIZE(tp, nitems);
1748 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001749 if (op != NULL)
1750 op = PyObject_INIT_VAR(op, tp, nitems);
1751 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001752}
1753
/* Resize a variable-size GC-aware object in place to hold nitems
 * items.  Returns the (possibly relocated) object, or NULL with
 * MemoryError set.
 *
 * NOTE(review): PyObject_REALLOC may move the PyGC_Head, but the
 * doubly-linked generation list is never fixed up here — so if op were
 * tracked, its neighbors' gc_next/gc_prev would dangle.  Presumably
 * callers must only resize untracked objects; confirm (later CPython
 * versions assert this).
 */
PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    /* Guard the size addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
1768
/* Free a GC-aware object: unlink it from its generation list if it is
 * still tracked, debit the generation-0 allocation counter, and release
 * the underlying memory (including the PyGC_Head prefix).
 */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (IS_TRACKED(op))
        gc_list_remove(g);
    /* count can already be 0 if a collection ran since allocation. */
    if (generations[0].count > 0) {
        generations[0].count--;
    }
    PyObject_FREE(g);
}