/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/

#include "Python.h"
#include "pycore_context.h"
#include "pycore_object.h"
#include "pycore_pymem.h"
#include "pycore_pystate.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
#include "pydtrace.h"
#include "pytime.h"             /* for _PyTime_GetMonotonicClock() */

/*[clinic input]
module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/

#define GC_DEBUG (0)  /* Enable more asserts */

#define GC_NEXT _PyGCHead_NEXT
#define GC_PREV _PyGCHead_PREV

// update_refs() sets this bit for all objects in the generation being
// collected.  subtract_refs() and move_unreachable() then use it to tell
// whether a visited object belongs to the current collection.
//
// move_unreachable() removes this flag from reachable objects.
// Only unreachable objects have this flag.
//
// No object in the interpreter has this flag once a collection ends.
#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING

// The lowest bit of _gc_next is used for the UNREACHABLE flag.
//
// This flag marks an object as being on the unreachable list built by
// move_unreachable().
//
// Although the flag is needed only inside move_unreachable(), that function
// does not clear it, to avoid an extra pass over the list;
// move_legacy_finalizers() removes it instead.  Between those two calls the
// unreachable list is not a normal list, and most gc_list_* functions
// cannot be used on it.
#define NEXT_MASK_UNREACHABLE (1)

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
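
/* Layout sketch (illustrative only): a GC-tracked allocation places the
 * PyGC_Head immediately before the PyObject, so the two macros above are
 * exact pointer-arithmetic inverses:
 *
 *     PyObject *op = ...;          // any GC-tracked object
 *     PyGC_Head *g = AS_GC(op);    // step back over the GC header
 *     assert(FROM_GC(g) == op);    // round-trips to the same object
 */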

static inline int
gc_is_collecting(PyGC_Head *g)
{
    return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
}

static inline void
gc_clear_collecting(PyGC_Head *g)
{
    g->_gc_prev &= ~PREV_MASK_COLLECTING;
}

static inline Py_ssize_t
gc_get_refs(PyGC_Head *g)
{
    return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
}

static inline void
gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
{
    g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}

static inline void
gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
{
    g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
        | PREV_MASK_COLLECTING
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}

static inline void
gc_decref(PyGC_Head *g)
{
    _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
                              gc_get_refs(g) > 0,
                              "refcount is too small");
    g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
}

/* Python string to use if unhandled exception occurs */
static PyObject *gc_str = NULL;

/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              DEBUG_COLLECTABLE | \
                DEBUG_UNCOLLECTABLE | \
                DEBUG_SAVEALL
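
/* Usage note (illustrative, not used in this file): these bits back the
 * Python-level gc.set_debug() API; e.g. gc.set_debug(gc.DEBUG_LEAK)
 * enables DEBUG_COLLECTABLE | DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL. */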

#define GEN_HEAD(state, n) (&(state)->generations[n].head)

void
_PyGC_Initialize(struct _gc_runtime_state *state)
{
    state->enabled = 1; /* automatic collection enabled? */

#define _GEN_HEAD(n) GEN_HEAD(state, n)
    struct gc_generation generations[NUM_GENERATIONS] = {
        /* PyGC_Head,                                    threshold, count */
        {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)},   700,     0},
        {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)},    10,     0},
        {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)},    10,     0},
    };
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        state->generations[i] = generations[i];
    }
    state->generation0 = GEN_HEAD(state, 0);
    struct gc_generation permanent_generation = {
        {(uintptr_t)&state->permanent_generation.head,
         (uintptr_t)&state->permanent_generation.head}, 0, 0
    };
    state->permanent_generation = permanent_generation;
}
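
/* Threshold semantics (a sketch, consistent with the table above and the
 * counter updates in collect() below): generation 0's count is the number
 * of container allocations minus deallocations since the last collection,
 * checked against 700; each older generation's count is the number of
 * collections of the generation below it, checked against 10. */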

/*
_gc_prev values
---------------

Between collections, _gc_prev is used as the back pointer of a doubly
linked list.

The lowest two bits of _gc_prev are used for flags.
PREV_MASK_COLLECTING is used only while collecting, and is cleared before
the collection ends or when _PyObject_GC_UNTRACK() is called.

During a collection, _gc_prev is temporarily used to hold gc_refs, and the
gc list is singly linked until _gc_prev is restored.

gc_refs
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.

PREV_MASK_COLLECTING
    Objects in the generation being collected are marked
    PREV_MASK_COLLECTING in update_refs().


_gc_next values
---------------

_gc_next takes these values:

0
    The object is not tracked.

!= 0
    Pointer to the next object in the GC list.
    Additionally, the lowest bit is temporarily used for the
    NEXT_MASK_UNREACHABLE flag, described below.

NEXT_MASK_UNREACHABLE
    move_unreachable() moves objects that are not reachable (whether
    directly or indirectly) from outside the generation into an
    "unreachable" set, and sets this flag on them.

    Objects that are found to be reachable have gc_refs set to 1.
    If this flag is set on an object that turns out to be reachable, the
    object must be in the "unreachable" set; the flag is then unset and
    the object is moved back to the "reachable" set.

    move_legacy_finalizers() removes this flag from the objects left in
    the "unreachable" set.
*/
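
/* Bit-layout sketch (illustrative; the authoritative masks and shifts are
 * the _PyGC_PREV_* definitions in the core headers):
 *
 *   _gc_prev:  [ prev pointer / gc_refs ... | COLLECTING+FINALIZED bits ]
 *                bits >= _PyGC_PREV_SHIFT        lowest two bits
 *   _gc_next:  [ next pointer ...           | NEXT_MASK_UNREACHABLE    ]
 *                                                 lowest bit
 *
 * Pointers can share a word with the flag bits because PyGC_Head is
 * aligned such that the low bits of a valid head address are always zero.
 */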

/*** list functions ***/

static inline void
gc_list_init(PyGC_Head *list)
{
    // A list header must not have flags set, so a plain cast is enough
    // to store the pointers.
    list->_gc_prev = (uintptr_t)list;
    list->_gc_next = (uintptr_t)list;
}

static inline int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->_gc_next == (uintptr_t)list);
}

/* Append `node` to `list`. */
static inline void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *last = (PyGC_Head *)list->_gc_prev;

    // last <-> node
    _PyGCHead_SET_PREV(node, last);
    _PyGCHead_SET_NEXT(last, node);

    // node <-> list
    _PyGCHead_SET_NEXT(node, list);
    list->_gc_prev = (uintptr_t)node;
}

/* Remove `node` from the gc list it's currently in. */
static inline void
gc_list_remove(PyGC_Head *node)
{
    PyGC_Head *prev = GC_PREV(node);
    PyGC_Head *next = GC_NEXT(node);

    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);

    node->_gc_next = 0; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    /* Unlink from current list. */
    PyGC_Head *from_prev = GC_PREV(node);
    PyGC_Head *from_next = GC_NEXT(node);
    _PyGCHead_SET_NEXT(from_prev, from_next);
    _PyGCHead_SET_PREV(from_next, from_prev);

    /* Relink at end of new list. */
    // The list head must not have flags set, so the macros can be skipped.
    PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
    _PyGCHead_SET_PREV(node, to_prev);
    _PyGCHead_SET_NEXT(to_prev, node);
    list->_gc_prev = (uintptr_t)node;
    _PyGCHead_SET_NEXT(node, list);
}

/* Append list `from` onto list `to`; `from` becomes an empty list. */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        PyGC_Head *to_tail = GC_PREV(to);
        PyGC_Head *from_head = GC_NEXT(from);
        PyGC_Head *from_tail = GC_PREV(from);
        assert(from_head != from);
        assert(from_tail != from);

        _PyGCHead_SET_NEXT(to_tail, from_head);
        _PyGCHead_SET_PREV(from_head, to_tail);

        _PyGCHead_SET_NEXT(from_tail, to);
        _PyGCHead_SET_PREV(to, from_tail);
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
        n++;
    }
    return n;
}
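
/* Usage sketch (illustrative; `op` and `other` are hypothetical): the GC
 * lists are circular and doubly linked, with a flag-free PyGC_Head header.
 *
 *     PyGC_Head tmp;
 *     gc_list_init(&tmp);               // empty: tmp points at itself
 *     assert(gc_list_is_empty(&tmp));
 *     gc_list_append(AS_GC(op), &tmp);  // a tracked object becomes the tail
 *     assert(gc_list_size(&tmp) == 1);
 *     gc_list_merge(&tmp, other);       // splice onto `other`; tmp empties
 */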

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list).
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

#if GC_DEBUG
// validate_list checks list consistency, and doubles as documentation of
// when expected_mask is supposed to be set or unset.
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    while (gc != head) {
        assert(GC_NEXT(gc) != NULL);
        assert(GC_PREV(gc) == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
        prev = gc;
        gc = GC_NEXT(gc);
    }
    assert(prev == GC_PREV(head));
}
#else
#define validate_list(x, y) do {} while (0)
#endif

/*** end of list stuff ***/


/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and the
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *data)
{
    assert(op != NULL);
    _PyObject_ASSERT(op, !_PyObject_IsFreed(op));

    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        if (gc_is_collecting(gc)) {
            gc_decref(gc);
        }
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        NULL);
    }
}
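
/* Worked example (illustrative): suppose objects A and B reference each
 * other, and a local variable outside the generation also references A.
 * Then ob_refcnt is A:2, B:1.  update_refs() copies those counts into
 * gc_refs; subtract_refs() removes the intra-generation references A->B
 * and B->A, leaving gc_refs A:1, B:0.  A's nonzero gc_refs proves it is
 * externally reachable; B looks unreachable for now, but move_unreachable()
 * below will rescue it because it is reachable from A. */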

/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore untracked objects and objects in other generations.
    if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
        return 0;
    }

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from the unreachable list, because the
        // gc_list_* functions cannot be used while NEXT_MASK_UNREACHABLE
        // is set.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(prev),
                         prev->_gc_next & NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(next),
                         next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it.  All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * no object in young has the PREV_MASK_COLLECTING flag, and all objects
 * in unreachable do.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young, and all objects in unreachable are
 * not.
 *
 * This function restores the _gc_prev pointers; young and unreachable are
 * doubly linked lists when it returns.
 * But _gc_next in the unreachable list still carries NEXT_MASK_UNREACHABLE,
 * so gc_list_* functions cannot be used on unreachable until that flag is
 * removed.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous element in the young list, used to restore gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants:  all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,
                                      "refcount is too small");
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            // relink gc_prev to the previous element.
            _PyGCHead_SET_PREV(gc, prev);
            // gc is no longer in the COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need to set gc->next->prev = prev, because the list is
            // singly linked at this point.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we use
            // NEXT_MASK_UNREACHABLE here.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in the unreachable set have the
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may also set the flag on the `unreachable` list
            // head itself; move_legacy_finalizers() takes care of that.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must point to the last element remaining in the list.
    young->_gc_prev = (uintptr_t)prev;
}
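
/* Encoding sketch (illustrative): while move_unreachable() runs, an entry
 * of the transient unreachable list stores both the next pointer and the
 * flag in one word:
 *
 *     gc->_gc_next = NEXT_MASK_UNREACHABLE | (uintptr_t)next;
 *     next = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE); // decode
 *
 * which is why ordinary gc_list_* traversal would misinterpret these links
 * until move_legacy_finalizers() strips the flag. */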

/* Try to untrack all currently tracked tuples; _PyTuple_MaybeUntrack()
 * unlinks a tuple whose contents no longer need GC tracking. */
static void
untrack_tuples(PyGC_Head *head)
{
    PyGC_Head *next, *gc = GC_NEXT(head);
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = GC_NEXT(gc);
        if (PyTuple_CheckExact(op)) {
            _PyTuple_MaybeUntrack(op);
        }
        gc = next;
    }
}

/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = GC_NEXT(head);
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = GC_NEXT(gc);
        if (PyDict_CheckExact(op)) {
            _PyDict_MaybeUntrack(op);
        }
        gc = next;
    }
}

/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)
{
    return op->ob_type->tp_del != NULL;
}
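
/* Background (illustrative): tp_del is the pre-PEP 442 finalizer slot.
 * In Python 2, a class's __del__ populated it, so a cycle such as
 *
 *     a = C(); b = C()        # C defines __del__
 *     a.other = b; b.other = a
 *     del a, b
 *
 * was uncollectable (neither __del__ can safely run first) and was parked
 * in gc.garbage.  Since PEP 442, __del__ maps to tp_finalize and is handled
 * by finalize_garbage() below; tp_del survives only in legacy extension
 * types. */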

/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes the NEXT_MASK_UNREACHABLE flag from _gc_next
 * in unreachable.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}

/* A traversal callback for move_legacy_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        if (gc_is_collecting(gc)) {
            gc_list_move(gc, tolist);
            gc_clear_collecting(gc);
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable
 * set into the finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = GC_NEXT(finalizers);
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  When this
 * returns, no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        next = GC_NEXT(gc);

        if (PyWeakref_Check(op)) {
            /* A weakref inside the unreachable set must be cleared.  If we
             * allow its callback to execute inside delete_garbage(), it
             * could expose objects that have tp_clear already called on
             * them.  Or, it could resurrect unreachable objects.  One way
             * this can happen is if some container objects do not implement
             * tp_traverse.  Then, wr_object can be outside the unreachable
             * set but can be deallocated as a result of breaking the
             * reference cycle.  If we don't clear the weakref, the callback
             * will run and potentially cause a crash.  See bpo-38006 for
             * one example.
             */
            _PyWeakref_ClearRef((PyWeakReference *)op);
        }

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
            if (wr->wr_callback == NULL) {
                /* no callback */
                continue;
            }

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (gc_is_collecting(AS_GC(wr))) {
                /* it should already have been cleared above */
                assert(wr->wr_object == Py_None);
                continue;
            }

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = (PyGC_Head*)wrcb_to_call._gc_next;
        op = FROM_GC(gc);
        _PyObject_ASSERT(op, PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        _PyObject_ASSERT(op, callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else {
            ++num_freed;
        }
    }

    return num_freed;
}

static void
debug_cycle(const char *msg, PyObject *op)
{
    PySys_FormatStderr("gc: %s <%s %p>\n",
                       msg, Py_TYPE(op)->tp_name, op);
}

/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 */
static void
handle_legacy_finalizers(struct _gc_runtime_state *state,
                         PyGC_Head *finalizers, PyGC_Head *old)
{
    assert(!PyErr_Occurred());

    PyGC_Head *gc = GC_NEXT(finalizers);
    if (state->garbage == NULL) {
        state->garbage = PyList_New(0);
        if (state->garbage == NULL)
            Py_FatalError("gc couldn't create gc.garbage list");
    }
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
        PyObject *op = FROM_GC(gc);

        if ((state->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(state->garbage, op) < 0) {
                PyErr_Clear();
                break;
            }
        }
    }

    gc_list_merge(finalizers, old);
}

/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = GC_NEXT(collectable);
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        if (!_PyGCHead_FINALIZED(gc) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc);
            Py_INCREF(op);
            finalize(op);
            assert(!PyErr_Occurred());
            Py_DECREF(op);
        }
    }
    gc_list_merge(&seen, collectable);
}
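
/* Resurrection example (illustrative): a tp_finalize (Python __del__) may
 * make a dying object reachable again, e.g.
 *
 *     class Phoenix:
 *         def __del__(self):
 *             global keeper
 *             keeper = self     # resurrects the object
 *
 * check_garbage() below re-runs the refcount accounting to detect this;
 * if anything in the set became reachable, the whole set is spared. */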

/* Walk the collectable list and check that the objects in it are really
   unreachable from the outside (some objects could have been resurrected
   by a finalizer). */
static int
check_garbage(PyGC_Head *collectable)
{
    int ret = 0;
    PyGC_Head *gc;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        // Use gc_refs and break gc_prev again.
        gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
    subtract_refs(collectable);
    PyGC_Head *prev = collectable;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        _PyObject_ASSERT_WITH_MSG(FROM_GC(gc),
                                  gc_get_refs(gc) >= 0,
                                  "refcount is too small");
        if (gc_get_refs(gc) != 0) {
            ret = -1;
        }
        // Restore gc_prev here.
        _PyGCHead_SET_PREV(gc, prev);
        gc_clear_collecting(gc);
        prev = gc;
    }
    return ret;
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(struct _gc_runtime_state *state,
               PyGC_Head *collectable, PyGC_Head *old)
{
    assert(!PyErr_Occurred());

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = GC_NEXT(collectable);
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
                                  "refcount is too small");

        if (state->debug & DEBUG_SAVEALL) {
            assert(state->garbage != NULL);
            if (PyList_Append(state->garbage, op) < 0) {
                PyErr_Clear();
            }
        }
        else {
            inquiry clear;
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                (void) clear(op);
                if (PyErr_Occurred()) {
                    _PyErr_WriteUnraisableMsg("in tp_clear of",
                                              (PyObject*)Py_TYPE(op));
                }
                Py_DECREF(op);
            }
        }
        if (GC_NEXT(collectable) == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
        }
    }
}
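
/* Illustrative tp_clear for a container type (a sketch, not part of this
 * file; MyObject and its field are hypothetical):
 *
 *     static int
 *     myobj_clear(MyObject *self)
 *     {
 *         Py_CLEAR(self->attr);  // drop references that may be in a cycle
 *         return 0;
 *     }
 *
 * delete_garbage() relies on tp_clear dropping such references so the
 * cycle unwinds through ordinary refcounting. */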

/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
    (void)PyContext_ClearFreeList();
}

// Show stats for objects in each generation.
static void
show_stats_each_generations(struct _gc_runtime_state *state)
{
    char buf[100];
    size_t pos = 0;

    for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) {
        pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos,
                             " %"PY_FORMAT_SIZE_T"d",
                             gc_list_size(GEN_HEAD(state, i)));
    }

    PySys_FormatStderr(
        "gc: objects in each generation:%s\n"
        "gc: objects in permanent generation: %zd\n",
        buf, gc_list_size(&state->permanent_generation.head));
}
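
/* Phase overview of collect() (derived from the code below):
 *   1. update_refs()/subtract_refs()  -- compute external reference counts
 *   2. move_unreachable()             -- split young into reachable/unreachable
 *   3. move_legacy_finalizers()       -- set aside tp_del objects, plus
 *      whatever move_legacy_finalizer_reachable() finds reachable from them
 *   4. handle_weakrefs()              -- clear weakrefs, run safe callbacks
 *   5. finalize_garbage()             -- run tp_finalize once per object
 *   6. check_garbage()                -- abort deletion if anything resurrected
 *   7. delete_garbage()               -- tp_clear breaks the cycles
 *   8. handle_legacy_finalizers()     -- park uncollectables in gc.garbage
 */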
1002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001003/* This is the main function. Read this to understand how the
1004 * collection process works. */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001005static Py_ssize_t
Victor Stinner9db03242019-04-26 02:32:01 +02001006collect(struct _gc_runtime_state *state, int generation,
1007 Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001008{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001009 int i;
1010 Py_ssize_t m = 0; /* # objects collected */
1011 Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
1012 PyGC_Head *young; /* the generation we are examining */
1013 PyGC_Head *old; /* next older generation */
1014 PyGC_Head unreachable; /* non-problematic unreachable trash */
1015 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
1016 PyGC_Head *gc;
Victor Stinner7181dec2015-03-27 17:47:53 +01001017 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
Antoine Pitrou40f6b122014-05-24 19:21:53 +02001018
Victor Stinner9db03242019-04-26 02:32:01 +02001019 if (state->debug & DEBUG_STATS) {
Miss Islington (bot)e8ea3482019-08-05 00:20:25 -07001020 PySys_WriteStderr("gc: collecting generation %d...\n", generation);
1021 show_stats_each_generations(state);
Victor Stinner7181dec2015-03-27 17:47:53 +01001022 t1 = _PyTime_GetMonotonicClock();
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001023 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001024
Łukasz Langaa785c872016-09-09 17:37:37 -07001025 if (PyDTrace_GC_START_ENABLED())
1026 PyDTrace_GC_START(generation);
1027
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001028 /* update collection and allocation counters */
1029 if (generation+1 < NUM_GENERATIONS)
Victor Stinner9db03242019-04-26 02:32:01 +02001030 state->generations[generation+1].count += 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001031 for (i = 0; i <= generation; i++)
Victor Stinner9db03242019-04-26 02:32:01 +02001032 state->generations[i].count = 0;
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001033
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001034 /* merge younger generations with one we are currently collecting */
1035 for (i = 0; i < generation; i++) {
Victor Stinner9db03242019-04-26 02:32:01 +02001036 gc_list_merge(GEN_HEAD(state, i), GEN_HEAD(state, generation));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001037 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001038
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001039 /* handy references */
Victor Stinner9db03242019-04-26 02:32:01 +02001040 young = GEN_HEAD(state, generation);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001041 if (generation < NUM_GENERATIONS-1)
Victor Stinner9db03242019-04-26 02:32:01 +02001042 old = GEN_HEAD(state, generation+1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001043 else
1044 old = young;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001045
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001046 validate_list(young, 0);
1047 validate_list(old, 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001048 /* Using ob_refcnt and gc_refs, calculate which objects in the
1049 * container set are reachable from outside the set (i.e., have a
1050 * refcount greater than 0 when all the references within the
1051 * set are taken into account).
1052 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001053 update_refs(young); // gc_prev is used for gc_refs
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001054 subtract_refs(young);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001055
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001056 /* Leave everything reachable from outside young in young, and move
1057 * everything else (in young) to unreachable.
1058 * NOTE: This used to move the reachable objects into a reachable
1059 * set instead. But most things usually turn out to be reachable,
1060 * so it's more efficient to move the unreachable things.
1061 */
1062 gc_list_init(&unreachable);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001063 move_unreachable(young, &unreachable); // gc_prev is pointer again
1064 validate_list(young, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001065
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001066 untrack_tuples(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001067 /* Move reachable objects to next generation. */
1068 if (young != old) {
1069 if (generation == NUM_GENERATIONS - 2) {
Victor Stinner9db03242019-04-26 02:32:01 +02001070 state->long_lived_pending += gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001071 }
1072 gc_list_merge(young, old);
1073 }
1074 else {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +02001075 /* We only untrack dicts in full collections, to avoid quadratic
1076 dict build-up. See issue #14775. */
1077 untrack_dicts(young);
Victor Stinner9db03242019-04-26 02:32:01 +02001078 state->long_lived_pending = 0;
1079 state->long_lived_total = gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001080 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001081
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001082 /* All objects in unreachable are trash, but objects reachable from
Antoine Pitrou796564c2013-07-30 19:59:21 +02001083 * legacy finalizers (e.g. tp_del) can't safely be deleted.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001084 */
1085 gc_list_init(&finalizers);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001086 // NEXT_MASK_UNREACHABLE is cleared here.
1087 // After move_legacy_finalizers(), unreachable is normal list.
Antoine Pitrou796564c2013-07-30 19:59:21 +02001088 move_legacy_finalizers(&unreachable, &finalizers);
1089 /* finalizers contains the unreachable objects with a legacy finalizer;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001090 * unreachable objects reachable *from* those are also uncollectable,
1091 * and we move those into the finalizers list too.
1092 */
Antoine Pitrou796564c2013-07-30 19:59:21 +02001093 move_legacy_finalizer_reachable(&finalizers);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001094
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001095 validate_list(&finalizers, 0);
1096 validate_list(&unreachable, PREV_MASK_COLLECTING);
1097
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001098 /* Collect statistics on collectable objects found and print
1099 * debugging information.
1100 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001101 for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001102 m++;
Victor Stinner9db03242019-04-26 02:32:01 +02001103 if (state->debug & DEBUG_COLLECTABLE) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001104 debug_cycle("collectable", FROM_GC(gc));
1105 }
1106 }
Tim Petersead8b7a2004-10-30 23:09:22 +00001107
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001108 /* Clear weakrefs and invoke callbacks as necessary. */
1109 m += handle_weakrefs(&unreachable, old);
Tim Petersead8b7a2004-10-30 23:09:22 +00001110
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001111 validate_list(old, 0);
1112 validate_list(&unreachable, PREV_MASK_COLLECTING);
1113
Antoine Pitrou796564c2013-07-30 19:59:21 +02001114 /* Call tp_finalize on objects which have one. */
Tim Peters5fbc7b12014-05-08 17:42:19 -05001115 finalize_garbage(&unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001116
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001117 if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
Antoine Pitrou796564c2013-07-30 19:59:21 +02001118 gc_list_merge(&unreachable, old);
1119 }
1120 else {
1121 /* Call tp_clear on objects in the unreachable set. This will cause
1122 * the reference cycles to be broken. It may also cause some objects
1123 * in finalizers to be freed.
1124 */
Victor Stinner9db03242019-04-26 02:32:01 +02001125 delete_garbage(state, &unreachable, old);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001126 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001127
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001128 /* Collect statistics on uncollectable objects found and print
1129 * debugging information. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001130 for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001131 n++;
Victor Stinner9db03242019-04-26 02:32:01 +02001132 if (state->debug & DEBUG_UNCOLLECTABLE)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001133 debug_cycle("uncollectable", FROM_GC(gc));
1134 }
Victor Stinner9db03242019-04-26 02:32:01 +02001135 if (state->debug & DEBUG_STATS) {
Miss Islington (bot)e8ea3482019-08-05 00:20:25 -07001136 double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1);
Inada Naoki97a31c72019-08-31 10:50:27 +09001137 PySys_WriteStderr(
1138 "gc: done, %" PY_FORMAT_SIZE_T "d unreachable, "
1139 "%" PY_FORMAT_SIZE_T "d uncollectable, %.4fs elapsed\n",
Miss Islington (bot)e8ea3482019-08-05 00:20:25 -07001140 n+m, n, d);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001141 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001142
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001143 /* Append instances in the uncollectable set to a Python-reachable
1144 * list of garbage. The programmer has to deal with
1145 * this if they insist on creating this type of structure.
1146 */
Victor Stinner9db03242019-04-26 02:32:01 +02001147 handle_legacy_finalizers(state, &finalizers, old);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001148 validate_list(old, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001149
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001150 /* Clear free lists only during the collection of the highest
1151 * generation */
1152 if (generation == NUM_GENERATIONS-1) {
1153 clear_freelists();
1154 }
Christian Heimesa156e092008-02-16 07:38:31 +00001155
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001156 if (PyErr_Occurred()) {
Antoine Pitroufef34e32013-05-19 01:11:58 +02001157 if (nofail) {
1158 PyErr_Clear();
1159 }
1160 else {
1161 if (gc_str == NULL)
1162 gc_str = PyUnicode_FromString("garbage collection");
1163 PyErr_WriteUnraisable(gc_str);
1164 Py_FatalError("unexpected exception during garbage collection");
1165 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001166 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001167
Antoine Pitroud4156c12012-10-30 22:43:19 +01001168 /* Update stats */
Victor Stinner9db03242019-04-26 02:32:01 +02001169 if (n_collected) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001170 *n_collected = m;
Victor Stinner9db03242019-04-26 02:32:01 +02001171 }
1172 if (n_uncollectable) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001173 *n_uncollectable = n;
Victor Stinner9db03242019-04-26 02:32:01 +02001174 }
1175
1176 struct gc_generation_stats *stats = &state->generation_stats[generation];
Antoine Pitroud4156c12012-10-30 22:43:19 +01001177 stats->collections++;
1178 stats->collected += m;
1179 stats->uncollectable += n;
Łukasz Langaa785c872016-09-09 17:37:37 -07001180
Victor Stinner9db03242019-04-26 02:32:01 +02001181 if (PyDTrace_GC_DONE_ENABLED()) {
Łukasz Langaa785c872016-09-09 17:37:37 -07001182 PyDTrace_GC_DONE(n+m);
Victor Stinner9db03242019-04-26 02:32:01 +02001183 }
Łukasz Langaa785c872016-09-09 17:37:37 -07001184
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001185 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001186 return n+m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001187}
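
/* Illustrative sketch (not part of this module): what the DEBUG_STATS
 * branch above looks like from Python.  The "gc: done, ..." line is the
 * one written to stderr by the PySys_WriteStderr() call in collect().
 *
 *     import gc
 *     gc.set_debug(gc.DEBUG_STATS)
 *     gc.collect()
 *     # stderr ends with something like:
 *     #   gc: done, 0 unreachable, 0 uncollectable, 0.0002s elapsed
 *     gc.set_debug(0)
 */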
1188
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001189/* Invoke progress callbacks to notify clients that garbage collection
1190 * is starting or stopping.
1191 */
1192static void
Victor Stinner9db03242019-04-26 02:32:01 +02001193invoke_gc_callback(struct _gc_runtime_state *state, const char *phase,
1194 int generation, Py_ssize_t collected,
1195 Py_ssize_t uncollectable)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001196{
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001197 assert(!PyErr_Occurred());
Victor Stinner9db03242019-04-26 02:32:01 +02001198
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001199 /* we may get called very early */
Victor Stinner9db03242019-04-26 02:32:01 +02001200 if (state->callbacks == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001201 return;
Victor Stinner9db03242019-04-26 02:32:01 +02001202 }
1203
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001204 /* The local variable cannot be rebound; check it for sanity */
Victor Stinner9db03242019-04-26 02:32:01 +02001205 assert(PyList_CheckExact(state->callbacks));
1206 PyObject *info = NULL;
1207 if (PyList_GET_SIZE(state->callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001208 info = Py_BuildValue("{sisnsn}",
1209 "generation", generation,
1210 "collected", collected,
1211 "uncollectable", uncollectable);
1212 if (info == NULL) {
1213 PyErr_WriteUnraisable(NULL);
1214 return;
1215 }
1216 }
Victor Stinner9db03242019-04-26 02:32:01 +02001217 for (Py_ssize_t i=0; i<PyList_GET_SIZE(state->callbacks); i++) {
1218 PyObject *r, *cb = PyList_GET_ITEM(state->callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001219 Py_INCREF(cb); /* make sure cb doesn't go away */
1220 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001221 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001222 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001223 }
1224 else {
1225 Py_DECREF(r);
1226 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001227 Py_DECREF(cb);
1228 }
1229 Py_XDECREF(info);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001230 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001231}
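
/* Illustrative sketch: registering a progress callback from Python.  Each
 * callback receives the phase ("start" or "stop") and the info dict built
 * above with Py_BuildValue("{sisnsn}", ...); on the "start" phase,
 * "collected" and "uncollectable" are both zero.
 *
 *     import gc
 *
 *     def on_gc(phase, info):
 *         print(phase, info["generation"], info["collected"],
 *               info["uncollectable"])
 *
 *     gc.callbacks.append(on_gc)
 *     gc.collect()
 *     gc.callbacks.remove(on_gc)
 */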
1232
1233/* Perform garbage collection of a generation and invoke
1234 * progress callbacks.
1235 */
1236static Py_ssize_t
Victor Stinner9db03242019-04-26 02:32:01 +02001237collect_with_callback(struct _gc_runtime_state *state, int generation)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001238{
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001239 assert(!PyErr_Occurred());
Victor Stinner9db03242019-04-26 02:32:01 +02001240 Py_ssize_t result, collected, uncollectable;
1241 invoke_gc_callback(state, "start", generation, 0, 0);
1242 result = collect(state, generation, &collected, &uncollectable, 0);
1243 invoke_gc_callback(state, "stop", generation, collected, uncollectable);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001244 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001245 return result;
1246}
1247
Neal Norwitz7b216c52006-03-04 20:01:53 +00001248static Py_ssize_t
Victor Stinner9db03242019-04-26 02:32:01 +02001249collect_generations(struct _gc_runtime_state *state)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001250{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001251 /* Find the oldest generation (highest numbered) where the count
1252 * exceeds the threshold. Objects in that generation and
1253 * generations younger than it will be collected. */
Victor Stinner9db03242019-04-26 02:32:01 +02001254 Py_ssize_t n = 0;
1255 for (int i = NUM_GENERATIONS-1; i >= 0; i--) {
1256 if (state->generations[i].count > state->generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001257 /* Avoid quadratic performance degradation in number
1258 of tracked objects. See comments at the beginning
1259 of this file, and issue #4074.
1260 */
1261 if (i == NUM_GENERATIONS - 1
Victor Stinner9db03242019-04-26 02:32:01 +02001262 && state->long_lived_pending < state->long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001263 continue;
Victor Stinner9db03242019-04-26 02:32:01 +02001264 n = collect_with_callback(state, i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001265 break;
1266 }
1267 }
1268 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001269}
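
/* Illustrative sketch: the counts and thresholds that drive
 * collect_generations() are visible from Python.  count[0] rises as
 * tracked objects are allocated; once it exceeds threshold0, a
 * generation-0 collection runs.  The values shown are CPython's usual
 * defaults, not a guarantee.
 *
 *     import gc
 *     print(gc.get_threshold())   # typically (700, 10, 10)
 *     print(gc.get_count())       # e.g. (431, 2, 1)
 *     junk = [[] for _ in range(1000)]  # crossing threshold0 triggers an
 *                                       # automatic gen-0 collection
 */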
1270
Serhiy Storchaka93260282017-02-04 11:19:59 +02001271#include "clinic/gcmodule.c.h"
1272
1273/*[clinic input]
1274gc.enable
1275
1276Enable automatic garbage collection.
1277[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001278
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001279static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001280gc_enable_impl(PyObject *module)
1281/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001282{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001283 _PyRuntime.gc.enabled = 1;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001284 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001285}
1286
Serhiy Storchaka93260282017-02-04 11:19:59 +02001287/*[clinic input]
1288gc.disable
1289
1290Disable automatic garbage collection.
1291[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001292
1293static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001294gc_disable_impl(PyObject *module)
1295/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001296{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001297 _PyRuntime.gc.enabled = 0;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001298 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001299}
1300
Serhiy Storchaka93260282017-02-04 11:19:59 +02001301/*[clinic input]
1302gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001303
Serhiy Storchaka93260282017-02-04 11:19:59 +02001304Returns true if automatic garbage collection is enabled.
1305[clinic start generated code]*/
1306
1307static int
1308gc_isenabled_impl(PyObject *module)
1309/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001310{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001311 return _PyRuntime.gc.enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001312}
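
/* Illustrative sketch: toggling automatic collection from Python.
 * Disabling only stops automatic collections; explicit gc.collect()
 * calls still work.
 *
 *     import gc
 *     gc.disable()
 *     assert not gc.isenabled()
 *     gc.enable()
 *     assert gc.isenabled()
 */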
1313
Serhiy Storchaka93260282017-02-04 11:19:59 +02001314/*[clinic input]
1315gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001316
Serhiy Storchaka93260282017-02-04 11:19:59 +02001317 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1318
1319Run the garbage collector.
1320
1321With no arguments, run a full collection. The optional argument
1322may be an integer specifying which generation to collect. A ValueError
1323is raised if the generation number is invalid.
1324
1325The number of unreachable objects is returned.
1326[clinic start generated code]*/
1327
1328static Py_ssize_t
1329gc_collect_impl(PyObject *module, int generation)
1330/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001331{
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001332
Serhiy Storchaka93260282017-02-04 11:19:59 +02001333 if (generation < 0 || generation >= NUM_GENERATIONS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001334 PyErr_SetString(PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001335 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001336 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001337
Victor Stinner9db03242019-04-26 02:32:01 +02001338 struct _gc_runtime_state *state = &_PyRuntime.gc;
1339 Py_ssize_t n;
1340 if (state->collecting) {
1341 /* already collecting, don't do anything */
1342 n = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001343 }
Victor Stinner9db03242019-04-26 02:32:01 +02001344 else {
1345 state->collecting = 1;
1346 n = collect_with_callback(state, generation);
1347 state->collecting = 0;
1348 }
Serhiy Storchaka93260282017-02-04 11:19:59 +02001349 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001350}
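
/* Illustrative sketch: the generation argument validated above, seen from
 * Python.
 *
 *     import gc
 *     gc.collect()        # full collection (generation 2, the default)
 *     gc.collect(0)       # youngest generation only
 *     try:
 *         gc.collect(3)
 *     except ValueError:  # "invalid generation"
 *         pass
 */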
1351
Serhiy Storchaka93260282017-02-04 11:19:59 +02001352/*[clinic input]
1353gc.set_debug
1354
1355 flags: int
1356 An integer that can have the following bits turned on:
1357 DEBUG_STATS - Print statistics during collection.
1358 DEBUG_COLLECTABLE - Print collectable objects found.
1359 DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
1360 found.
1361 DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
1362 DEBUG_LEAK - Debug leaking programs (everything but STATS).
1363 /
1364
1365Set the garbage collection debugging flags.
1366
1367Debugging information is written to sys.stderr.
1368[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001369
1370static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001371gc_set_debug_impl(PyObject *module, int flags)
1372/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001373{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001374 _PyRuntime.gc.debug = flags;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001375
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001376 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001377}
1378
Serhiy Storchaka93260282017-02-04 11:19:59 +02001379/*[clinic input]
1380gc.get_debug -> int
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001381
Serhiy Storchaka93260282017-02-04 11:19:59 +02001382Get the garbage collection debugging flags.
1383[clinic start generated code]*/
1384
1385static int
1386gc_get_debug_impl(PyObject *module)
1387/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001388{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001389 return _PyRuntime.gc.debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001390}
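
/* Illustrative sketch: setting and reading the debug flags defined at the
 * bottom of this module.
 *
 *     import gc
 *     gc.set_debug(gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE)
 *     assert gc.get_debug() == gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE
 *     gc.set_debug(gc.DEBUG_LEAK)  # everything except DEBUG_STATS
 *     gc.set_debug(0)              # debugging off again
 */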
1391
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001392PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001393"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001394"\n"
1395"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001396"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001397
1398static PyObject *
Victor Stinner9db03242019-04-26 02:32:01 +02001399gc_set_threshold(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001400{
Victor Stinner9db03242019-04-26 02:32:01 +02001401 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001402 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
Victor Stinner9db03242019-04-26 02:32:01 +02001403 &state->generations[0].threshold,
1404 &state->generations[1].threshold,
1405 &state->generations[2].threshold))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001406 return NULL;
Victor Stinner9db03242019-04-26 02:32:01 +02001407 for (int i = 3; i < NUM_GENERATIONS; i++) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001408 /* generations higher than 2 get the same threshold */
Victor Stinner9db03242019-04-26 02:32:01 +02001409 state->generations[i].threshold = state->generations[2].threshold;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001410 }
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001411 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001412}
1413
Serhiy Storchaka93260282017-02-04 11:19:59 +02001414/*[clinic input]
1415gc.get_threshold
1416
1417Return the current collection thresholds.
1418[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001419
1420static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001421gc_get_threshold_impl(PyObject *module)
1422/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001423{
Victor Stinner9db03242019-04-26 02:32:01 +02001424 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001425 return Py_BuildValue("(iii)",
Victor Stinner9db03242019-04-26 02:32:01 +02001426 state->generations[0].threshold,
1427 state->generations[1].threshold,
1428 state->generations[2].threshold);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001429}
1430
Serhiy Storchaka93260282017-02-04 11:19:59 +02001431/*[clinic input]
1432gc.get_count
1433
1434Return a three-tuple of the current collection counts.
1435[clinic start generated code]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001436
1437static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001438gc_get_count_impl(PyObject *module)
1439/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001440{
Victor Stinner9db03242019-04-26 02:32:01 +02001441 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001442 return Py_BuildValue("(iii)",
Victor Stinner9db03242019-04-26 02:32:01 +02001443 state->generations[0].count,
1444 state->generations[1].count,
1445 state->generations[2].count);
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001446}
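
/* Illustrative sketch: reading and changing thresholds and counts from
 * Python.  As set_threshold() above arranges, generations higher than 2
 * share generation 2's threshold.
 *
 *     import gc
 *     gc.set_threshold(1000)     # threshold1/threshold2 stay unchanged
 *     print(gc.get_threshold())  # e.g. (1000, 10, 10)
 *     print(gc.get_count())      # current per-generation object counts
 *     gc.set_threshold(0)        # threshold0 == 0 disables collection
 */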
1447
Neil Schemenauer48c70342001-08-09 15:38:31 +00001448static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001449referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001450{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001451 Py_ssize_t i;
1452 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1453 if (PyTuple_GET_ITEM(objs, i) == obj)
1454 return 1;
1455 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001456}
1457
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001458static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001459gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001460{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001461 PyGC_Head *gc;
1462 PyObject *obj;
1463 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001464 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001465 obj = FROM_GC(gc);
1466 traverse = Py_TYPE(obj)->tp_traverse;
1467 if (obj == objs || obj == resultlist)
1468 continue;
1469 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1470 if (PyList_Append(resultlist, obj) < 0)
1471 return 0; /* error */
1472 }
1473 }
1474 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001475}
1476
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001477PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001478"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001479Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001480
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001481static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001482gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001483{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001484 int i;
1485 PyObject *result = PyList_New(0);
1486 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001487
Victor Stinner9db03242019-04-26 02:32:01 +02001488 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001489 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner9db03242019-04-26 02:32:01 +02001490 if (!(gc_referrers_for(args, GEN_HEAD(state, i), result))) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001491 Py_DECREF(result);
1492 return NULL;
1493 }
1494 }
1495 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001496}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001497
Tim Peters0f81ab62003-04-08 16:39:48 +00001498/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001499static int
Tim Peters730f5532003-04-08 17:17:17 +00001500referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001501{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001502 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001503}
1504
Tim Peters730f5532003-04-08 17:17:17 +00001505PyDoc_STRVAR(gc_get_referents__doc__,
1506"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001507Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001508
1509static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001510gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001511{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001512 Py_ssize_t i;
1513 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001514
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001515 if (result == NULL)
1516 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001517
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001518 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1519 traverseproc traverse;
1520 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001521
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001522 if (! PyObject_IS_GC(obj))
1523 continue;
1524 traverse = Py_TYPE(obj)->tp_traverse;
1525 if (! traverse)
1526 continue;
1527 if (traverse(obj, (visitproc)referentsvisit, result)) {
1528 Py_DECREF(result);
1529 return NULL;
1530 }
1531 }
1532 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001533}
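
/* Illustrative sketch: get_referents() follows tp_traverse of its
 * arguments, while get_referrers() scans every tracked object looking for
 * ones whose tp_traverse visits an argument.
 *
 *     import gc
 *     a = {"k": "v"}
 *     b = [a]
 *     assert a in gc.get_referents(b)  # b directly refers to a
 *     assert b in gc.get_referrers(a)  # so b is among a's referrers
 */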
1534
Serhiy Storchaka93260282017-02-04 11:19:59 +02001535/*[clinic input]
1536gc.get_objects
Pablo Galindo175421b2019-02-23 03:02:06 +00001537 generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None
1538 Generation to extract the objects from.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001539
1540Return a list of objects tracked by the collector (excluding the list returned).
Pablo Galindo175421b2019-02-23 03:02:06 +00001541
1542If generation is not None, return only the objects tracked by the collector
1543that are in that generation.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001544[clinic start generated code]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001545
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001546static PyObject *
Pablo Galindo175421b2019-02-23 03:02:06 +00001547gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
1548/*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001549{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001550 int i;
1551 PyObject* result;
Victor Stinner9db03242019-04-26 02:32:01 +02001552 struct _gc_runtime_state *state = &_PyRuntime.gc;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001553
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001554 result = PyList_New(0);
Pablo Galindo175421b2019-02-23 03:02:06 +00001555 if (result == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001556 return NULL;
Pablo Galindo175421b2019-02-23 03:02:06 +00001557 }
1558
1559 /* If generation is passed, we extract only that generation */
Victor Stinner0810fa72019-04-15 17:54:09 +02001560 if (generation != -1) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001561 if (generation >= NUM_GENERATIONS) {
1562 PyErr_Format(PyExc_ValueError,
1563 "generation parameter must be less than the number of "
1564 "available generations (%i)",
1565 NUM_GENERATIONS);
1566 goto error;
1567 }
1568
1569 if (generation < 0) {
1570 PyErr_SetString(PyExc_ValueError,
1571 "generation parameter cannot be negative");
1572 goto error;
1573 }
1574
Victor Stinner9db03242019-04-26 02:32:01 +02001575 if (append_objects(result, GEN_HEAD(state, generation))) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001576 goto error;
1577 }
1578
1579 return result;
1580 }
1581
1582 /* If generation is not passed or None, get all objects from all generations */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001583 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner9db03242019-04-26 02:32:01 +02001584 if (append_objects(result, GEN_HEAD(state, i))) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001585 goto error;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001586 }
1587 }
1588 return result;
Pablo Galindo175421b2019-02-23 03:02:06 +00001589
1590error:
1591 Py_DECREF(result);
1592 return NULL;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001593}
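
/* Illustrative sketch: the generation parameter validated above, seen
 * from Python (NUM_GENERATIONS is 3 in CPython).
 *
 *     import gc
 *     everything = gc.get_objects()             # all generations
 *     youngest   = gc.get_objects(generation=0)
 *     try:
 *         gc.get_objects(generation=5)
 *     except ValueError:
 *         pass  # must be a non-negative int below NUM_GENERATIONS
 */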
1594
Serhiy Storchaka93260282017-02-04 11:19:59 +02001595/*[clinic input]
1596gc.get_stats
1597
1598Return a list of dictionaries containing per-generation statistics.
1599[clinic start generated code]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001600
1601static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001602gc_get_stats_impl(PyObject *module)
1603/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001604{
1605 int i;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001606 struct gc_generation_stats stats[NUM_GENERATIONS], *st;
1607
1608 /* To get consistent values despite allocations while constructing
1609 the result list, we use a snapshot of the running stats. */
Victor Stinner9db03242019-04-26 02:32:01 +02001610 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001611 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner9db03242019-04-26 02:32:01 +02001612 stats[i] = state->generation_stats[i];
Antoine Pitroud4156c12012-10-30 22:43:19 +01001613 }
1614
Victor Stinner9db03242019-04-26 02:32:01 +02001615 PyObject *result = PyList_New(0);
Antoine Pitroud4156c12012-10-30 22:43:19 +01001616 if (result == NULL)
1617 return NULL;
1618
1619 for (i = 0; i < NUM_GENERATIONS; i++) {
1620 PyObject *dict;
1621 st = &stats[i];
1622 dict = Py_BuildValue("{snsnsn}",
1623 "collections", st->collections,
1624 "collected", st->collected,
1625 "uncollectable", st->uncollectable
1626 );
1627 if (dict == NULL)
1628 goto error;
1629 if (PyList_Append(result, dict)) {
1630 Py_DECREF(dict);
1631 goto error;
1632 }
1633 Py_DECREF(dict);
1634 }
1635 return result;
1636
1637error:
1638 Py_XDECREF(result);
1639 return NULL;
1640}
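
/* Illustrative sketch: consuming the per-generation dicts built above.
 *
 *     import gc
 *     for gen, st in enumerate(gc.get_stats()):
 *         print(gen, st["collections"], st["collected"],
 *               st["uncollectable"])
 */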
1641
1642
Serhiy Storchaka93260282017-02-04 11:19:59 +02001643/*[clinic input]
1644gc.is_tracked
1645
1646 obj: object
1647 /
1648
1649Returns true if the object is tracked by the garbage collector.
1650
1651Simple atomic objects will return false.
1652[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001653
1654static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001655gc_is_tracked(PyObject *module, PyObject *obj)
1656/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001657{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001658 PyObject *result;
1659
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001660 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001661 result = Py_True;
1662 else
1663 result = Py_False;
1664 Py_INCREF(result);
1665 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001666}
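
/* Illustrative sketch: atomic objects are never tracked; containers
 * usually are.  CPython also leaves some containers untracked as an
 * optimization (e.g. dicts holding only atomic values), so exact answers
 * can vary by version.
 *
 *     import gc
 *     gc.is_tracked(42)          # False: ints are atomic
 *     gc.is_tracked("hello")     # False
 *     gc.is_tracked([])          # True: lists are tracked containers
 *     gc.is_tracked({"a": []})   # True: dict holding a container
 */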
1667
brainfvckc75edab2017-10-16 12:49:41 -07001668/*[clinic input]
1669gc.freeze
1670
1671Freeze all current tracked objects and ignore them for future collections.
1672
1673This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
1674Note: collection before a POSIX fork() call may free pages for future allocation,
1675which can cause copy-on-write.
1676[clinic start generated code]*/
1677
1678static PyObject *
1679gc_freeze_impl(PyObject *module)
1680/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
1681{
Victor Stinner9db03242019-04-26 02:32:01 +02001682 struct _gc_runtime_state *state = &_PyRuntime.gc;
brainfvckc75edab2017-10-16 12:49:41 -07001683 for (int i = 0; i < NUM_GENERATIONS; ++i) {
Victor Stinner9db03242019-04-26 02:32:01 +02001684 gc_list_merge(GEN_HEAD(state, i), &state->permanent_generation.head);
1685 state->generations[i].count = 0;
brainfvckc75edab2017-10-16 12:49:41 -07001686 }
1687 Py_RETURN_NONE;
1688}
1689
1690/*[clinic input]
1691gc.unfreeze
1692
1693Unfreeze all objects in the permanent generation.
1694
1695Put all objects in the permanent generation back into the oldest generation.
1696[clinic start generated code]*/
1697
1698static PyObject *
1699gc_unfreeze_impl(PyObject *module)
1700/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
1701{
Victor Stinner9db03242019-04-26 02:32:01 +02001702 struct _gc_runtime_state *state = &_PyRuntime.gc;
1703 gc_list_merge(&state->permanent_generation.head, GEN_HEAD(state, NUM_GENERATIONS-1));
brainfvckc75edab2017-10-16 12:49:41 -07001704 Py_RETURN_NONE;
1705}
1706
1707/*[clinic input]
Victor Stinner05d68a82018-01-18 11:15:25 +01001708gc.get_freeze_count -> Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001709
1710Return the number of objects in the permanent generation.
1711[clinic start generated code]*/
1712
Victor Stinner05d68a82018-01-18 11:15:25 +01001713static Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001714gc_get_freeze_count_impl(PyObject *module)
Victor Stinner05d68a82018-01-18 11:15:25 +01001715/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
brainfvckc75edab2017-10-16 12:49:41 -07001716{
1717 return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
1718}
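
/* Illustrative sketch (POSIX-only because of os.fork()): the
 * freeze()/unfreeze() pattern described in the docstrings above.
 *
 *     import gc, os
 *     gc.freeze()                # park all tracked objects in the
 *                                # permanent generation
 *     n = gc.get_freeze_count()  # how many objects were frozen
 *     pid = os.fork()            # frozen objects are ignored by future
 *                                # collections, keeping pages shared
 *     if pid == 0:
 *         ...                    # child work goes here
 *     gc.unfreeze()              # move them back to the oldest generation
 */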
1719
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001720
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001721PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001722"This module provides access to the garbage collector for reference cycles.\n"
1723"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001724"enable() -- Enable automatic garbage collection.\n"
1725"disable() -- Disable automatic garbage collection.\n"
1726"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001727"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001728"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001729"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001730"set_debug() -- Set debugging flags.\n"
1731"get_debug() -- Get debugging flags.\n"
1732"set_threshold() -- Set the collection thresholds.\n"
1733"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001734"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001735"is_tracked() -- Returns true if a given object is tracked.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001736"get_referrers() -- Return the list of objects that refer to an object.\n"
brainfvckc75edab2017-10-16 12:49:41 -07001737"get_referents() -- Return the list of objects that an object refers to.\n"
1738"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
1739"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
1740"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001741
1742static PyMethodDef GcMethods[] = {
Serhiy Storchaka93260282017-02-04 11:19:59 +02001743 GC_ENABLE_METHODDEF
1744 GC_DISABLE_METHODDEF
1745 GC_ISENABLED_METHODDEF
1746 GC_SET_DEBUG_METHODDEF
1747 GC_GET_DEBUG_METHODDEF
1748 GC_GET_COUNT_METHODDEF
Victor Stinner9db03242019-04-26 02:32:01 +02001749 {"set_threshold", gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__},
Serhiy Storchaka93260282017-02-04 11:19:59 +02001750 GC_GET_THRESHOLD_METHODDEF
1751 GC_COLLECT_METHODDEF
1752 GC_GET_OBJECTS_METHODDEF
1753 GC_GET_STATS_METHODDEF
1754 GC_IS_TRACKED_METHODDEF
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001755 {"get_referrers", gc_get_referrers, METH_VARARGS,
1756 gc_get_referrers__doc__},
1757 {"get_referents", gc_get_referents, METH_VARARGS,
1758 gc_get_referents__doc__},
brainfvckc75edab2017-10-16 12:49:41 -07001759 GC_FREEZE_METHODDEF
1760 GC_UNFREEZE_METHODDEF
1761 GC_GET_FREEZE_COUNT_METHODDEF
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001762 {NULL, NULL} /* Sentinel */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001763};
1764
Martin v. Löwis1a214512008-06-11 05:26:20 +00001765static struct PyModuleDef gcmodule = {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001766 PyModuleDef_HEAD_INIT,
Antoine Pitrou696e0352010-08-08 22:18:46 +00001767 "gc", /* m_name */
1768 gc__doc__, /* m_doc */
1769 -1, /* m_size */
1770 GcMethods, /* m_methods */
1771 NULL, /* m_reload */
1772 NULL, /* m_traverse */
1773 NULL, /* m_clear */
1774 NULL /* m_free */
Martin v. Löwis1a214512008-06-11 05:26:20 +00001775};
1776
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001777PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001778PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001779{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001780 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001781
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001782 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001783
Victor Stinner9db03242019-04-26 02:32:01 +02001784 if (m == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001785 return NULL;
Victor Stinner9db03242019-04-26 02:32:01 +02001786 }
Tim Peters11558872003-04-06 23:30:52 +00001787
Victor Stinner9db03242019-04-26 02:32:01 +02001788 struct _gc_runtime_state *state = &_PyRuntime.gc;
1789 if (state->garbage == NULL) {
1790 state->garbage = PyList_New(0);
1791 if (state->garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001792 return NULL;
1793 }
Victor Stinner9db03242019-04-26 02:32:01 +02001794 Py_INCREF(state->garbage);
1795 if (PyModule_AddObject(m, "garbage", state->garbage) < 0)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001796 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001797
Victor Stinner9db03242019-04-26 02:32:01 +02001798 if (state->callbacks == NULL) {
1799 state->callbacks = PyList_New(0);
1800 if (state->callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001801 return NULL;
1802 }
Victor Stinner9db03242019-04-26 02:32:01 +02001803 Py_INCREF(state->callbacks);
1804 if (PyModule_AddObject(m, "callbacks", state->callbacks) < 0)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001805 return NULL;
1806
Martin v. Löwis1a214512008-06-11 05:26:20 +00001807#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001808 ADD_INT(DEBUG_STATS);
1809 ADD_INT(DEBUG_COLLECTABLE);
1810 ADD_INT(DEBUG_UNCOLLECTABLE);
1811 ADD_INT(DEBUG_SAVEALL);
1812 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001813#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001814 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001815}
1816
Guido van Rossume13ddc92003-04-17 17:29:22 +00001817/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001818Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001819PyGC_Collect(void)
1820{
Victor Stinner9db03242019-04-26 02:32:01 +02001821 struct _gc_runtime_state *state = &_PyRuntime.gc;
1822 if (!state->enabled) {
1823 return 0;
1824 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001825
Victor Stinner9db03242019-04-26 02:32:01 +02001826 Py_ssize_t n;
1827 if (state->collecting) {
1828 /* already collecting, don't do anything */
1829 n = 0;
1830 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001831 else {
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001832 PyObject *exc, *value, *tb;
Victor Stinner9db03242019-04-26 02:32:01 +02001833 state->collecting = 1;
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001834 PyErr_Fetch(&exc, &value, &tb);
Victor Stinner9db03242019-04-26 02:32:01 +02001835 n = collect_with_callback(state, NUM_GENERATIONS - 1);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001836 PyErr_Restore(exc, value, tb);
Victor Stinner9db03242019-04-26 02:32:01 +02001837 state->collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001838 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001839
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001840 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001841}
1842
Antoine Pitroufef34e32013-05-19 01:11:58 +02001843Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001844_PyGC_CollectIfEnabled(void)
1845{
Łukasz Langafef7e942016-09-09 21:47:46 -07001846 return PyGC_Collect();
1847}
1848
1849Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02001850_PyGC_CollectNoFail(void)
1851{
Victor Stinner9db03242019-04-26 02:32:01 +02001852 assert(!PyErr_Occurred());
1853
1854 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitroufef34e32013-05-19 01:11:58 +02001855 Py_ssize_t n;
1856
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001857 /* Ideally, this function is only called on interpreter shutdown,
1858 and therefore not recursively. Unfortunately, when there are daemon
1859 threads, a daemon thread can start a cyclic garbage collection
1860 during interpreter shutdown (and then never finish it).
1861 See http://bugs.python.org/issue8713#msg195178 for an example.
1862 */
Victor Stinner9db03242019-04-26 02:32:01 +02001863 if (state->collecting) {
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001864 n = 0;
Victor Stinner9db03242019-04-26 02:32:01 +02001865 }
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001866 else {
Victor Stinner9db03242019-04-26 02:32:01 +02001867 state->collecting = 1;
1868 n = collect(state, NUM_GENERATIONS - 1, NULL, NULL, 1);
1869 state->collecting = 0;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001870 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001871 return n;
1872}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001873
Antoine Pitrou696e0352010-08-08 22:18:46 +00001874void
Victor Stinner9db03242019-04-26 02:32:01 +02001875_PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
Antoine Pitrou696e0352010-08-08 22:18:46 +00001876{
Victor Stinner9db03242019-04-26 02:32:01 +02001877 struct _gc_runtime_state *state = &runtime->gc;
1878 if (!(state->debug & DEBUG_SAVEALL)
1879 && state->garbage != NULL && PyList_GET_SIZE(state->garbage) > 0) {
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02001880 const char *message;
Victor Stinner9db03242019-04-26 02:32:01 +02001881 if (state->debug & DEBUG_UNCOLLECTABLE)
Antoine Pitroub5d82042010-11-05 00:05:25 +00001882 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001883 "shutdown";
1884 else
Antoine Pitroub5d82042010-11-05 00:05:25 +00001885 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001886 "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001887 /* PyErr_WarnFormat does too many things and we are at shutdown;
1888 the warnings module's dependencies (e.g. linecache) may be gone
1889 already. */
1890 if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
1891 "gc", NULL, message,
Victor Stinner9db03242019-04-26 02:32:01 +02001892 PyList_GET_SIZE(state->garbage)))
Georg Brandl08be72d2010-10-24 15:11:22 +00001893 PyErr_WriteUnraisable(NULL);
Victor Stinner9db03242019-04-26 02:32:01 +02001894 if (state->debug & DEBUG_UNCOLLECTABLE) {
Antoine Pitrou696e0352010-08-08 22:18:46 +00001895 PyObject *repr = NULL, *bytes = NULL;
Victor Stinner9db03242019-04-26 02:32:01 +02001896 repr = PyObject_Repr(state->garbage);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001897 if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
Victor Stinner9db03242019-04-26 02:32:01 +02001898 PyErr_WriteUnraisable(state->garbage);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001899 else {
1900 PySys_WriteStderr(
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001901 " %s\n",
Antoine Pitrou696e0352010-08-08 22:18:46 +00001902 PyBytes_AS_STRING(bytes)
1903 );
1904 }
1905 Py_XDECREF(repr);
1906 Py_XDECREF(bytes);
1907 }
Antoine Pitrou696e0352010-08-08 22:18:46 +00001908 }
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001909}
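
/* Illustrative sketch: how objects end up in gc.garbage.  With
 * DEBUG_SAVEALL set, delete_garbage() appends unreachable objects to
 * gc.garbage instead of freeing them; anything still in that list at
 * shutdown triggers the ResourceWarning emitted above.
 *
 *     import gc
 *     gc.set_debug(gc.DEBUG_SAVEALL)
 *     cycle = []
 *     cycle.append(cycle)   # self-referential cycle
 *     del cycle
 *     gc.collect()
 *     print(gc.garbage)     # the saved cycle, e.g. [[[...]]]
 *     gc.garbage.clear()    # avoid the shutdown warning
 *     gc.set_debug(0)
 */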
1910
1911void
Victor Stinner8e91c242019-04-24 17:24:01 +02001912_PyGC_Fini(_PyRuntimeState *runtime)
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001913{
Victor Stinner9db03242019-04-26 02:32:01 +02001914 struct _gc_runtime_state *state = &runtime->gc;
1915 Py_CLEAR(state->garbage);
1916 Py_CLEAR(state->callbacks);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001917}
1918
Neil Schemenauer43411b52001-08-30 00:05:51 +00001919/* for debugging */
Guido van Rossume13ddc92003-04-17 17:29:22 +00001920void
1921_PyGC_Dump(PyGC_Head *g)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001922{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001923 _PyObject_Dump(FROM_GC(g));
Neil Schemenauer43411b52001-08-30 00:05:51 +00001924}
1925
Neil Schemenauer43411b52001-08-30 00:05:51 +00001926/* extension modules might be compiled with GC support so these
1927 functions must always be available */
1928
1929void
Victor Stinnera42de742018-11-22 10:25:22 +01001930PyObject_GC_Track(void *op_raw)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001931{
Victor Stinnera42de742018-11-22 10:25:22 +01001932 PyObject *op = _PyObject_CAST(op_raw);
Victor Stinner271753a2018-11-22 01:02:54 +01001933 if (_PyObject_GC_IS_TRACKED(op)) {
1934 _PyObject_ASSERT_FAILED_MSG(op,
1935 "object already tracked "
1936 "by the garbage collector");
1937 }
Victor Stinnera42de742018-11-22 10:25:22 +01001938 _PyObject_GC_TRACK(op);
Neil Schemenauer43411b52001-08-30 00:05:51 +00001939}
1940
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001941void
Victor Stinnera42de742018-11-22 10:25:22 +01001942PyObject_GC_UnTrack(void *op_raw)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001943{
Victor Stinnera42de742018-11-22 10:25:22 +01001944 PyObject *op = _PyObject_CAST(op_raw);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001945 /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
1946 * call PyObject_GC_UnTrack twice on an object.
1947 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001948 if (_PyObject_GC_IS_TRACKED(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001949 _PyObject_GC_UNTRACK(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001950 }
Neil Schemenauer43411b52001-08-30 00:05:51 +00001951}
1952
Victor Stinnerdb067af2014-05-02 22:31:14 +02001953static PyObject *
1954_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001955{
Victor Stinner9db03242019-04-26 02:32:01 +02001956 struct _gc_runtime_state *state = &_PyRuntime.gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001957 PyObject *op;
1958 PyGC_Head *g;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001959 size_t size;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001960 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1961 return PyErr_NoMemory();
Victor Stinnerdb067af2014-05-02 22:31:14 +02001962 size = sizeof(PyGC_Head) + basicsize;
1963 if (use_calloc)
1964 g = (PyGC_Head *)PyObject_Calloc(1, size);
1965 else
1966 g = (PyGC_Head *)PyObject_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001967 if (g == NULL)
1968 return PyErr_NoMemory();
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001969 assert(((uintptr_t)g & 3) == 0); // g must be aligned on a 4-byte boundary
1970 g->_gc_next = 0;
1971 g->_gc_prev = 0;
Victor Stinner9db03242019-04-26 02:32:01 +02001972 state->generations[0].count++; /* number of allocated GC objects */
1973 if (state->generations[0].count > state->generations[0].threshold &&
1974 state->enabled &&
1975 state->generations[0].threshold &&
1976 !state->collecting &&
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001977 !PyErr_Occurred()) {
Victor Stinner9db03242019-04-26 02:32:01 +02001978 state->collecting = 1;
1979 collect_generations(state);
1980 state->collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001981 }
1982 op = FROM_GC(g);
1983 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001984}
1985
1986PyObject *
Victor Stinnerdb067af2014-05-02 22:31:14 +02001987_PyObject_GC_Malloc(size_t basicsize)
1988{
1989 return _PyObject_GC_Alloc(0, basicsize);
1990}
1991
1992PyObject *
1993_PyObject_GC_Calloc(size_t basicsize)
1994{
1995 return _PyObject_GC_Alloc(1, basicsize);
1996}
1997
1998PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001999_PyObject_GC_New(PyTypeObject *tp)
2000{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002001 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
2002 if (op != NULL)
2003 op = PyObject_INIT(op, tp);
2004 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00002005}
2006
2007PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00002008_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00002009{
Victor Stinner5d1866c2013-07-08 22:17:52 +02002010 size_t size;
2011 PyVarObject *op;
2012
2013 if (nitems < 0) {
2014 PyErr_BadInternalCall();
2015 return NULL;
2016 }
2017 size = _PyObject_VAR_SIZE(tp, nitems);
2018 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002019 if (op != NULL)
2020 op = PyObject_INIT_VAR(op, tp, nitems);
2021 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00002022}
2023
2024PyVarObject *
Martin v. Löwis41290682006-02-16 14:56:14 +00002025_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00002026{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002027 const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
Victor Stinnera4b2bc72018-10-26 18:00:13 +02002028 _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
2029 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002030 return (PyVarObject *)PyErr_NoMemory();
Victor Stinnera4b2bc72018-10-26 18:00:13 +02002031 }
2032
2033 PyGC_Head *g = AS_GC(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002034 g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
2035 if (g == NULL)
2036 return (PyVarObject *)PyErr_NoMemory();
2037 op = (PyVarObject *) FROM_GC(g);
2038 Py_SIZE(op) = nitems;
2039 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00002040}
2041
2042void
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00002043PyObject_GC_Del(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00002044{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002045 PyGC_Head *g = AS_GC(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09002046 if (_PyObject_GC_IS_TRACKED(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002047 gc_list_remove(g);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09002048 }
Victor Stinner9db03242019-04-26 02:32:01 +02002049 struct _gc_runtime_state *state = &_PyRuntime.gc;
2050 if (state->generations[0].count > 0) {
2051 state->generations[0].count--;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002052 }
2053 PyObject_FREE(g);
Neil Schemenauer43411b52001-08-30 00:05:51 +00002054}