blob: fad1356d6b443d40d6b8c09c0b2966f168717412 [file] [log] [blame]
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001/*
Tim Peters88396172002-06-30 17:56:40 +00002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00003 Reference Cycle Garbage Collection
4 ==================================
5
Neil Schemenauerb2c2c9e2000-10-04 16:34:09 +00006 Neil Schemenauer <nas@arctrix.com>
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00007
8 Based on a post on the python-dev list. Ideas from Guido van Rossum,
9 Eric Tiedemann, and various others.
10
Neil Schemenauer43411b52001-08-30 00:05:51 +000011 http://www.arctrix.com/nas/python/gc/
Neil Schemenauera7024e92008-07-15 19:24:01 +000012
13 The following mailing list threads provide a historical perspective on
14 the design of this module. Note that a fair amount of refinement has
15 occurred since those discussions.
16
17 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19 http://mail.python.org/pipermail/python-dev/2000-March/002497.html
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000020
21 For a highlevel view of the collection process, read the collect
22 function.
23
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000024*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Victor Stinner27e2d1f2018-11-01 00:52:28 +010027#include "pycore_context.h"
Victor Stinnerbcda8f12018-11-21 22:27:47 +010028#include "pycore_object.h"
Victor Stinner621cebe2018-11-12 16:53:38 +010029#include "pycore_pymem.h"
30#include "pycore_pystate.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000031#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070032#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010033#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000034
Serhiy Storchaka93260282017-02-04 11:19:59 +020035/*[clinic input]
36module gc
37[clinic start generated code]*/
38/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
39
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090040#define GC_DEBUG (0) /* Enable more asserts */
41
42#define GC_NEXT _PyGCHead_NEXT
43#define GC_PREV _PyGCHead_PREV
44
// update_refs() sets this bit for all objects in the current generation.
// subtract_refs() and move_unreachable() use it to tell whether a visited
// object belongs to the generation being collected or not.
//
// move_unreachable() removes this flag from reachable objects.
// Only unreachable objects have this flag.
//
// No objects in the interpreter have this flag after GC ends.
53#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING
54
// The lowest bit of _gc_next is used for the UNREACHABLE flag.
//
// This flag indicates that the object is in the unreachable list built by
// move_unreachable().
//
// Although this flag is used only in move_unreachable(), move_unreachable()
// doesn't clear it, to skip unnecessary iteration.
// move_legacy_finalizers() removes this flag instead.
// Between those two calls, the unreachable list is not a normal list and we
// cannot use most gc_list_* functions on it.
64#define NEXT_MASK_UNREACHABLE (1)
65
Victor Stinner626bff82018-10-25 17:31:10 +020066/* Get an object's GC head */
67#define AS_GC(o) ((PyGC_Head *)(o)-1)
68
69/* Get the object given the GC head */
70#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
71
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090072static inline int
73gc_is_collecting(PyGC_Head *g)
74{
75 return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
76}
77
78static inline void
79gc_clear_collecting(PyGC_Head *g)
80{
81 g->_gc_prev &= ~PREV_MASK_COLLECTING;
82}
83
84static inline Py_ssize_t
85gc_get_refs(PyGC_Head *g)
86{
87 return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
88}
89
90static inline void
91gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
92{
93 g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
94 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
95}
96
97static inline void
98gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
99{
100 g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
101 | PREV_MASK_COLLECTING
102 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
103}
104
105static inline void
106gc_decref(PyGC_Head *g)
107{
Victor Stinner626bff82018-10-25 17:31:10 +0200108 _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
109 gc_get_refs(g) > 0,
110 "refcount is too small");
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900111 g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
112}
113
Tim Peters6fc13d92002-07-02 18:12:35 +0000114/* Python string to use if unhandled exception occurs */
Tim Petersbf384c22003-04-06 00:11:39 +0000115static PyObject *gc_str = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +0000116
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000117/* set for debugging information */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000118#define DEBUG_STATS (1<<0) /* print collection statistics */
119#define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
120#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
121#define DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
122#define DEBUG_LEAK DEBUG_COLLECTABLE | \
123 DEBUG_UNCOLLECTABLE | \
124 DEBUG_SAVEALL
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000125
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600126#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
Antoine Pitroud4156c12012-10-30 22:43:19 +0100127
/* Initialize the GC runtime state: enable automatic collection, set up the
 * NUM_GENERATIONS generation list heads (each starts as an empty,
 * self-linked list) together with their collection thresholds and counts,
 * and initialize the permanent generation the same way. */
void
_PyGC_Initialize(struct _gc_runtime_state *state)
{
    state->enabled = 1; /* automatic collection enabled? */

#define _GEN_HEAD(n) (&state->generations[n].head)
    struct gc_generation generations[NUM_GENERATIONS] = {
        /* PyGC_Head, threshold, count */
        /* Each head's prev/next point at itself: an empty list. */
        {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
        {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0},
        {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0},
    };
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        state->generations[i] = generations[i];
    };
    state->generation0 = GEN_HEAD(0);
    /* The permanent generation is never collected (threshold 0). */
    struct gc_generation permanent_generation = {
        {(uintptr_t)&state->permanent_generation.head,
         (uintptr_t)&state->permanent_generation.head}, 0, 0
    };
    state->permanent_generation = permanent_generation;
}
Antoine Pitroud4156c12012-10-30 22:43:19 +0100150
/*
_gc_prev values
---------------

Between collections, _gc_prev is used for the doubly linked list.

The lowest two bits of _gc_prev are used for flags.
PREV_MASK_COLLECTING is used only while collecting and is cleared before
GC ends or _PyObject_GC_UNTRACK() is called.

During a collection, _gc_prev is temporarily used for gc_refs, and the gc
list is singly linked until _gc_prev is restored.

gc_refs
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.

PREV_MASK_COLLECTING
    Objects in the generation being collected are marked
    PREV_MASK_COLLECTING in update_refs().


_gc_next values
---------------

_gc_next takes these values:

0
    The object is not tracked.

!= 0
    Pointer to the next object in the GC list.
    Additionally, the lowest bit is used temporarily for the
    NEXT_MASK_UNREACHABLE flag described below.

NEXT_MASK_UNREACHABLE
    move_unreachable() moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set and
    sets this flag on them.

    Objects that are found to be reachable have gc_refs set to 1.
    When this flag is set on such a reachable object, the object must be
    in the "unreachable" set; the flag is unset and the object is moved
    back to the "reachable" set.

    move_legacy_finalizers() will remove this flag from the "unreachable"
    set.
*/
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000201
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000202/*** list functions ***/
203
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900204static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000205gc_list_init(PyGC_Head *list)
206{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900207 // List header must not have flags.
208 // We can assign pointer by simple cast.
209 list->_gc_prev = (uintptr_t)list;
210 list->_gc_next = (uintptr_t)list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000211}
212
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900213static inline int
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000214gc_list_is_empty(PyGC_Head *list)
215{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900216 return (list->_gc_next == (uintptr_t)list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000217}
218
Tim Peterse2d59182004-11-01 01:39:08 +0000219/* Append `node` to `list`. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900220static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000221gc_list_append(PyGC_Head *node, PyGC_Head *list)
222{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900223 PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
224
225 // last <-> node
226 _PyGCHead_SET_PREV(node, last);
227 _PyGCHead_SET_NEXT(last, node);
228
229 // node <-> list
230 _PyGCHead_SET_NEXT(node, list);
231 list->_gc_prev = (uintptr_t)node;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000232}
233
Tim Peterse2d59182004-11-01 01:39:08 +0000234/* Remove `node` from the gc list it's currently in. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900235static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000236gc_list_remove(PyGC_Head *node)
237{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900238 PyGC_Head *prev = GC_PREV(node);
239 PyGC_Head *next = GC_NEXT(node);
240
241 _PyGCHead_SET_NEXT(prev, next);
242 _PyGCHead_SET_PREV(next, prev);
243
244 node->_gc_next = 0; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000245}
246
Tim Peterse2d59182004-11-01 01:39:08 +0000247/* Move `node` from the gc list it's currently in (which is not explicitly
248 * named here) to the end of `list`. This is semantically the same as
249 * gc_list_remove(node) followed by gc_list_append(node, list).
250 */
251static void
252gc_list_move(PyGC_Head *node, PyGC_Head *list)
253{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000254 /* Unlink from current list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900255 PyGC_Head *from_prev = GC_PREV(node);
256 PyGC_Head *from_next = GC_NEXT(node);
257 _PyGCHead_SET_NEXT(from_prev, from_next);
258 _PyGCHead_SET_PREV(from_next, from_prev);
259
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000260 /* Relink at end of new list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900261 // list must not have flags. So we can skip macros.
262 PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
263 _PyGCHead_SET_PREV(node, to_prev);
264 _PyGCHead_SET_NEXT(to_prev, node);
265 list->_gc_prev = (uintptr_t)node;
266 _PyGCHead_SET_NEXT(node, list);
Tim Peterse2d59182004-11-01 01:39:08 +0000267}
268
269/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000270static void
271gc_list_merge(PyGC_Head *from, PyGC_Head *to)
272{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000273 assert(from != to);
274 if (!gc_list_is_empty(from)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900275 PyGC_Head *to_tail = GC_PREV(to);
276 PyGC_Head *from_head = GC_NEXT(from);
277 PyGC_Head *from_tail = GC_PREV(from);
278 assert(from_head != from);
279 assert(from_tail != from);
280
281 _PyGCHead_SET_NEXT(to_tail, from_head);
282 _PyGCHead_SET_PREV(from_head, to_tail);
283
284 _PyGCHead_SET_NEXT(from_tail, to);
285 _PyGCHead_SET_PREV(to, from_tail);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000286 }
287 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000288}
289
Neal Norwitz7b216c52006-03-04 20:01:53 +0000290static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000291gc_list_size(PyGC_Head *list)
292{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000293 PyGC_Head *gc;
294 Py_ssize_t n = 0;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900295 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000296 n++;
297 }
298 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000299}
300
Tim Peters259272b2003-04-06 19:41:39 +0000301/* Append objects in a GC list to a Python list.
302 * Return 0 if all OK, < 0 if error (out of memory for list).
303 */
304static int
305append_objects(PyObject *py_list, PyGC_Head *gc_list)
306{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000307 PyGC_Head *gc;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900308 for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000309 PyObject *op = FROM_GC(gc);
310 if (op != py_list) {
311 if (PyList_Append(py_list, op)) {
312 return -1; /* exception */
313 }
314 }
315 }
316 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000317}
318
#if GC_DEBUG
/* Walk `head`, asserting doubly-linked-list consistency: each node's prev
 * link must point back at the node we arrived from, and each node's
 * PREV_MASK_COLLECTING bit must equal `expected_mask` (0 or
 * PREV_MASK_COLLECTING).  This doubles as executable documentation of
 * when the flag is supposed to be set or clear.  Compiled to a no-op
 * unless GC_DEBUG is enabled. */
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    while (gc != head) {
        assert(GC_NEXT(gc) != NULL);
        assert(GC_PREV(gc) == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
        prev = gc;
        gc = GC_NEXT(gc);
    }
    /* The header's prev link must close the cycle back to the last node. */
    assert(prev == GC_PREV(head));
}
#else
#define validate_list(x,y) do{}while(0)
#endif
339
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000340/*** end of list stuff ***/
341
342
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900343/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and
344 * PREV_MASK_COLLECTING bit is set for all objects in containers.
Tim Peters88396172002-06-30 17:56:40 +0000345 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000346static void
347update_refs(PyGC_Head *containers)
348{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900349 PyGC_Head *gc = GC_NEXT(containers);
350 for (; gc != containers; gc = GC_NEXT(gc)) {
351 gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000352 /* Python's cyclic gc should never see an incoming refcount
353 * of 0: if something decref'ed to 0, it should have been
354 * deallocated immediately at that time.
355 * Possible cause (if the assert triggers): a tp_dealloc
356 * routine left a gc-aware object tracked during its teardown
357 * phase, and did something-- or allowed something to happen --
358 * that called back into Python. gc can trigger then, and may
359 * see the still-tracked dying object. Before this assert
360 * was added, such mistakes went on to allow gc to try to
361 * delete the object again. In a debug build, that caused
362 * a mysterious segfault, when _Py_ForgetReference tried
363 * to remove the object from the doubly-linked list of all
364 * objects a second time. In a release build, an actual
365 * double deallocation occurred, which leads to corruption
366 * of the allocator's internal bookkeeping pointers. That's
367 * so serious that maybe this should be a release-build
368 * check instead of an assert?
369 */
Victor Stinnera4b2bc72018-10-26 18:00:13 +0200370 _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000371 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000372}
373
Tim Peters19b74c72002-07-01 03:52:19 +0000374/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000375static int
376visit_decref(PyObject *op, void *data)
377{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000378 assert(op != NULL);
379 if (PyObject_IS_GC(op)) {
380 PyGC_Head *gc = AS_GC(op);
381 /* We're only interested in gc_refs for objects in the
382 * generation being collected, which can be recognized
383 * because only they have positive gc_refs.
384 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900385 if (gc_is_collecting(gc)) {
386 gc_decref(gc);
387 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000388 }
389 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000390}
391
Tim Peters19b74c72002-07-01 03:52:19 +0000392/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
393 * for all objects in containers, and is GC_REACHABLE for all tracked gc
394 * objects not in containers. The ones with gc_refs > 0 are directly
395 * reachable from outside containers, and so can't be collected.
396 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000397static void
398subtract_refs(PyGC_Head *containers)
399{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000400 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900401 PyGC_Head *gc = GC_NEXT(containers);
402 for (; gc != containers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000403 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
404 (void) traverse(FROM_GC(gc),
405 (visitproc)visit_decref,
406 NULL);
407 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000408}
409
/* A tp_traverse callback for move_unreachable: `op` is referenced by an
 * object known to be reachable, so make sure move_unreachable treats it
 * (and everything it reaches) as reachable too. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore untracked objects and objects in other generation.
    if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
        return 0;
    }

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from the unreachable list: the unreachable
        // list stores NEXT_MASK_UNREACHABLE in _gc_next, so the normal
        // gc_list_* helpers can't be used on it.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(prev),
                         prev->_gc_next & NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(next),
                         next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it.  All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");
    }
    return 0;
}
463
/* Move the unreachable objects from young to unreachable.  After this,
 * no object in young has the PREV_MASK_COLLECTING flag and every object
 * in unreachable does.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 *
 * This function restores the _gc_prev pointers: young and unreachable are
 * doubly linked lists after this function returns.
 * But _gc_next in the unreachable list still carries the
 * NEXT_MASK_UNREACHABLE flag, so most gc_list_* functions cannot be used
 * on unreachable until the flag is removed (by move_legacy_finalizers()).
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous elem in the young list, used for restoring gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants:  all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,
                                      "refcount is too small");
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                    (visitproc)visit_reachable,
                    (void *)young);
            // relink gc_prev to prev element.
            _PyGCHead_SET_PREV(gc, prev);
            // gc is not COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need to gc->next->prev = prev because it is single linked.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we use
            // NEXT_MASK_UNREACHABLE here.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in the unreachable set have the
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may set the flag on the unreachable header too.
            // move_legacy_finalizers() takes care of that.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must be the last element remaining in the list.
    young->_gc_prev = (uintptr_t)prev;
}
546
547static void
548untrack_tuples(PyGC_Head *head)
549{
550 PyGC_Head *next, *gc = GC_NEXT(head);
551 while (gc != head) {
552 PyObject *op = FROM_GC(gc);
553 next = GC_NEXT(gc);
554 if (PyTuple_CheckExact(op)) {
555 _PyTuple_MaybeUntrack(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000556 }
557 gc = next;
558 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000559}
560
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200561/* Try to untrack all currently tracked dictionaries */
562static void
563untrack_dicts(PyGC_Head *head)
564{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900565 PyGC_Head *next, *gc = GC_NEXT(head);
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200566 while (gc != head) {
567 PyObject *op = FROM_GC(gc);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900568 next = GC_NEXT(gc);
569 if (PyDict_CheckExact(op)) {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200570 _PyDict_MaybeUntrack(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900571 }
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200572 gc = next;
573 }
574}
575
Antoine Pitrou796564c2013-07-30 19:59:21 +0200576/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000577static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200578has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000579{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200580 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000581}
582
/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes the NEXT_MASK_UNREACHABLE flag
 * from _gc_next of every node in unreachable, turning it back into a
 * normal doubly linked list.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    /* The header itself may have picked up the flag in move_unreachable();
     * clear it first so GC_NEXT works on the header. */
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);
        /* Strip the flag before computing the successor pointer. */
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}
610
Antoine Pitrou796564c2013-07-30 19:59:21 +0200611/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000612static int
613visit_move(PyObject *op, PyGC_Head *tolist)
614{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000615 if (PyObject_IS_GC(op)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900616 PyGC_Head *gc = AS_GC(op);
617 if (gc_is_collecting(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000618 gc_list_move(gc, tolist);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900619 gc_clear_collecting(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000620 }
621 }
622 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000623}
624
625/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000626 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000627 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000628static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200629move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000630{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000631 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900632 PyGC_Head *gc = GC_NEXT(finalizers);
633 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000634 /* Note that the finalizers list may grow during this. */
635 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
636 (void) traverse(FROM_GC(gc),
637 (visitproc)visit_move,
638 (void *)finalizers);
639 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000640}
641
Tim Petersead8b7a2004-10-30 23:09:22 +0000642/* Clear all weakrefs to unreachable objects, and if such a weakref has a
643 * callback, invoke it if necessary. Note that it's possible for such
644 * weakrefs to be outside the unreachable set -- indeed, those are precisely
645 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
646 * overview & some details. Some weakrefs with callbacks may be reclaimed
647 * directly by this routine; the number reclaimed is the return value. Other
648 * weakrefs with callbacks may be moved into the `old` generation. Objects
649 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
650 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
651 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000652 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;          /* return value: # of weakrefs reclaimed here */

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        /* Grab the successor now: `op`'s weakref list may be unlinked
         * below, but `gc` itself stays in `unreachable`. */
        next = GC_NEXT(gc);

        /* Types without weakref support cannot be weakly referenced,
         * so there is nothing to clear for them. */
        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
            if (wr->wr_callback == NULL) {
                /* no callback */
                continue;
            }

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (gc_is_collecting(AS_GC(wr))) {
                /* wr is itself trash (the COLLECTING bit is set):
                 * skip the callback, as justified above. */
                continue;
            }

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        /* Take the weakref at the head of the list. */
        gc = (PyGC_Head*)wrcb_to_call._gc_next;
        op = FROM_GC(gc);
        _PyObject_ASSERT(op, PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        _PyObject_ASSERT(op, callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        /* If gc is still the list head, the DECREF above did not free
         * the weakref; it survives. */
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else {
            ++num_freed;
        }
    }

    return num_freed;
}
795
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000796static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200797debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000798{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100799 PySys_FormatStderr("gc: %s <%s %p>\n",
800 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000801}
802
Antoine Pitrou796564c2013-07-30 19:59:21 +0200803/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000804 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000805 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
806 * garbage list (a Python list), else only the objects in finalizers with
807 * __del__ methods are appended to garbage. All objects in finalizers are
808 * merged into the old list regardless.
Tim Petersbf384c22003-04-06 00:11:39 +0000809 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300810static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200811handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000812{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900813 PyGC_Head *gc = GC_NEXT(finalizers);
Tim Petersf6b80452003-04-07 19:21:15 +0000814
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300815 assert(!PyErr_Occurred());
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600816 if (_PyRuntime.gc.garbage == NULL) {
817 _PyRuntime.gc.garbage = PyList_New(0);
818 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000819 Py_FatalError("gc couldn't create gc.garbage list");
820 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900821 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000822 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000823
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600824 if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300825 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
826 PyErr_Clear();
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300827 break;
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300828 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000829 }
830 }
Tim Petersf6b80452003-04-07 19:21:15 +0000831
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000832 gc_list_merge(finalizers, old);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000833}
834
Tim Peters5fbc7b12014-05-08 17:42:19 -0500835/* Run first-time finalizers (if any) on all the objects in collectable.
836 * Note that this may remove some (or even all) of the objects from the
837 * list, due to refcounts falling to 0.
838 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200839static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500840finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200841{
842 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500843 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200844
Tim Peters5fbc7b12014-05-08 17:42:19 -0500845 /* While we're going through the loop, `finalize(op)` may cause op, or
846 * other objects, to be reclaimed via refcounts falling to zero. So
847 * there's little we can rely on about the structure of the input
848 * `collectable` list across iterations. For safety, we always take the
849 * first object in that list and move it to a temporary `seen` list.
850 * If objects vanish from the `collectable` and `seen` lists we don't
851 * care.
852 */
853 gc_list_init(&seen);
854
855 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900856 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200857 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500858 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200859 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500860 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
861 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900862 _PyGCHead_SET_FINALIZED(gc);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200863 Py_INCREF(op);
864 finalize(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300865 assert(!PyErr_Occurred());
Antoine Pitrou796564c2013-07-30 19:59:21 +0200866 Py_DECREF(op);
867 }
868 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500869 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200870}
871
/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer).  Returns 0 if every object is still unreachable, -1 if any
   object gained an external reference.  On return the list's gc_prev
   links are restored and the COLLECTING bit is cleared on every object. */
static int
check_garbage(PyGC_Head *collectable)
{
    int ret = 0;
    PyGC_Head *gc;
    /* First pass: copy each object's real refcount into the gc_refs
     * field.  This reuses the gc_prev word, so the list's prev links
     * are invalid until the second pass rebuilds them. */
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        // Use gc_refs and break gc_prev again.
        gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
    /* Cancel out references coming from inside the list itself; any
     * nonzero remainder is an external reference. */
    subtract_refs(collectable);
    PyGC_Head *prev = collectable;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        _PyObject_ASSERT_WITH_MSG(FROM_GC(gc),
                                  gc_get_refs(gc) >= 0,
                                  "refcount is too small");
        if (gc_get_refs(gc) != 0) {
            /* Externally referenced (e.g. resurrected by a finalizer). */
            ret = -1;
        }
        // Restore gc_prev here.
        _PyGCHead_SET_PREV(gc, prev);
        gc_clear_collecting(gc);
        prev = gc;
    }
    return ret;
}
901
/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
{
    inquiry clear;

    assert(!PyErr_Occurred());
    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = GC_NEXT(collectable);
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
                                  "refcount is too small");

        if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
            /* Save everything to gc.garbage instead of clearing it. */
            assert(_PyRuntime.gc.garbage != NULL);
            if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
                PyErr_Clear();
            }
        }
        else {
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                /* Keep op alive across the tp_clear call; clearing may
                 * drop the references that were keeping it alive. */
                Py_INCREF(op);
                (void) clear(op);
                if (PyErr_Occurred()) {
                    PySys_WriteStderr("Exception ignored in tp_clear of "
                                      "%.50s\n", Py_TYPE(op)->tp_name);
                    PyErr_WriteUnraisable(NULL);
                }
                Py_DECREF(op);
            }
        }
        /* If op is still at the head of collectable, the append/clear
         * above didn't free it. */
        if (GC_NEXT(collectable) == gc) {
            /* object is still alive, move it, it may die later */
            gc_list_move(gc, old);
        }
    }
}
943
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    /* Each call releases the cached objects of one built-in type; the
     * (void) casts discard the "number freed" return values. */
    (void)PyMethod_ClearFreeList();
    (void)PyFrame_ClearFreeList();
    (void)PyCFunction_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyUnicode_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
    (void)PyContext_ClearFreeList();
}
964
/* This is the main function.  Read this to understand how the
 * collection process works.
 *
 * generation       -- the highest generation to collect; all younger
 *                     generations are merged into it first.
 * n_collected      -- if non-NULL, receives the number of objects collected.
 * n_uncollectable  -- if non-NULL, receives the number of uncollectable
 *                     objects (those moved to gc.garbage).
 * nofail           -- if nonzero, clear any pending exception at the end
 *                     instead of treating it as fatal.
 * Returns the total number of unreachable objects found (collected plus
 * uncollectable).
 */
static Py_ssize_t
collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
        int nofail)
{
    int i;
    Py_ssize_t m = 0; /* # objects collected */
    Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
    PyGC_Head *young; /* the generation we are examining */
    PyGC_Head *old; /* next older generation */
    PyGC_Head unreachable; /* non-problematic unreachable trash */
    PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
    PyGC_Head *gc;
    _PyTime_t t1 = 0;   /* initialize to prevent a compiler warning */

    struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];

    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        PySys_WriteStderr("gc: collecting generation %d...\n",
                          generation);
        PySys_WriteStderr("gc: objects in each generation:");
        for (i = 0; i < NUM_GENERATIONS; i++)
            PySys_FormatStderr(" %zd",
                              gc_list_size(GEN_HEAD(i)));
        PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
                         gc_list_size(&_PyRuntime.gc.permanent_generation.head));
        t1 = _PyTime_GetMonotonicClock();

        PySys_WriteStderr("\n");
    }

    if (PyDTrace_GC_START_ENABLED())
        PyDTrace_GC_START(generation);

    /* update collection and allocation counters */
    if (generation+1 < NUM_GENERATIONS)
        _PyRuntime.gc.generations[generation+1].count += 1;
    for (i = 0; i <= generation; i++)
        _PyRuntime.gc.generations[i].count = 0;

    /* merge younger generations with one we are currently collecting */
    for (i = 0; i < generation; i++) {
        gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
    }

    /* handy references */
    young = GEN_HEAD(generation);
    if (generation < NUM_GENERATIONS-1)
        old = GEN_HEAD(generation+1);
    else
        old = young;    /* oldest generation: survivors stay put */

    validate_list(young, 0);
    validate_list(old, 0);
    /* Using ob_refcnt and gc_refs, calculate which objects in the
     * container set are reachable from outside the set (i.e., have a
     * refcount greater than 0 when all the references within the
     * set are taken into account).
     */
    update_refs(young);  // gc_prev is used for gc_refs
    subtract_refs(young);

    /* Leave everything reachable from outside young in young, and move
     * everything else (in young) to unreachable.
     * NOTE:  This used to move the reachable objects into a reachable
     * set instead.  But most things usually turn out to be reachable,
     * so it's more efficient to move the unreachable things.
     */
    gc_list_init(&unreachable);
    move_unreachable(young, &unreachable);  // gc_prev is pointer again
    validate_list(young, 0);

    untrack_tuples(young);
    /* Move reachable objects to next generation. */
    if (young != old) {
        if (generation == NUM_GENERATIONS - 2) {
            _PyRuntime.gc.long_lived_pending += gc_list_size(young);
        }
        gc_list_merge(young, old);
    }
    else {
        /* We only untrack dicts in full collections, to avoid quadratic
           dict build-up. See issue #14775. */
        untrack_dicts(young);
        _PyRuntime.gc.long_lived_pending = 0;
        _PyRuntime.gc.long_lived_total = gc_list_size(young);
    }

    /* All objects in unreachable are trash, but objects reachable from
     * legacy finalizers (e.g. tp_del) can't safely be deleted.
     */
    gc_list_init(&finalizers);
    // NEXT_MASK_UNREACHABLE is cleared here.
    // After move_legacy_finalizers(), unreachable is normal list.
    move_legacy_finalizers(&unreachable, &finalizers);
    /* finalizers contains the unreachable objects with a legacy finalizer;
     * unreachable objects reachable *from* those are also uncollectable,
     * and we move those into the finalizers list too.
     */
    move_legacy_finalizer_reachable(&finalizers);

    validate_list(&finalizers, 0);
    validate_list(&unreachable, PREV_MASK_COLLECTING);

    /* Collect statistics on collectable objects found and print
     * debugging information.
     */
    for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
        m++;
        if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
            debug_cycle("collectable", FROM_GC(gc));
        }
    }

    /* Clear weakrefs and invoke callbacks as necessary. */
    m += handle_weakrefs(&unreachable, old);

    validate_list(old, 0);
    validate_list(&unreachable, PREV_MASK_COLLECTING);

    /* Call tp_finalize on objects which have one. */
    finalize_garbage(&unreachable);

    /* If a finalizer resurrected anything, give the whole set back to
     * `old` instead of clearing it. */
    if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
        gc_list_merge(&unreachable, old);
    }
    else {
        /* Call tp_clear on objects in the unreachable set.  This will cause
         * the reference cycles to be broken.  It may also cause some objects
         * in finalizers to be freed.
         */
        delete_garbage(&unreachable, old);
    }

    /* Collect statistics on uncollectable objects found and print
     * debugging information. */
    for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
        n++;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            debug_cycle("uncollectable", FROM_GC(gc));
    }
    if (_PyRuntime.gc.debug & DEBUG_STATS) {
        _PyTime_t t2 = _PyTime_GetMonotonicClock();

        if (m == 0 && n == 0)
            PySys_WriteStderr("gc: done");
        else
            PySys_FormatStderr(
                "gc: done, %zd unreachable, %zd uncollectable",
                n+m, n);
        PySys_WriteStderr(", %.4fs elapsed\n",
                          _PyTime_AsSecondsDouble(t2 - t1));
    }

    /* Append instances in the uncollectable set to a Python
     * reachable list of garbage.  The programmer has to deal with
     * this if they insist on creating this type of structure.
     */
    handle_legacy_finalizers(&finalizers, old);
    validate_list(old, 0);

    /* Clear free list only during the collection of the highest
     * generation */
    if (generation == NUM_GENERATIONS-1) {
        clear_freelists();
    }

    if (PyErr_Occurred()) {
        if (nofail) {
            PyErr_Clear();
        }
        else {
            if (gc_str == NULL)
                gc_str = PyUnicode_FromString("garbage collection");
            PyErr_WriteUnraisable(gc_str);
            Py_FatalError("unexpected exception during garbage collection");
        }
    }

    /* Update stats */
    if (n_collected)
        *n_collected = m;
    if (n_uncollectable)
        *n_uncollectable = n;
    stats->collections++;
    stats->collected += m;
    stats->uncollectable += n;

    if (PyDTrace_GC_DONE_ENABLED())
        PyDTrace_GC_DONE(n+m);

    assert(!PyErr_Occurred());
    return n+m;
}
1160
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001161/* Invoke progress callbacks to notify clients that garbage collection
1162 * is starting or stopping
1163 */
1164static void
1165invoke_gc_callback(const char *phase, int generation,
1166 Py_ssize_t collected, Py_ssize_t uncollectable)
1167{
1168 Py_ssize_t i;
1169 PyObject *info = NULL;
1170
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001171 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001172 /* we may get called very early */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001173 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001174 return;
1175 /* The local variable cannot be rebound, check it for sanity */
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001176 assert(PyList_CheckExact(_PyRuntime.gc.callbacks));
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001177 if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001178 info = Py_BuildValue("{sisnsn}",
1179 "generation", generation,
1180 "collected", collected,
1181 "uncollectable", uncollectable);
1182 if (info == NULL) {
1183 PyErr_WriteUnraisable(NULL);
1184 return;
1185 }
1186 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001187 for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
1188 PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001189 Py_INCREF(cb); /* make sure cb doesn't go away */
1190 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001191 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001192 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001193 }
1194 else {
1195 Py_DECREF(r);
1196 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001197 Py_DECREF(cb);
1198 }
1199 Py_XDECREF(info);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001200 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001201}
1202
1203/* Perform garbage collection of a generation and invoke
1204 * progress callbacks.
1205 */
1206static Py_ssize_t
1207collect_with_callback(int generation)
1208{
1209 Py_ssize_t result, collected, uncollectable;
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001210 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001211 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001212 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001213 invoke_gc_callback("stop", generation, collected, uncollectable);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001214 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001215 return result;
1216}
1217
Neal Norwitz7b216c52006-03-04 20:01:53 +00001218static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001219collect_generations(void)
1220{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001221 int i;
1222 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001223
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001224 /* Find the oldest generation (highest numbered) where the count
1225 * exceeds the threshold. Objects in the that generation and
1226 * generations younger than it will be collected. */
1227 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001228 if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001229 /* Avoid quadratic performance degradation in number
1230 of tracked objects. See comments at the beginning
1231 of this file, and issue #4074.
1232 */
1233 if (i == NUM_GENERATIONS - 1
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001234 && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001235 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001236 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001237 break;
1238 }
1239 }
1240 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001241}
1242
Serhiy Storchaka93260282017-02-04 11:19:59 +02001243#include "clinic/gcmodule.c.h"
1244
1245/*[clinic input]
1246gc.enable
1247
1248Enable automatic garbage collection.
1249[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001250
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001251static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001252gc_enable_impl(PyObject *module)
1253/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001254{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001255 _PyRuntime.gc.enabled = 1;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001256 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001257}
1258
Serhiy Storchaka93260282017-02-04 11:19:59 +02001259/*[clinic input]
1260gc.disable
1261
1262Disable automatic garbage collection.
1263[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001264
1265static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001266gc_disable_impl(PyObject *module)
1267/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001268{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001269 _PyRuntime.gc.enabled = 0;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001270 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001271}
1272
Serhiy Storchaka93260282017-02-04 11:19:59 +02001273/*[clinic input]
1274gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001275
Serhiy Storchaka93260282017-02-04 11:19:59 +02001276Returns true if automatic garbage collection is enabled.
1277[clinic start generated code]*/
1278
1279static int
1280gc_isenabled_impl(PyObject *module)
1281/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001282{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001283 return _PyRuntime.gc.enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001284}
1285
Serhiy Storchaka93260282017-02-04 11:19:59 +02001286/*[clinic input]
1287gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001288
Serhiy Storchaka93260282017-02-04 11:19:59 +02001289 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1290
1291Run the garbage collector.
1292
1293With no arguments, run a full collection. The optional argument
1294may be an integer specifying which generation to collect. A ValueError
1295is raised if the generation number is invalid.
1296
1297The number of unreachable objects is returned.
1298[clinic start generated code]*/
1299
/* Implementation of gc.collect(generation=2).
 *
 * Validates the generation index, then runs a collection of that
 * generation unless one is already in progress (the `collecting` flag
 * guards against reentrant collection).  Returns the number of
 * unreachable objects found, or -1 with ValueError set for a bad
 * generation index.
 */
static Py_ssize_t
gc_collect_impl(PyObject *module, int generation)
/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
{
    Py_ssize_t n;

    if (generation < 0 || generation >= NUM_GENERATIONS) {
        PyErr_SetString(PyExc_ValueError, "invalid generation");
        return -1;
    }

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        /* Set the reentrancy guard around the real collection. */
        _PyRuntime.gc.collecting = 1;
        n = collect_with_callback(generation);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}
1321
Serhiy Storchaka93260282017-02-04 11:19:59 +02001322/*[clinic input]
1323gc.set_debug
1324
1325 flags: int
1326 An integer that can have the following bits turned on:
1327 DEBUG_STATS - Print statistics during collection.
1328 DEBUG_COLLECTABLE - Print collectable objects found.
1329 DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
1330 found.
1331 DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
1332 DEBUG_LEAK - Debug leaking programs (everything but STATS).
1333 /
1334
1335Set the garbage collection debugging flags.
1336
1337Debugging information is written to sys.stderr.
1338[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001339
1340static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001341gc_set_debug_impl(PyObject *module, int flags)
1342/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001343{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001344 _PyRuntime.gc.debug = flags;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001345
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001346 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001347}
1348
Serhiy Storchaka93260282017-02-04 11:19:59 +02001349/*[clinic input]
1350gc.get_debug -> int
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001351
Serhiy Storchaka93260282017-02-04 11:19:59 +02001352Get the garbage collection debugging flags.
1353[clinic start generated code]*/
1354
/* Implementation of gc.get_debug(): return the current debug flag word
 * (a bitwise OR of the module's DEBUG_* constants). */
static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    return _PyRuntime.gc.debug;
}
1361
PyDoc_STRVAR(gc_set_thresh__doc__,
"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
"\n"
"Sets the collection thresholds. Setting threshold0 to zero disables\n"
"collection.\n");

/* gc.set_threshold(): store per-generation collection thresholds.
 *
 * NOTE(review): PyArg_ParseTuple() writes converted values straight
 * into the runtime state, so a call that fails after converting the
 * first argument can leave the thresholds partially updated — confirm
 * whether that quirk is acceptable before relying on atomicity.
 */
static PyObject *
gc_set_thresh(PyObject *self, PyObject *args)
{
    int i;
    if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                          &_PyRuntime.gc.generations[0].threshold,
                          &_PyRuntime.gc.generations[1].threshold,
                          &_PyRuntime.gc.generations[2].threshold))
        return NULL;
    for (i = 2; i < NUM_GENERATIONS; i++) {
        /* generations higher than 2 get the same threshold */
        _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
    }

    Py_RETURN_NONE;
}
1384
Serhiy Storchaka93260282017-02-04 11:19:59 +02001385/*[clinic input]
1386gc.get_threshold
1387
1388Return the current collection thresholds.
1389[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001390
/* Implementation of gc.get_threshold(): return the three generation
 * thresholds as an (int, int, int) tuple. */
static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].threshold,
                         _PyRuntime.gc.generations[1].threshold,
                         _PyRuntime.gc.generations[2].threshold);
}
1400
Serhiy Storchaka93260282017-02-04 11:19:59 +02001401/*[clinic input]
1402gc.get_count
1403
1404Return a three-tuple of the current collection counts.
1405[clinic start generated code]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001406
/* Implementation of gc.get_count(): return the current allocation
 * counters of the three generations as an (int, int, int) tuple. */
static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].count,
                         _PyRuntime.gc.generations[1].count,
                         _PyRuntime.gc.generations[2].count);
}
1416
Neil Schemenauer48c70342001-08-09 15:38:31 +00001417static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001418referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001419{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001420 Py_ssize_t i;
1421 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1422 if (PyTuple_GET_ITEM(objs, i) == obj)
1423 return 1;
1424 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001425}
1426
/* Scan one GC generation list and append to resultlist every object on
 * it whose tp_traverse visits a member of the tuple objs (i.e. every
 * direct referrer).  objs and resultlist themselves are skipped so the
 * query machinery does not report itself.  Returns 1 on success, 0 on
 * error (exception set by PyList_Append).
 */
static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        if (obj == objs || obj == resultlist)
            continue;
        /* traverse() returns nonzero when referrersvisit reported a hit. */
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}
1445
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001446PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001447"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001448Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001449
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001450static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001451gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001452{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001453 int i;
1454 PyObject *result = PyList_New(0);
1455 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001456
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001457 for (i = 0; i < NUM_GENERATIONS; i++) {
1458 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1459 Py_DECREF(result);
1460 return NULL;
1461 }
1462 }
1463 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001464}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001465
Tim Peters0f81ab62003-04-08 16:39:48 +00001466/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001467static int
Tim Peters730f5532003-04-08 17:17:17 +00001468referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001469{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001470 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001471}
1472
Tim Peters730f5532003-04-08 17:17:17 +00001473PyDoc_STRVAR(gc_get_referents__doc__,
1474"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001475Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001476
1477static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001478gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001479{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001480 Py_ssize_t i;
1481 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001482
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001483 if (result == NULL)
1484 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001485
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001486 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1487 traverseproc traverse;
1488 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001489
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001490 if (! PyObject_IS_GC(obj))
1491 continue;
1492 traverse = Py_TYPE(obj)->tp_traverse;
1493 if (! traverse)
1494 continue;
1495 if (traverse(obj, (visitproc)referentsvisit, result)) {
1496 Py_DECREF(result);
1497 return NULL;
1498 }
1499 }
1500 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001501}
1502
Serhiy Storchaka93260282017-02-04 11:19:59 +02001503/*[clinic input]
1504gc.get_objects
Pablo Galindo175421b2019-02-23 03:02:06 +00001505 generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None
1506 Generation to extract the objects from.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001507
1508Return a list of objects tracked by the collector (excluding the list returned).
Pablo Galindo175421b2019-02-23 03:02:06 +00001509
1510If generation is not None, return only the objects tracked by the collector
1511that are in that generation.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001512[clinic start generated code]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001513
/* Implementation of gc.get_objects(generation=None).
 *
 * Builds a fresh list of every object tracked by the collector.  When
 * `generation` is -1 (the C encoding of Python None) all generations
 * are included; otherwise only the requested generation, after range
 * validation (ValueError for out-of-range or negative values).
 * The result list itself is never included.
 */
static PyObject *
gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
/*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL) {
        return NULL;
    }

    /* If generation is passed, we extract only that generation */
    if (generation != -1) {
        if (generation >= NUM_GENERATIONS) {
            PyErr_Format(PyExc_ValueError,
                         "generation parameter must be less than the number of "
                         "available generations (%i)",
                          NUM_GENERATIONS);
            goto error;
        }

        if (generation < 0) {
            PyErr_SetString(PyExc_ValueError,
                            "generation parameter cannot be negative");
            goto error;
        }

        if (append_objects(result, GEN_HEAD(generation))) {
            goto error;
        }

        return result;
    }

    /* If generation is not passed or None, get all objects from all generations */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            goto error;
        }
    }
    return result;

error:
    Py_DECREF(result);
    return NULL;
}
1561
Serhiy Storchaka93260282017-02-04 11:19:59 +02001562/*[clinic input]
1563gc.get_stats
1564
1565Return a list of dictionaries containing per-generation statistics.
1566[clinic start generated code]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001567
/* Implementation of gc.get_stats(): one dict per generation with the
 * keys "collections", "collected" and "uncollectable". */
static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = _PyRuntime.gc.generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        /* "sn" pairs in the format string: C string key, Py_ssize_t value. */
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1608
1609
Serhiy Storchaka93260282017-02-04 11:19:59 +02001610/*[clinic input]
1611gc.is_tracked
1612
1613 obj: object
1614 /
1615
1616Returns true if the object is tracked by the garbage collector.
1617
1618Simple atomic objects will return false.
1619[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001620
1621static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001622gc_is_tracked(PyObject *module, PyObject *obj)
1623/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001624{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001625 PyObject *result;
1626
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001627 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001628 result = Py_True;
1629 else
1630 result = Py_False;
1631 Py_INCREF(result);
1632 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001633}
1634
brainfvckc75edab2017-10-16 12:49:41 -07001635/*[clinic input]
1636gc.freeze
1637
1638Freeze all current tracked objects and ignore them for future collections.
1639
1640This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
1641Note: collection before a POSIX fork() call may free pages for future allocation
1642which can cause copy-on-write.
1643[clinic start generated code]*/
1644
1645static PyObject *
1646gc_freeze_impl(PyObject *module)
1647/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
1648{
1649 for (int i = 0; i < NUM_GENERATIONS; ++i) {
1650 gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
1651 _PyRuntime.gc.generations[i].count = 0;
1652 }
1653 Py_RETURN_NONE;
1654}
1655
1656/*[clinic input]
1657gc.unfreeze
1658
1659Unfreeze all objects in the permanent generation.
1660
1661Put all objects in the permanent generation back into oldest generation.
1662[clinic start generated code]*/
1663
1664static PyObject *
1665gc_unfreeze_impl(PyObject *module)
1666/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
1667{
1668 gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
1669 Py_RETURN_NONE;
1670}
1671
1672/*[clinic input]
Victor Stinner05d68a82018-01-18 11:15:25 +01001673gc.get_freeze_count -> Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001674
1675Return the number of objects in the permanent generation.
1676[clinic start generated code]*/
1677
Victor Stinner05d68a82018-01-18 11:15:25 +01001678static Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001679gc_get_freeze_count_impl(PyObject *module)
Victor Stinner05d68a82018-01-18 11:15:25 +01001680/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
brainfvckc75edab2017-10-16 12:49:41 -07001681{
1682 return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
1683}
1684
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001685
/* Module-level docstring for the gc module: one summary line per
 * public function. */
PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current the collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n"
"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001706
/* Method table for the gc module.  The GC_*_METHODDEF macros are
 * generated by Argument Clinic (see the #include of clinic/gcmodule.c.h
 * above); set_threshold, get_referrers and get_referents predate clinic
 * and are wired up by hand. */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL,      NULL}           /* Sentinel */
};
1729
/* Module definition.  m_size of -1 means the module keeps state in
 * globals (here, in _PyRuntime.gc) and does not support multiple
 * instances. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",              /* m_name */
    gc__doc__,         /* m_doc */
    -1,                /* m_size */
    GcMethods,         /* m_methods */
    NULL,              /* m_reload */
    NULL,              /* m_traverse */
    NULL,              /* m_clear */
    NULL               /* m_free */
};
1741
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001742PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001743PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001744{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001745 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001746
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001747 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001748
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001749 if (m == NULL)
1750 return NULL;
Tim Peters11558872003-04-06 23:30:52 +00001751
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001752 if (_PyRuntime.gc.garbage == NULL) {
1753 _PyRuntime.gc.garbage = PyList_New(0);
1754 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001755 return NULL;
1756 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001757 Py_INCREF(_PyRuntime.gc.garbage);
1758 if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001759 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001760
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001761 if (_PyRuntime.gc.callbacks == NULL) {
1762 _PyRuntime.gc.callbacks = PyList_New(0);
1763 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001764 return NULL;
1765 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001766 Py_INCREF(_PyRuntime.gc.callbacks);
1767 if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001768 return NULL;
1769
Martin v. Löwis1a214512008-06-11 05:26:20 +00001770#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001771 ADD_INT(DEBUG_STATS);
1772 ADD_INT(DEBUG_COLLECTABLE);
1773 ADD_INT(DEBUG_UNCOLLECTABLE);
1774 ADD_INT(DEBUG_SAVEALL);
1775 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001776#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001777 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001778}
1779
Guido van Rossume13ddc92003-04-17 17:29:22 +00001780/* API to invoke gc.collect() from C */
/* API to invoke gc.collect() from C.
 *
 * Collects the oldest (fullest) generation unless a collection is
 * already in progress.  Any pending exception is fetched before and
 * restored after the collection so it is not clobbered by code run
 * during collection.  Returns the number of unreachable objects found.
 */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        PyObject *exc, *value, *tb;
        _PyRuntime.gc.collecting = 1;
        /* Preserve the caller's exception state across the collection. */
        PyErr_Fetch(&exc, &value, &tb);
        n = collect_with_callback(NUM_GENERATIONS - 1);
        PyErr_Restore(exc, value, tb);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}
1799
Antoine Pitroufef34e32013-05-19 01:11:58 +02001800Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001801_PyGC_CollectIfEnabled(void)
1802{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001803 if (!_PyRuntime.gc.enabled)
Łukasz Langafef7e942016-09-09 21:47:46 -07001804 return 0;
1805
1806 return PyGC_Collect();
1807}
1808
/* Collect all generations without the callback machinery, for use
 * during interpreter shutdown.  Calls collect() directly, passing 1 as
 * the final argument (presumably a "no-fail" flag — confirm against
 * collect()'s signature, which is outside this chunk).  Requires no
 * exception to be pending on entry.
 */
Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    assert(!PyErr_Occurred());
    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively. Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (_PyRuntime.gc.collecting)
        n = 0;
    else {
        _PyRuntime.gc.collecting = 1;
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        _PyRuntime.gc.collecting = 0;
    }
    return n;
}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001830
/* At interpreter shutdown, emit a ResourceWarning when objects remain
 * in gc.garbage (unless DEBUG_SAVEALL is set, in which case gc.garbage
 * is populated on purpose).  With DEBUG_UNCOLLECTABLE also write the
 * repr() of the garbage list to stderr. */
void
_PyGC_DumpShutdownStats(void)
{
    if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
        && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
        const char *message;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(_PyRuntime.gc.garbage)))
            PyErr_WriteUnraisable(NULL);
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(_PyRuntime.gc.garbage);
            /* Encode to the filesystem encoding so it can be written as
               raw bytes to stderr. */
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}
1866
/* Finalize the collector's module-level state: release the callbacks
 * list (gc.garbage is intentionally left alone here). */
void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}
1872
Neil Schemenauer43411b52001-08-30 00:05:51 +00001873/* for debugging */
/* for debugging: dump (via _PyObject_Dump) the object owning the GC
 * header g. */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}
1879
Neil Schemenauer43411b52001-08-30 00:05:51 +00001880/* extension modules might be compiled with GC support so these
1881 functions must always be available */
1882
/* Public API: start tracking op in the collector.  Aborts with a fatal
 * assertion message if op is already tracked. */
void
PyObject_GC_Track(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_ASSERT_FAILED_MSG(op,
                                    "object already tracked "
                                    "by the garbage collector");
    }
    _PyObject_GC_TRACK(op);
}
1894
/* Public API: stop tracking op.  Unlike Track, this is a no-op when the
 * object is not tracked — see the comment below about the trashcan. */
void
PyObject_GC_UnTrack(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    /* Obscure:  the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
}
1906
/* Allocate a GC-managed object of `basicsize` bytes, preceded by a
 * hidden PyGC_Head.  Zero-fills the memory when use_calloc is nonzero.
 * Bumps the generation-0 count and triggers collect_generations() when
 * that count exceeds the generation-0 threshold — but only if the
 * collector is enabled, the threshold is nonzero, no collection is in
 * progress, and no exception is pending.  Returns the PyObject pointer
 * (past the GC header), or NULL with MemoryError set. */
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    /* Guard the addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    assert(((uintptr_t)g & 3) == 0);  // g must be aligned 4bytes boundary
    g->_gc_next = 0;
    g->_gc_prev = 0;
    _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
    if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
        _PyRuntime.gc.enabled &&
        _PyRuntime.gc.generations[0].threshold &&
        !_PyRuntime.gc.collecting &&
        !PyErr_Occurred()) {
        _PyRuntime.gc.collecting = 1;
        collect_generations();
        _PyRuntime.gc.collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
1938
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    /* malloc-style GC allocation: memory is NOT zero-initialized. */
    return _PyObject_GC_Alloc(0, basicsize);
}
1944
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    /* calloc-style GC allocation: memory is zero-initialized. */
    return _PyObject_GC_Alloc(1, basicsize);
}
1950
1951PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001952_PyObject_GC_New(PyTypeObject *tp)
1953{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001954 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1955 if (op != NULL)
1956 op = PyObject_INIT(op, tp);
1957 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001958}
1959
1960PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001961_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001962{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001963 size_t size;
1964 PyVarObject *op;
1965
1966 if (nitems < 0) {
1967 PyErr_BadInternalCall();
1968 return NULL;
1969 }
1970 size = _PyObject_VAR_SIZE(tp, nitems);
1971 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001972 if (op != NULL)
1973 op = PyObject_INIT_VAR(op, tp, nitems);
1974 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001975}
1976
1977PyVarObject *
Martin v. Löwis41290682006-02-16 14:56:14 +00001978_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001979{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001980 const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
Victor Stinnera4b2bc72018-10-26 18:00:13 +02001981 _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
1982 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001983 return (PyVarObject *)PyErr_NoMemory();
Victor Stinnera4b2bc72018-10-26 18:00:13 +02001984 }
1985
1986 PyGC_Head *g = AS_GC(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001987 g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
1988 if (g == NULL)
1989 return (PyVarObject *)PyErr_NoMemory();
1990 op = (PyVarObject *) FROM_GC(g);
1991 Py_SIZE(op) = nitems;
1992 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001993}
1994
1995void
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001996PyObject_GC_Del(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001997{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001998 PyGC_Head *g = AS_GC(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001999 if (_PyObject_GC_IS_TRACKED(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002000 gc_list_remove(g);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09002001 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06002002 if (_PyRuntime.gc.generations[0].count > 0) {
2003 _PyRuntime.gc.generations[0].count--;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002004 }
2005 PyObject_FREE(g);
Neil Schemenauer43411b52001-08-30 00:05:51 +00002006}