/*

  Reference Cycle Garbage Collection
  ==================================

  Neil Schemenauer <nas@arctrix.com>

  Based on a post on the python-dev list.  Ideas from Guido van Rossum,
  Eric Tiedemann, and various others.

  http://www.arctrix.com/nas/python/gc/

  The following mailing list threads provide a historical perspective on
  the design of this module.  Note that a fair amount of refinement has
  occurred since those discussions.

  http://mail.python.org/pipermail/python-dev/2000-March/002385.html
  http://mail.python.org/pipermail/python-dev/2000-March/002434.html
  http://mail.python.org/pipermail/python-dev/2000-March/002497.html

  For a high-level view of the collection process, read the collect
  function.

*/

#include "Python.h"
#include "pycore_context.h"
#include "pycore_initconfig.h"
#include "pycore_object.h"
#include "pycore_pyerrors.h"
#include "pycore_pymem.h"
#include "pycore_pystate.h"
#include "frameobject.h"        /* for PyFrame_ClearFreeList */
#include "pydtrace.h"
#include "pytime.h"             /* for _PyTime_GetMonotonicClock() */

typedef struct _gc_runtime_state GCState;

/*[clinic input]
module gc
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/


#ifdef Py_DEBUG
#  define GC_DEBUG
#endif

#define GC_NEXT _PyGCHead_NEXT
#define GC_PREV _PyGCHead_PREV

// update_refs() sets this bit for all objects in the current generation.
// subtract_refs() and move_unreachable() use it to tell whether a visited
// object belongs to the generation being collected.
//
// move_unreachable() removes this flag from reachable objects.
// Only unreachable objects have this flag.
//
// No object in the interpreter has this flag after GC ends.
#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING

// The lowest bit of _gc_next is used for the UNREACHABLE flag.
//
// This flag marks an object as being on the unreachable list in
// move_unreachable().
//
// Although this flag is used only in move_unreachable(), move_unreachable()
// doesn't clear it, to skip an unnecessary iteration.
// move_legacy_finalizers() removes it instead.
// Between those two calls, the unreachable list is not a normal list and
// most gc_list_* functions cannot be used on it.
#define NEXT_MASK_UNREACHABLE  (1)

/* Get an object's GC head */
#define AS_GC(o) ((PyGC_Head *)(o)-1)

/* Get the object given the GC head */
#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))

static inline int
gc_is_collecting(PyGC_Head *g)
{
    return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
}

static inline void
gc_clear_collecting(PyGC_Head *g)
{
    g->_gc_prev &= ~PREV_MASK_COLLECTING;
}

static inline Py_ssize_t
gc_get_refs(PyGC_Head *g)
{
    return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
}

static inline void
gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
{
    g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}

static inline void
gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
{
    g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
        | PREV_MASK_COLLECTING
        | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
}

static inline void
gc_decref(PyGC_Head *g)
{
    _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
                              gc_get_refs(g) > 0,
                              "refcount is too small");
    g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
}

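/* Illustrative sketch (not part of the module): how the packed-refcount
 * helpers above are meant to cooperate during a collection, assuming `op`
 * is some tracked container object:
 *
 *     PyGC_Head *g = AS_GC(op);
 *     gc_reset_refs(g, Py_REFCNT(op));       // copy refcount, set COLLECTING
 *     assert(gc_is_collecting(g));
 *     gc_decref(g);                          // one internal reference found
 *     Py_ssize_t external = gc_get_refs(g);  // what's left is external
 *     gc_clear_collecting(g);                // done with this object
 */
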
/* set for debugging information */
#define DEBUG_STATS             (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE       (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE     (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL           (1<<5) /* save all garbage in gc.garbage */
#define DEBUG_LEAK              DEBUG_COLLECTABLE | \
                                DEBUG_UNCOLLECTABLE | \
                                DEBUG_SAVEALL

#define GEN_HEAD(gcstate, n) (&(gcstate)->generations[n].head)

void
_PyGC_InitState(GCState *gcstate)
{
    gcstate->enabled = 1; /* automatic collection enabled? */

#define _GEN_HEAD(n) GEN_HEAD(gcstate, n)
    struct gc_generation generations[NUM_GENERATIONS] = {
        /* PyGC_Head,                                 threshold, count */
        {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)},  700, 0},
        {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)},   10, 0},
        {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)},   10, 0},
    };
    for (int i = 0; i < NUM_GENERATIONS; i++) {
        gcstate->generations[i] = generations[i];
    }
    gcstate->generation0 = GEN_HEAD(gcstate, 0);
    struct gc_generation permanent_generation = {
        {(uintptr_t)&gcstate->permanent_generation.head,
         (uintptr_t)&gcstate->permanent_generation.head}, 0, 0
    };
    gcstate->permanent_generation = permanent_generation;
}


PyStatus
_PyGC_Init(PyThreadState *tstate)
{
    GCState *gcstate = &tstate->interp->gc;
    if (gcstate->garbage == NULL) {
        gcstate->garbage = PyList_New(0);
        if (gcstate->garbage == NULL) {
            return _PyStatus_NO_MEMORY();
        }
    }
    return _PyStatus_OK();
}


/*
_gc_prev values
---------------

Between collections, _gc_prev is used for the doubly linked list.

The lowest two bits of _gc_prev are used for flags.
PREV_MASK_COLLECTING is used only while collecting, and is cleared before
GC ends or when _PyObject_GC_UNTRACK() is called.

During a collection, _gc_prev is temporarily used to hold gc_refs, and the
gc list is singly linked until _gc_prev is restored.

gc_refs
    At the start of a collection, update_refs() copies the true refcount
    to gc_refs, for each object in the generation being collected.
    subtract_refs() then adjusts gc_refs so that it equals the number of
    times an object is referenced directly from outside the generation
    being collected.

PREV_MASK_COLLECTING
    Objects in the generation being collected are marked
    PREV_MASK_COLLECTING in update_refs().


_gc_next values
---------------

_gc_next takes these values:

0
    The object is not tracked

!= 0
    Pointer to the next object in the GC list.
    Additionally, the lowest bit is used temporarily for the
    NEXT_MASK_UNREACHABLE flag described below.

NEXT_MASK_UNREACHABLE
    move_unreachable() moves objects not reachable (whether directly or
    indirectly) from outside the generation into an "unreachable" set and
    sets this flag.

    Objects that are found to be reachable have gc_refs set to 1.
    When this flag is set for a reachable object, the object must be in
    the "unreachable" set.
    The flag is then unset and the object is moved back to the "reachable"
    set.

    move_legacy_finalizers() will remove this flag from the "unreachable"
    set.
*/

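/* Illustrative sketch (not part of the module): with the layout described
 * above, and assuming _PyGC_PREV_SHIFT is 2, a header holding a gc_refs
 * value of 3 during a collection looks like:
 *
 *     g->_gc_prev == (3 << _PyGC_PREV_SHIFT) | PREV_MASK_COLLECTING
 *     gc_get_refs(g) == 3 and gc_is_collecting(g) is true
 *
 * and an object on the unreachable list carries the tag in _gc_next:
 *
 *     g->_gc_next == (uintptr_t)next | NEXT_MASK_UNREACHABLE
 *     next == (PyGC_Head *)(g->_gc_next & ~NEXT_MASK_UNREACHABLE)
 */
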
/*** list functions ***/

static inline void
gc_list_init(PyGC_Head *list)
{
    // The list header must not have flags set, so we can assign the
    // pointers with a simple cast.
    list->_gc_prev = (uintptr_t)list;
    list->_gc_next = (uintptr_t)list;
}

static inline int
gc_list_is_empty(PyGC_Head *list)
{
    return (list->_gc_next == (uintptr_t)list);
}

/* Append `node` to `list`. */
static inline void
gc_list_append(PyGC_Head *node, PyGC_Head *list)
{
    PyGC_Head *last = (PyGC_Head *)list->_gc_prev;

    // last <-> node
    _PyGCHead_SET_PREV(node, last);
    _PyGCHead_SET_NEXT(last, node);

    // node <-> list
    _PyGCHead_SET_NEXT(node, list);
    list->_gc_prev = (uintptr_t)node;
}

/* Remove `node` from the gc list it's currently in. */
static inline void
gc_list_remove(PyGC_Head *node)
{
    PyGC_Head *prev = GC_PREV(node);
    PyGC_Head *next = GC_NEXT(node);

    _PyGCHead_SET_NEXT(prev, next);
    _PyGCHead_SET_PREV(next, prev);

    node->_gc_next = 0; /* object is not currently tracked */
}

/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    /* Unlink from current list. */
    PyGC_Head *from_prev = GC_PREV(node);
    PyGC_Head *from_next = GC_NEXT(node);
    _PyGCHead_SET_NEXT(from_prev, from_next);
    _PyGCHead_SET_PREV(from_next, from_prev);

    /* Relink at end of new list. */
    // list must not have flags set, so we can skip the macros.
    PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
    _PyGCHead_SET_PREV(node, to_prev);
    _PyGCHead_SET_NEXT(to_prev, node);
    list->_gc_prev = (uintptr_t)node;
    _PyGCHead_SET_NEXT(node, list);
}

/* append list `from` onto list `to`; `from` becomes an empty list */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        PyGC_Head *to_tail = GC_PREV(to);
        PyGC_Head *from_head = GC_NEXT(from);
        PyGC_Head *from_tail = GC_PREV(from);
        assert(from_head != from);
        assert(from_tail != from);

        _PyGCHead_SET_NEXT(to_tail, from_head);
        _PyGCHead_SET_PREV(from_head, to_tail);

        _PyGCHead_SET_NEXT(from_tail, to);
        _PyGCHead_SET_PREV(to, from_tail);
    }
    gc_list_init(from);
}

static Py_ssize_t
gc_list_size(PyGC_Head *list)
{
    PyGC_Head *gc;
    Py_ssize_t n = 0;
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
        n++;
    }
    return n;
}

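/* Usage sketch (illustrative only): the primitives above are enough to
 * build the temporary working lists the collector uses:
 *
 *     PyGC_Head tmp;
 *     gc_list_init(&tmp);              // empty: header points at itself
 *     assert(gc_list_is_empty(&tmp));
 *     gc_list_move(gc, &tmp);          // steal a node from its old list
 *     gc_list_merge(&tmp, old);        // splice everything onto `old`
 *     assert(gc_list_is_empty(&tmp));  // merge re-initializes `tmp`
 */
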
/* Walk the list and mark all objects as non-collecting */
static inline void
gc_list_clear_collecting(PyGC_Head *collectable)
{
    PyGC_Head *gc;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        gc_clear_collecting(gc);
    }
}

/* Append objects in a GC list to a Python list.
 * Return 0 if all OK, < 0 if error (out of memory for list)
 */
static int
append_objects(PyObject *py_list, PyGC_Head *gc_list)
{
    PyGC_Head *gc;
    for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
        PyObject *op = FROM_GC(gc);
        if (op != py_list) {
            if (PyList_Append(py_list, op)) {
                return -1; /* exception */
            }
        }
    }
    return 0;
}

// Constants for validate_list's flags argument.
enum flagstates {collecting_clear_unreachable_clear,
                 collecting_clear_unreachable_set,
                 collecting_set_unreachable_clear,
                 collecting_set_unreachable_set};

#ifdef GC_DEBUG
// validate_list checks list consistency.  It also serves as documentation
// of when the flags are expected to be set and unset.
// `head` must be a doubly-linked gc list, although it's fine (expected!) if
// the prev and next pointers are "polluted" with flags.
// What's checked:
// - The `head` pointers are not polluted.
// - The objects' PREV_MASK_COLLECTING and NEXT_MASK_UNREACHABLE flags are all
//   set or clear, as specified by the 'flags' argument.
// - The prev and next pointers are mutually consistent.
static void
validate_list(PyGC_Head *head, enum flagstates flags)
{
    assert((head->_gc_prev & PREV_MASK_COLLECTING) == 0);
    assert((head->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
    uintptr_t prev_value = 0, next_value = 0;
    switch (flags) {
        case collecting_clear_unreachable_clear:
            break;
        case collecting_set_unreachable_clear:
            prev_value = PREV_MASK_COLLECTING;
            break;
        case collecting_clear_unreachable_set:
            next_value = NEXT_MASK_UNREACHABLE;
            break;
        case collecting_set_unreachable_set:
            prev_value = PREV_MASK_COLLECTING;
            next_value = NEXT_MASK_UNREACHABLE;
            break;
        default:
            assert(! "bad internal flags argument");
    }
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    while (gc != head) {
        PyGC_Head *trueprev = GC_PREV(gc);
        PyGC_Head *truenext = (PyGC_Head *)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        assert(truenext != NULL);
        assert(trueprev == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == prev_value);
        assert((gc->_gc_next & NEXT_MASK_UNREACHABLE) == next_value);
        prev = gc;
        gc = truenext;
    }
    assert(prev == GC_PREV(head));
}
#else
#define validate_list(x, y) do{}while(0)
#endif

/*** end of list stuff ***/


/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something -- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
}

/* A traversal callback for subtract_refs. */
static int
visit_decref(PyObject *op, void *parent)
{
    _PyObject_ASSERT(_PyObject_CAST(parent), !_PyObject_IsFreed(op));

    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        /* We're only interested in gc_refs for objects in the
         * generation being collected, which can be recognized
         * because only they have positive gc_refs.
         */
        if (gc_is_collecting(gc)) {
            gc_decref(gc);
        }
    }
    return 0;
}

/* Subtract internal references from gc_refs.  After this, gc_refs is >= 0
 * for all objects in containers, and is GC_REACHABLE for all tracked gc
 * objects not in containers.  The ones with gc_refs > 0 are directly
 * reachable from outside containers, and so can't be collected.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        PyObject *op = FROM_GC(gc);
        traverse = Py_TYPE(op)->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_decref,
                        op);
    }
}

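/* Worked example (illustrative only): suppose A and B form a cycle
 * (A -> B and B -> A) and a local variable also refers to A:
 *
 *     true refcounts:        A == 2, B == 1
 *     after update_refs():   gc_refs(A) == 2, gc_refs(B) == 1
 *     after subtract_refs(): gc_refs(A) == 1, gc_refs(B) == 0
 *
 * A's remaining count of 1 is the external reference, so A (and everything
 * reachable from it, including B) will survive move_unreachable() below.
 */
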
/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore objects in other generations.
    // This also skips objects "to the left" of the current position in
    // move_unreachable's scan of the 'young' list - they've already been
    // traversed, and no longer have the PREV_MASK_COLLECTING flag.
    if (! gc_is_collecting(gc)) {
        return 0;
    }
    // It would be a logic error elsewhere if the collecting flag were set on
    // an untracked object.
    assert(gc->_gc_next != 0);

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from the unreachable list because the list
        // functions don't work right in the presence of
        // NEXT_MASK_UNREACHABLE flags.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(prev),
                         prev->_gc_next & NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(next),
                         next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it.  All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");
    }
    return 0;
}

/* Move the unreachable objects from young to unreachable.  After this,
 * no object in young has the PREV_MASK_COLLECTING flag and all objects
 * in unreachable do.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 *
 * This function restores the _gc_prev pointer.  young and unreachable are
 * doubly linked lists after this function.
 * But _gc_next in the unreachable list still has the NEXT_MASK_UNREACHABLE
 * flag, so we cannot use gc_list_* functions on unreachable until we
 * remove the flag.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous elem in the young list, used to restore gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants:  all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,
                                      "refcount is too small");
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                            (visitproc)visit_reachable,
                            (void *)young);
            // relink gc_prev to prev element.
            _PyGCHead_SET_PREV(gc, prev);
            // gc is no longer in the COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need to set gc->next->prev = prev because the list is
            // singly linked here.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we need to set
            // the NEXT_MASK_UNREACHABLE flag.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in the unreachable set have the
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may pollute the unreachable list head's 'next' pointer
            // too.  That's semantically senseless but expedient here - the
            // damage is repaired when this function ends.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must be the last element remaining in the list.
    young->_gc_prev = (uintptr_t)prev;
    // don't let the pollution of the list head's next pointer leak
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;
}

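/* Illustrative note (not part of the module): while the scan above runs,
 * the young list is only singly linked, because each node's _gc_prev still
 * holds gc_refs.  A node tentatively moved to `unreachable` looks like:
 *
 *     gc->_gc_next == (uintptr_t)next | NEXT_MASK_UNREACHABLE
 *     gc->_gc_prev == (uintptr_t)prev   // a real pointer again
 *
 * which is why only this function and visit_reachable() may touch the
 * unreachable list until the mask is stripped.
 */
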
static void
untrack_tuples(PyGC_Head *head)
{
    PyGC_Head *next, *gc = GC_NEXT(head);
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = GC_NEXT(gc);
        if (PyTuple_CheckExact(op)) {
            _PyTuple_MaybeUntrack(op);
        }
        gc = next;
    }
}

/* Try to untrack all currently tracked dictionaries */
static void
untrack_dicts(PyGC_Head *head)
{
    PyGC_Head *next, *gc = GC_NEXT(head);
    while (gc != head) {
        PyObject *op = FROM_GC(gc);
        next = GC_NEXT(gc);
        if (PyDict_CheckExact(op)) {
            _PyDict_MaybeUntrack(op);
        }
        gc = next;
    }
}

/* Return true if object has a pre-PEP 442 finalization method. */
static int
has_legacy_finalizer(PyObject *op)
{
    return Py_TYPE(op)->tp_del != NULL;
}

/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes the NEXT_MASK_UNREACHABLE flag
 * from _gc_next in unreachable.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}

static inline void
clear_unreachable_mask(PyGC_Head *unreachable)
{
    /* Check that the list head does not have the unreachable bit set */
    assert(((uintptr_t)unreachable & NEXT_MASK_UNREACHABLE) == 0);

    PyGC_Head *gc, *next;
    assert((unreachable->_gc_next & NEXT_MASK_UNREACHABLE) == 0);
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        _PyObject_ASSERT((PyObject*)FROM_GC(gc), gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;
    }
    validate_list(unreachable, collecting_set_unreachable_clear);
}

/* A traversal callback for move_legacy_finalizer_reachable. */
static int
visit_move(PyObject *op, PyGC_Head *tolist)
{
    if (PyObject_IS_GC(op)) {
        PyGC_Head *gc = AS_GC(op);
        if (gc_is_collecting(gc)) {
            gc_list_move(gc, tolist);
            gc_clear_collecting(gc);
        }
    }
    return 0;
}

/* Move objects that are reachable from finalizers, from the unreachable set
 * into the finalizers set.
 */
static void
move_legacy_finalizer_reachable(PyGC_Head *finalizers)
{
    traverseproc traverse;
    PyGC_Head *gc = GC_NEXT(finalizers);
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
        /* Note that the finalizers list may grow during this. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                        (visitproc)visit_move,
                        (void *)finalizers);
    }
}

/* Clear all weakrefs to unreachable objects, and if such a weakref has a
 * callback, invoke it if necessary.  Note that it's possible for such
 * weakrefs to be outside the unreachable set -- indeed, those are precisely
 * the weakrefs whose callbacks must be invoked.  See gc_weakref.txt for
 * overview & some details.  Some weakrefs with callbacks may be reclaimed
 * directly by this routine; the number reclaimed is the return value.  Other
 * weakrefs with callbacks may be moved into the `old` generation.  Objects
 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
 * unreachable are left at GC_TENTATIVELY_UNREACHABLE.  When this returns,
 * no object in `unreachable` is weakly referenced anymore.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        next = GC_NEXT(gc);

        if (PyWeakref_Check(op)) {
            /* A weakref inside the unreachable set must be cleared.  If we
             * allow its callback to execute inside delete_garbage(), it
             * could expose objects that have tp_clear already called on
             * them.  Or, it could resurrect unreachable objects.  One way
             * this can happen is if some container objects do not implement
             * tp_traverse.  Then, wr_object can be outside the unreachable
             * set but can be deallocated as a result of breaking the
             * reference cycle.  If we don't clear the weakref, the callback
             * will run and potentially cause a crash.  See bpo-38006 for
             * one example.
             */
            _PyWeakref_ClearRef((PyWeakReference *)op);
        }

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                            PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;      /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
            if (wr->wr_callback == NULL) {
                /* no callback */
                continue;
            }

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (gc_is_collecting(AS_GC(wr))) {
                /* it should already have been cleared above */
                assert(wr->wr_object == Py_None);
                continue;
            }

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = (PyGC_Head*)wrcb_to_call._gc_next;
        op = FROM_GC(gc);
        _PyObject_ASSERT(op, PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        _PyObject_ASSERT(op, callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = _PyObject_CallOneArg(callback, (PyObject *)wr);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else {
            ++num_freed;
        }
    }

    return num_freed;
}

static void
debug_cycle(const char *msg, PyObject *op)
{
    PySys_FormatStderr("gc: %s <%s %p>\n",
                       msg, Py_TYPE(op)->tp_name, op);
}

/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
 * only from such cycles).
 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
 * garbage list (a Python list), else only the objects in finalizers with
 * __del__ methods are appended to garbage.  All objects in finalizers are
 * merged into the old list regardless.
 */
static void
handle_legacy_finalizers(PyThreadState *tstate,
                         GCState *gcstate,
                         PyGC_Head *finalizers, PyGC_Head *old)
{
    assert(!_PyErr_Occurred(tstate));
    assert(gcstate->garbage != NULL);

    PyGC_Head *gc = GC_NEXT(finalizers);
    for (; gc != finalizers; gc = GC_NEXT(gc)) {
        PyObject *op = FROM_GC(gc);

        if ((gcstate->debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
            if (PyList_Append(gcstate->garbage, op) < 0) {
                _PyErr_Clear(tstate);
                break;
            }
        }
    }

    gc_list_merge(finalizers, old);
}

/* Run first-time finalizers (if any) on all the objects in collectable.
 * Note that this may remove some (or even all) of the objects from the
 * list, due to refcounts falling to 0.
 */
static void
finalize_garbage(PyThreadState *tstate, PyGC_Head *collectable)
{
    destructor finalize;
    PyGC_Head seen;

    /* While we're going through the loop, `finalize(op)` may cause op, or
     * other objects, to be reclaimed via refcounts falling to zero.  So
     * there's little we can rely on about the structure of the input
     * `collectable` list across iterations.  For safety, we always take the
     * first object in that list and move it to a temporary `seen` list.
     * If objects vanish from the `collectable` and `seen` lists we don't
     * care.
     */
    gc_list_init(&seen);

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = GC_NEXT(collectable);
        PyObject *op = FROM_GC(gc);
        gc_list_move(gc, &seen);
        if (!_PyGCHead_FINALIZED(gc) &&
                (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
            _PyGCHead_SET_FINALIZED(gc);
            Py_INCREF(op);
            finalize(op);
            assert(!_PyErr_Occurred(tstate));
            Py_DECREF(op);
        }
    }
    gc_list_merge(&seen, collectable);
}

/* Break reference cycles by clearing the containers involved.  This is
 * tricky business as the lists can be changing and we don't know which
 * objects may be freed.  It is possible I screwed something up here.
 */
static void
delete_garbage(PyThreadState *tstate, GCState *gcstate,
               PyGC_Head *collectable, PyGC_Head *old)
{
    assert(!_PyErr_Occurred(tstate));

    while (!gc_list_is_empty(collectable)) {
        PyGC_Head *gc = GC_NEXT(collectable);
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
                                  "refcount is too small");

        if (gcstate->debug & DEBUG_SAVEALL) {
            assert(gcstate->garbage != NULL);
            if (PyList_Append(gcstate->garbage, op) < 0) {
                _PyErr_Clear(tstate);
            }
        }
        else {
            inquiry clear;
            if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
                Py_INCREF(op);
                (void) clear(op);
                if (_PyErr_Occurred(tstate)) {
                    _PyErr_WriteUnraisableMsg("in tp_clear of",
                                              (PyObject*)Py_TYPE(op));
                }
                Py_DECREF(op);
            }
        }
        if (GC_NEXT(collectable) == gc) {
            /* object is still alive, move it, it may die later */
            gc_clear_collecting(gc);
            gc_list_move(gc, old);
        }
    }
}

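/* Illustrative sketch (not part of the module): a typical tp_clear that
 * delete_garbage() ends up invoking just drops the object's references,
 * e.g. for a hypothetical container type with a single `member` field:
 *
 *     static int
 *     example_clear(exampleobject *self)
 *     {
 *         Py_CLEAR(self->member);
 *         return 0;
 *     }
 */
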
/* Clear all free lists
 * All free lists are cleared during the collection of the highest generation.
 * Allocated items in the free list may keep a pymalloc arena occupied.
 * Clearing the free lists may give back memory to the OS earlier.
 */
static void
clear_freelists(void)
{
    (void)PyFrame_ClearFreeList();
    (void)PyTuple_ClearFreeList();
    (void)PyFloat_ClearFreeList();
    (void)PyList_ClearFreeList();
    (void)PyDict_ClearFreeList();
    (void)PySet_ClearFreeList();
    (void)PyAsyncGen_ClearFreeLists();
    (void)PyContext_ClearFreeList();
}

// Show stats for objects in each generation
static void
show_stats_each_generations(GCState *gcstate)
{
    char buf[100];
    size_t pos = 0;

    for (int i = 0; i < NUM_GENERATIONS && pos < sizeof(buf); i++) {
        pos += PyOS_snprintf(buf+pos, sizeof(buf)-pos,
                             " %"PY_FORMAT_SIZE_T"d",
                             gc_list_size(GEN_HEAD(gcstate, i)));
    }

    PySys_FormatStderr(
        "gc: objects in each generation:%s\n"
        "gc: objects in permanent generation: %zd\n",
        buf, gc_list_size(&gcstate->permanent_generation.head));
}

Pablo Galindo97f12672020-01-13 12:25:05 +00001058/* Deduce which objects among "base" are unreachable from outside the list
Pablo Galindo466326d2019-10-13 16:48:59 +01001059 and move them to 'unreachable'. The process consist in the following steps:
1060
10611. Copy all reference counts to a different field (gc_prev is used to hold
1062 this copy to save memory).
10632. Traverse all objects in "base" and visit all referred objects using
Pablo Galindo97f12672020-01-13 12:25:05 +00001064 "tp_traverse" and for every visited object, subtract 1 to the reference
Pablo Galindo466326d2019-10-13 16:48:59 +01001065 count (the one that we copied in the previous step). After this step, all
1066 objects that can be reached directly from outside must have strictly positive
1067 reference count, while all unreachable objects must have a count of exactly 0.
Pablo Galindo97f12672020-01-13 12:25:05 +000010683. Identify all unreachable objects (the ones with 0 reference count) and move
Pablo Galindo466326d2019-10-13 16:48:59 +01001069 them to the "unreachable" list. This step also needs to move back to "base" all
1070 objects that were initially marked as unreachable but are referred transitively
1071 by the reachable objects (the ones with strictly positive reference count).
1072
1073Contracts:
1074
1075 * The "base" has to be a valid list with no mask set.
1076
1077 * The "unreachable" list must be uninitialized (this function calls
1078 gc_list_init over 'unreachable').
Tim Peters95bfc8a2019-10-13 16:47:04 -05001079
Pablo Galindo466326d2019-10-13 16:48:59 +01001080IMPORTANT: This function leaves 'unreachable' with the NEXT_MASK_UNREACHABLE
1081flag set but it does not clear it to skip unnecessary iteration. Before the
1082flag is cleared (for example, by using 'clear_unreachable_mask' function or
1083by a call to 'move_legacy_finalizers'), the 'unreachable' list is not a normal
1084list and we can not use most gc_list_* functions for it. */
1085static inline void
1086deduce_unreachable(PyGC_Head *base, PyGC_Head *unreachable) {
Tim Petersea55c512019-10-18 20:59:14 -05001087 validate_list(base, collecting_clear_unreachable_clear);
Pablo Galindo466326d2019-10-13 16:48:59 +01001088 /* Using ob_refcnt and gc_refs, calculate which objects in the
1089 * container set are reachable from outside the set (i.e., have a
1090 * refcount greater than 0 when all the references within the
1091 * set are taken into account).
1092 */
1093 update_refs(base); // gc_prev is used for gc_refs
1094 subtract_refs(base);
1095
1096 /* Leave everything reachable from outside base in base, and move
1097 * everything else (in base) to unreachable.
Pablo Galindo97f12672020-01-13 12:25:05 +00001098 *
Pablo Galindo466326d2019-10-13 16:48:59 +01001099 * NOTE: This used to move the reachable objects into a reachable
1100 * set instead. But most things usually turn out to be reachable,
Pablo Galindo97f12672020-01-13 12:25:05 +00001101 * so it's more efficient to move the unreachable things. It "sounds slick"
1102 * to move the unreachable objects, until you think about it - the reason it
1103 * pays isn't actually obvious.
1104 *
1105 * Suppose we create objects A, B, C in that order. They appear in the young
1106 * generation in the same order. If B points to A, and C to B, and C is
1107 * reachable from outside, then the adjusted refcounts will be 0, 0, and 1
1108 * respectively.
1109 *
1110 * When move_unreachable finds A, A is moved to the unreachable list. The
1111 * same for B when it's first encountered. Then C is traversed, B is moved
1112 * _back_ to the reachable list. B is eventually traversed, and then A is
1113 * moved back to the reachable list.
1114 *
1115 * So instead of not moving at all, the reachable objects B and A are moved
1116 * twice each. Why is this a win? A straightforward algorithm to move the
1117 * reachable objects instead would move A, B, and C once each.
1118 *
1119 * The key is that this dance leaves the objects in order C, B, A - it's
1120 * reversed from the original order. On all _subsequent_ scans, none of
1121 * them will move. Since most objects aren't in cycles, this can save an
1122 * unbounded number of moves across an unbounded number of later collections.
1123 * It can cost more only the first time the chain is scanned.
1124 *
1125 * Drawback: move_unreachable is also used to find out what's still trash
1126 * after finalizers may resurrect objects. In _that_ case most unreachable
1127 * objects will remain unreachable, so it would be more efficient to move
1128 * the reachable objects instead. But this is a one-time cost, probably not
1129 * worth complicating the code to speed just a little.
Pablo Galindo466326d2019-10-13 16:48:59 +01001130 */
1131 gc_list_init(unreachable);
1132 move_unreachable(base, unreachable); // gc_prev is pointer again
Tim Petersea55c512019-10-18 20:59:14 -05001133 validate_list(base, collecting_clear_unreachable_clear);
1134 validate_list(unreachable, collecting_set_unreachable_set);
Pablo Galindo466326d2019-10-13 16:48:59 +01001135}
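
/* A minimal, self-contained sketch of the three steps above on a toy graph.
 * Everything here (struct Node, deduce_toy) is invented for illustration and
 * is not part of CPython; it is wrapped in "#if 0" so it is never compiled as
 * part of this module, but extracted to its own file it builds with any C99
 * compiler. */
#if 0
#include <stdio.h>

#define NNODES 3

struct Node {
    int refcnt;   /* true reference count (external + internal refs)     */
    int gc_refs;  /* scratch copy, playing the role of gc_prev above     */
    int ref_to;   /* index of the one node this node references, or -1;
                     a stand-in for what tp_traverse would visit         */
};

static void
deduce_toy(struct Node nodes[NNODES])
{
    /* Step 1: copy every reference count into the scratch field. */
    for (int i = 0; i < NNODES; i++) {
        nodes[i].gc_refs = nodes[i].refcnt;
    }
    /* Step 2: subtract one for each reference originating inside the set
     * (the tp_traverse pass). */
    for (int i = 0; i < NNODES; i++) {
        if (nodes[i].ref_to != -1) {
            nodes[nodes[i].ref_to].gc_refs--;
        }
    }
    /* Step 3, simplified: gc_refs > 0 means directly reachable from
     * outside.  The real move_unreachable() additionally rescues nodes
     * reachable *from* such nodes (here, B and C via A). */
    for (int i = 0; i < NNODES; i++) {
        printf("node %c: %s\n", 'A' + i,
               nodes[i].gc_refs > 0 ? "externally reachable"
                                    : "trash candidate");
    }
}

int
main(void)
{
    /* Cycle A -> B -> C -> A plus one external reference to A.
     * True refcounts: A=2 (external + C), B=1 (A), C=1 (B). */
    struct Node nodes[NNODES] = {
        { 2, 0, 1 },   /* A references B */
        { 1, 0, 2 },   /* B references C */
        { 1, 0, 0 },   /* C references A */
    };
    deduce_toy(nodes);
    return 0;
}
#endif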
1136
1137/* Handle objects that may have been resurrected after a call to 'finalize_garbage',
1138 moving them to 'old_generation' and placing the rest on 'still_unreachable'.
1139
1140 Contracts:
1141 * After this function 'unreachable' must not be used anymore and 'still_unreachable'
1142 will contain the objects that did not resurrect.
1143
1144 * The "still_unreachable" list must be uninitialized (this function calls
1145 gc_list_init over 'still_unreachable').
1146
1147IMPORTANT: After a call to this function, the 'still_unreachable' set will have the
1148PREV_MASK_COLLECTING flag set, but the objects in this set are going to be removed so
1149we can skip the expense of clearing the flag to avoid an extra iteration. */
1150static inline void
1151handle_resurrected_objects(PyGC_Head *unreachable, PyGC_Head* still_unreachable,
1152 PyGC_Head *old_generation)
1153{
1154 // Remove the PREV_MASK_COLLECTING from unreachable
1155 // to prepare it for a new call to 'deduce_unreachable'
1156 gc_list_clear_collecting(unreachable);
1157
1158 // After the call to deduce_unreachable, the 'still_unreachable' set will
1159 // have the PREV_MASK_COLLECTING flag set, but the objects are going to be
1160 // removed so we can skip the expense of clearing the flag.
1161 PyGC_Head* resurrected = unreachable;
1162 deduce_unreachable(resurrected, still_unreachable);
1163 clear_unreachable_mask(still_unreachable);
1164
1165 // Move the resurrected objects to the old generation for future collection.
1166 gc_list_merge(resurrected, old_generation);
1167}
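
/* For context, a hedged sketch (not from this module) of the situation this
 * function handles: an extension type whose tp_finalize resurrects the object
 * by stashing a new reference somewhere reachable.  The type name and the
 * "survivors" list are hypothetical; the PyErr_Fetch/PyErr_Restore bracketing
 * is the documented requirement for tp_finalize. */
#if 0
#include <Python.h>

static PyObject *survivors;   /* hypothetical module-level list */

static void
phoenix_finalize(PyObject *self)
{
    PyObject *error_type, *error_value, *error_traceback;

    /* tp_finalize must not clobber a pending exception. */
    PyErr_Fetch(&error_type, &error_value, &error_traceback);

    /* Resurrection: PyList_Append stores a new strong reference, so the
     * object is reachable again and deduce_unreachable() above will pull
     * it out of the unreachable set on the second pass. */
    if (survivors != NULL && PyList_Append(survivors, self) < 0) {
        PyErr_WriteUnraisable(self);
    }

    PyErr_Restore(error_type, error_value, error_traceback);
}
#endif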
1168
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001169/* This is the main function. Read this to understand how the
1170 * collection process works. */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001171static Py_ssize_t
Victor Stinner67e0de62019-11-20 11:48:18 +01001172collect(PyThreadState *tstate, int generation,
Victor Stinner9db03242019-04-26 02:32:01 +02001173 Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable, int nofail)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001174{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001175 int i;
1176 Py_ssize_t m = 0; /* # objects collected */
1177 Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
1178 PyGC_Head *young; /* the generation we are examining */
1179 PyGC_Head *old; /* next older generation */
1180 PyGC_Head unreachable; /* non-problematic unreachable trash */
1181 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
1182 PyGC_Head *gc;
Victor Stinner7181dec2015-03-27 17:47:53 +01001183 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
Victor Stinner72474072019-11-20 12:25:50 +01001184 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrou40f6b122014-05-24 19:21:53 +02001185
Victor Stinner67e0de62019-11-20 11:48:18 +01001186 if (gcstate->debug & DEBUG_STATS) {
Inada Naokibf8162c2019-08-02 16:25:29 +09001187 PySys_WriteStderr("gc: collecting generation %d...\n", generation);
Victor Stinner67e0de62019-11-20 11:48:18 +01001188 show_stats_each_generations(gcstate);
Victor Stinner7181dec2015-03-27 17:47:53 +01001189 t1 = _PyTime_GetMonotonicClock();
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001190 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001191
Łukasz Langaa785c872016-09-09 17:37:37 -07001192 if (PyDTrace_GC_START_ENABLED())
1193 PyDTrace_GC_START(generation);
1194
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001195 /* update collection and allocation counters */
1196 if (generation+1 < NUM_GENERATIONS)
Victor Stinner67e0de62019-11-20 11:48:18 +01001197 gcstate->generations[generation+1].count += 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001198 for (i = 0; i <= generation; i++)
Victor Stinner67e0de62019-11-20 11:48:18 +01001199 gcstate->generations[i].count = 0;
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001200
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001201 /* merge younger generations with one we are currently collecting */
1202 for (i = 0; i < generation; i++) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001203 gc_list_merge(GEN_HEAD(gcstate, i), GEN_HEAD(gcstate, generation));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001204 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001205
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001206 /* handy references */
Victor Stinner67e0de62019-11-20 11:48:18 +01001207 young = GEN_HEAD(gcstate, generation);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001208 if (generation < NUM_GENERATIONS-1)
Victor Stinner67e0de62019-11-20 11:48:18 +01001209 old = GEN_HEAD(gcstate, generation+1);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001210 else
1211 old = young;
Tim Petersea55c512019-10-18 20:59:14 -05001212 validate_list(old, collecting_clear_unreachable_clear);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001213
Pablo Galindo466326d2019-10-13 16:48:59 +01001214 deduce_unreachable(young, &unreachable);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001215
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001216 untrack_tuples(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001217 /* Move reachable objects to next generation. */
1218 if (young != old) {
1219 if (generation == NUM_GENERATIONS - 2) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001220 gcstate->long_lived_pending += gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001221 }
1222 gc_list_merge(young, old);
1223 }
1224 else {
Pablo Galindo97f12672020-01-13 12:25:05 +00001225 /* We only un-track dicts in full collections, to avoid quadratic
Antoine Pitroue1ad3da2012-05-28 22:22:34 +02001226 dict build-up. See issue #14775. */
1227 untrack_dicts(young);
Victor Stinner67e0de62019-11-20 11:48:18 +01001228 gcstate->long_lived_pending = 0;
1229 gcstate->long_lived_total = gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001230 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001231
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001232 /* All objects in unreachable are trash, but objects reachable from
Antoine Pitrou796564c2013-07-30 19:59:21 +02001233 * legacy finalizers (e.g. tp_del) can't safely be deleted.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001234 */
1235 gc_list_init(&finalizers);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001236 // NEXT_MASK_UNREACHABLE is cleared here.
1237 // After move_legacy_finalizers(), unreachable is normal list.
Antoine Pitrou796564c2013-07-30 19:59:21 +02001238 move_legacy_finalizers(&unreachable, &finalizers);
1239 /* finalizers contains the unreachable objects with a legacy finalizer;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001240 * unreachable objects reachable *from* those are also uncollectable,
1241 * and we move those into the finalizers list too.
1242 */
Antoine Pitrou796564c2013-07-30 19:59:21 +02001243 move_legacy_finalizer_reachable(&finalizers);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001244
Tim Petersea55c512019-10-18 20:59:14 -05001245 validate_list(&finalizers, collecting_clear_unreachable_clear);
1246 validate_list(&unreachable, collecting_set_unreachable_clear);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001247
Tim Petersecbf35f2019-10-09 12:37:30 -05001248 /* Print debugging information. */
Victor Stinner67e0de62019-11-20 11:48:18 +01001249 if (gcstate->debug & DEBUG_COLLECTABLE) {
Tim Petersecbf35f2019-10-09 12:37:30 -05001250 for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001251 debug_cycle("collectable", FROM_GC(gc));
1252 }
1253 }
Tim Petersead8b7a2004-10-30 23:09:22 +00001254
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001255 /* Clear weakrefs and invoke callbacks as necessary. */
1256 m += handle_weakrefs(&unreachable, old);
Tim Petersead8b7a2004-10-30 23:09:22 +00001257
Tim Petersea55c512019-10-18 20:59:14 -05001258 validate_list(old, collecting_clear_unreachable_clear);
1259 validate_list(&unreachable, collecting_set_unreachable_clear);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001260
Antoine Pitrou796564c2013-07-30 19:59:21 +02001261 /* Call tp_finalize on objects which have one. */
Victor Stinner67e0de62019-11-20 11:48:18 +01001262 finalize_garbage(tstate, &unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001263
Pablo Galindo466326d2019-10-13 16:48:59 +01001264 /* Handle any objects that may have resurrected after the call
1265 * to 'finalize_garbage' and continue the collection with the
1266 * objects that are still unreachable */
1267 PyGC_Head final_unreachable;
1268 handle_resurrected_objects(&unreachable, &final_unreachable, old);
1269
1270 /* Call tp_clear on objects in the final_unreachable set. This will cause
1271 * the reference cycles to be broken. It may also cause some objects
1272 * in finalizers to be freed.
1273 */
1274 m += gc_list_size(&final_unreachable);
Victor Stinner67e0de62019-11-20 11:48:18 +01001275 delete_garbage(tstate, gcstate, &final_unreachable, old);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001276
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001277 /* Collect statistics on uncollectable objects found and print
1278 * debugging information. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001279 for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001280 n++;
Victor Stinner67e0de62019-11-20 11:48:18 +01001281 if (gcstate->debug & DEBUG_UNCOLLECTABLE)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001282 debug_cycle("uncollectable", FROM_GC(gc));
1283 }
Victor Stinner67e0de62019-11-20 11:48:18 +01001284 if (gcstate->debug & DEBUG_STATS) {
Inada Naokibf8162c2019-08-02 16:25:29 +09001285 double d = _PyTime_AsSecondsDouble(_PyTime_GetMonotonicClock() - t1);
Inada Naoki013e52f2019-08-31 09:13:42 +09001286 PySys_WriteStderr(
1287 "gc: done, %" PY_FORMAT_SIZE_T "d unreachable, "
1288 "%" PY_FORMAT_SIZE_T "d uncollectable, %.4fs elapsed\n",
Inada Naokibf8162c2019-08-02 16:25:29 +09001289 n+m, n, d);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001290 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001291
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001292 /* Append instances in the uncollectable set to a Python
1293 * reachable list of garbage. The programmer has to deal with
1294 * this if they insist on creating this type of structure.
1295 */
Victor Stinner67e0de62019-11-20 11:48:18 +01001296 handle_legacy_finalizers(tstate, gcstate, &finalizers, old);
Tim Petersea55c512019-10-18 20:59:14 -05001297 validate_list(old, collecting_clear_unreachable_clear);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001298
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001299 /* Clear free list only during the collection of the highest
1300 * generation */
1301 if (generation == NUM_GENERATIONS-1) {
1302 clear_freelists();
1303 }
Christian Heimesa156e092008-02-16 07:38:31 +00001304
Victor Stinner2e969062019-11-20 01:49:32 +01001305 if (_PyErr_Occurred(tstate)) {
Antoine Pitroufef34e32013-05-19 01:11:58 +02001306 if (nofail) {
Victor Stinner2e969062019-11-20 01:49:32 +01001307 _PyErr_Clear(tstate);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001308 }
1309 else {
Victor Stinner656c45e2020-01-24 18:05:24 +01001310 _PyErr_WriteUnraisableMsg("in garbage collection", NULL);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001311 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001312 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001313
Antoine Pitroud4156c12012-10-30 22:43:19 +01001314 /* Update stats */
Victor Stinner9db03242019-04-26 02:32:01 +02001315 if (n_collected) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001316 *n_collected = m;
Victor Stinner9db03242019-04-26 02:32:01 +02001317 }
1318 if (n_uncollectable) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001319 *n_uncollectable = n;
Victor Stinner9db03242019-04-26 02:32:01 +02001320 }
1321
Victor Stinner67e0de62019-11-20 11:48:18 +01001322 struct gc_generation_stats *stats = &gcstate->generation_stats[generation];
Antoine Pitroud4156c12012-10-30 22:43:19 +01001323 stats->collections++;
1324 stats->collected += m;
1325 stats->uncollectable += n;
Łukasz Langaa785c872016-09-09 17:37:37 -07001326
Victor Stinner9db03242019-04-26 02:32:01 +02001327 if (PyDTrace_GC_DONE_ENABLED()) {
Victor Stinner2e969062019-11-20 01:49:32 +01001328 PyDTrace_GC_DONE(n + m);
Victor Stinner9db03242019-04-26 02:32:01 +02001329 }
Łukasz Langaa785c872016-09-09 17:37:37 -07001330
Victor Stinner2e969062019-11-20 01:49:32 +01001331 assert(!_PyErr_Occurred(tstate));
1332 return n + m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001333}
1334
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001335/* Invoke progress callbacks to notify clients that garbage collection
1336 * is starting or stopping
1337 */
1338static void
Victor Stinner67e0de62019-11-20 11:48:18 +01001339invoke_gc_callback(PyThreadState *tstate, const char *phase,
Victor Stinner9db03242019-04-26 02:32:01 +02001340 int generation, Py_ssize_t collected,
1341 Py_ssize_t uncollectable)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001342{
Victor Stinner67e0de62019-11-20 11:48:18 +01001343 assert(!_PyErr_Occurred(tstate));
Victor Stinner9db03242019-04-26 02:32:01 +02001344
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001345 /* we may get called very early */
Victor Stinner72474072019-11-20 12:25:50 +01001346 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001347 if (gcstate->callbacks == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001348 return;
Victor Stinner9db03242019-04-26 02:32:01 +02001349 }
1350
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001351 /* The local variable cannot be rebound, check it for sanity */
Victor Stinner67e0de62019-11-20 11:48:18 +01001352 assert(PyList_CheckExact(gcstate->callbacks));
Victor Stinner9db03242019-04-26 02:32:01 +02001353 PyObject *info = NULL;
Victor Stinner67e0de62019-11-20 11:48:18 +01001354 if (PyList_GET_SIZE(gcstate->callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001355 info = Py_BuildValue("{sisnsn}",
1356 "generation", generation,
1357 "collected", collected,
1358 "uncollectable", uncollectable);
1359 if (info == NULL) {
1360 PyErr_WriteUnraisable(NULL);
1361 return;
1362 }
1363 }
Victor Stinner67e0de62019-11-20 11:48:18 +01001364 for (Py_ssize_t i=0; i<PyList_GET_SIZE(gcstate->callbacks); i++) {
1365 PyObject *r, *cb = PyList_GET_ITEM(gcstate->callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001366 Py_INCREF(cb); /* make sure cb doesn't go away */
1367 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001368 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001369 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001370 }
1371 else {
1372 Py_DECREF(r);
1373 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001374 Py_DECREF(cb);
1375 }
1376 Py_XDECREF(info);
Victor Stinner67e0de62019-11-20 11:48:18 +01001377 assert(!_PyErr_Occurred(tstate));
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001378}
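
/* The Python-level protocol implied above, as a hedged embedding sketch:
 * each callback is called as cb(phase, info), with phase "start" or "stop"
 * and info carrying "generation", "collected" and "uncollectable".  The
 * snippet is illustrative only and not part of this module. */
#if 0
#include <Python.h>

int
main(void)
{
    Py_Initialize();
    PyRun_SimpleString(
        "import gc\n"
        "def cb(phase, info):\n"
        "    print(phase, info)\n"
        "gc.callbacks.append(cb)\n"
        "gc.collect()\n");       /* prints the 'start' and 'stop' events */
    return Py_FinalizeEx();
}
#endif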
1379
1380/* Perform garbage collection of a generation and invoke
1381 * progress callbacks.
1382 */
1383static Py_ssize_t
Victor Stinner67e0de62019-11-20 11:48:18 +01001384collect_with_callback(PyThreadState *tstate, int generation)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001385{
Victor Stinner67e0de62019-11-20 11:48:18 +01001386 assert(!_PyErr_Occurred(tstate));
Victor Stinner9db03242019-04-26 02:32:01 +02001387 Py_ssize_t result, collected, uncollectable;
Victor Stinner67e0de62019-11-20 11:48:18 +01001388 invoke_gc_callback(tstate, "start", generation, 0, 0);
1389 result = collect(tstate, generation, &collected, &uncollectable, 0);
1390 invoke_gc_callback(tstate, "stop", generation, collected, uncollectable);
1391 assert(!_PyErr_Occurred(tstate));
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001392 return result;
1393}
1394
Neal Norwitz7b216c52006-03-04 20:01:53 +00001395static Py_ssize_t
Victor Stinner67e0de62019-11-20 11:48:18 +01001396collect_generations(PyThreadState *tstate)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001397{
Victor Stinner72474072019-11-20 12:25:50 +01001398 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001399 /* Find the oldest generation (highest numbered) where the count
1400 * exceeds the threshold. Objects in that generation and
1401 * generations younger than it will be collected. */
Victor Stinner9db03242019-04-26 02:32:01 +02001402 Py_ssize_t n = 0;
1403 for (int i = NUM_GENERATIONS-1; i >= 0; i--) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001404 if (gcstate->generations[i].count > gcstate->generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001405 /* Avoid quadratic performance degradation in the number
Pablo Galindo90913982019-12-27 21:55:56 +00001406 of tracked objects (see also issue #4074):
1407
1408 To limit the cost of garbage collection, there are two strategies:
1409 - make each collection faster, e.g. by scanning fewer objects
1410 - do fewer collections
1411 This heuristic is about the latter strategy.
1412
1413 In addition to the various configurable thresholds, we only trigger a
1414 full collection if the ratio
1415
1416 long_lived_pending / long_lived_total
1417
1418 is above a given value (hardwired to 25%).
1419
1420 The reason is that, while "non-full" collections (i.e., collections of
1421 the young and middle generations) will always examine roughly the same
1422 number of objects -- determined by the aforementioned thresholds --,
1423 the cost of a full collection is proportional to the total number of
1424 long-lived objects, which is virtually unbounded.
1425
1426 Indeed, it has been remarked that doing a full collection every
1427 <constant number> of object creations entails a dramatic performance
1428 degradation in workloads which consist of creating and storing lots of
1429 long-lived objects (e.g. building a large list of GC-tracked objects would
1430 show quadratic performance, instead of linear as expected: see issue #4074).
1431
1432 Using the above ratio, instead, yields amortized linear performance in
1433 the total number of objects (the effect of which can be summarized
1434 thusly: "each full garbage collection is more and more costly as the
1435 number of objects grows, but we do fewer and fewer of them").
1436
1437 This heuristic was suggested by Martin von Löwis on python-dev in
1438 June 2008. His original analysis and proposal can be found at:
1439 http://mail.python.org/pipermail/python-dev/2008-June/080579.html
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001440 */
1441 if (i == NUM_GENERATIONS - 1
Victor Stinner67e0de62019-11-20 11:48:18 +01001442 && gcstate->long_lived_pending < gcstate->long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001443 continue;
Victor Stinner67e0de62019-11-20 11:48:18 +01001444 n = collect_with_callback(tstate, i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001445 break;
1446 }
1447 }
1448 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001449}
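
/* The full-collection guard above, isolated as a sketch with a worked
 * example (the helper name is invented).  With long_lived_total = 1,000,000
 * the cutoff is 250,000: pending = 200,000 skips the full collection, while
 * pending = 300,000 triggers it. */
#if 0
#include <Python.h>

static int
full_collection_due(Py_ssize_t long_lived_pending, Py_ssize_t long_lived_total)
{
    /* Mirrors the "continue" condition in collect_generations(). */
    return long_lived_pending >= long_lived_total / 4;
}
#endif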
1450
Serhiy Storchaka93260282017-02-04 11:19:59 +02001451#include "clinic/gcmodule.c.h"
1452
1453/*[clinic input]
1454gc.enable
1455
1456Enable automatic garbage collection.
1457[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001458
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001459static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001460gc_enable_impl(PyObject *module)
1461/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001462{
Victor Stinner67e0de62019-11-20 11:48:18 +01001463 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001464 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001465 gcstate->enabled = 1;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001466 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001467}
1468
Serhiy Storchaka93260282017-02-04 11:19:59 +02001469/*[clinic input]
1470gc.disable
1471
1472Disable automatic garbage collection.
1473[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001474
1475static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001476gc_disable_impl(PyObject *module)
1477/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001478{
Victor Stinner67e0de62019-11-20 11:48:18 +01001479 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001480 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001481 gcstate->enabled = 0;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001482 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001483}
1484
Serhiy Storchaka93260282017-02-04 11:19:59 +02001485/*[clinic input]
1486gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001487
Serhiy Storchaka93260282017-02-04 11:19:59 +02001488Returns true if automatic garbage collection is enabled.
1489[clinic start generated code]*/
1490
1491static int
1492gc_isenabled_impl(PyObject *module)
1493/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001494{
Victor Stinner67e0de62019-11-20 11:48:18 +01001495 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001496 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001497 return gcstate->enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001498}
1499
Serhiy Storchaka93260282017-02-04 11:19:59 +02001500/*[clinic input]
1501gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001502
Serhiy Storchaka93260282017-02-04 11:19:59 +02001503 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1504
1505Run the garbage collector.
1506
1507With no arguments, run a full collection. The optional argument
1508may be an integer specifying which generation to collect. A ValueError
1509is raised if the generation number is invalid.
1510
1511The number of unreachable objects is returned.
1512[clinic start generated code]*/
1513
1514static Py_ssize_t
1515gc_collect_impl(PyObject *module, int generation)
1516/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001517{
Victor Stinner2e969062019-11-20 01:49:32 +01001518 PyThreadState *tstate = _PyThreadState_GET();
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001519
Serhiy Storchaka93260282017-02-04 11:19:59 +02001520 if (generation < 0 || generation >= NUM_GENERATIONS) {
Victor Stinner2e969062019-11-20 01:49:32 +01001521 _PyErr_SetString(tstate, PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001522 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001523 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001524
Victor Stinner72474072019-11-20 12:25:50 +01001525 GCState *gcstate = &tstate->interp->gc;
Victor Stinner9db03242019-04-26 02:32:01 +02001526 Py_ssize_t n;
Victor Stinner67e0de62019-11-20 11:48:18 +01001527 if (gcstate->collecting) {
Victor Stinner9db03242019-04-26 02:32:01 +02001528 /* already collecting, don't do anything */
1529 n = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001530 }
Victor Stinner9db03242019-04-26 02:32:01 +02001531 else {
Victor Stinner67e0de62019-11-20 11:48:18 +01001532 gcstate->collecting = 1;
1533 n = collect_with_callback(tstate, generation);
1534 gcstate->collecting = 0;
Victor Stinner9db03242019-04-26 02:32:01 +02001535 }
Serhiy Storchaka93260282017-02-04 11:19:59 +02001536 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001537}
1538
Serhiy Storchaka93260282017-02-04 11:19:59 +02001539/*[clinic input]
1540gc.set_debug
1541
1542 flags: int
1543 An integer that can have the following bits turned on:
1544 DEBUG_STATS - Print statistics during collection.
1545 DEBUG_COLLECTABLE - Print collectable objects found.
1546 DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
1547 found.
1548 DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
1549 DEBUG_LEAK - Debug leaking programs (everything but STATS).
1550 /
1551
1552Set the garbage collection debugging flags.
1553
1554Debugging information is written to sys.stderr.
1555[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001556
1557static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001558gc_set_debug_impl(PyObject *module, int flags)
1559/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001560{
Victor Stinner67e0de62019-11-20 11:48:18 +01001561 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001562 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001563 gcstate->debug = flags;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001564 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001565}
1566
Serhiy Storchaka93260282017-02-04 11:19:59 +02001567/*[clinic input]
1568gc.get_debug -> int
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001569
Serhiy Storchaka93260282017-02-04 11:19:59 +02001570Get the garbage collection debugging flags.
1571[clinic start generated code]*/
1572
1573static int
1574gc_get_debug_impl(PyObject *module)
1575/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001576{
Victor Stinner67e0de62019-11-20 11:48:18 +01001577 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001578 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001579 return gcstate->debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001580}
1581
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001582PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001583"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001584"\n"
1585"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001586"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001587
1588static PyObject *
Victor Stinner9db03242019-04-26 02:32:01 +02001589gc_set_threshold(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001590{
Victor Stinner67e0de62019-11-20 11:48:18 +01001591 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001592 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001593 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
Victor Stinner67e0de62019-11-20 11:48:18 +01001594 &gcstate->generations[0].threshold,
1595 &gcstate->generations[1].threshold,
1596 &gcstate->generations[2].threshold))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001597 return NULL;
Victor Stinner9db03242019-04-26 02:32:01 +02001598 for (int i = 3; i < NUM_GENERATIONS; i++) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001599 /* generations higher than 2 get the same threshold */
Victor Stinner67e0de62019-11-20 11:48:18 +01001600 gcstate->generations[i].threshold = gcstate->generations[2].threshold;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001601 }
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001602 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001603}
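
/* Hedged sketch: tuning the thresholds from an embedding application,
 * equivalent to gc.set_threshold(700, 10, 10) in Python.  The helper name
 * is invented; the API calls are the standard C-API ones. */
#if 0
#include <Python.h>

static int
tune_gc_thresholds(void)
{
    PyObject *gc_module = PyImport_ImportModule("gc");
    if (gc_module == NULL) {
        return -1;
    }
    PyObject *result = PyObject_CallMethod(gc_module, "set_threshold",
                                           "iii", 700, 10, 10);
    Py_DECREF(gc_module);
    if (result == NULL) {
        return -1;
    }
    Py_DECREF(result);
    return 0;
}
#endif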
1604
Serhiy Storchaka93260282017-02-04 11:19:59 +02001605/*[clinic input]
1606gc.get_threshold
1607
1608Return the current collection thresholds.
1609[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001610
1611static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001612gc_get_threshold_impl(PyObject *module)
1613/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001614{
Victor Stinner67e0de62019-11-20 11:48:18 +01001615 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001616 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001617 return Py_BuildValue("(iii)",
Victor Stinner67e0de62019-11-20 11:48:18 +01001618 gcstate->generations[0].threshold,
1619 gcstate->generations[1].threshold,
1620 gcstate->generations[2].threshold);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001621}
1622
Serhiy Storchaka93260282017-02-04 11:19:59 +02001623/*[clinic input]
1624gc.get_count
1625
1626Return a three-tuple of the current collection counts.
1627[clinic start generated code]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001628
1629static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001630gc_get_count_impl(PyObject *module)
1631/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001632{
Victor Stinner67e0de62019-11-20 11:48:18 +01001633 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001634 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001635 return Py_BuildValue("(iii)",
Victor Stinner67e0de62019-11-20 11:48:18 +01001636 gcstate->generations[0].count,
1637 gcstate->generations[1].count,
1638 gcstate->generations[2].count);
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001639}
1640
Neil Schemenauer48c70342001-08-09 15:38:31 +00001641static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001642referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001643{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001644 Py_ssize_t i;
1645 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1646 if (PyTuple_GET_ITEM(objs, i) == obj)
1647 return 1;
1648 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001649}
1650
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001651static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001652gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001653{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001654 PyGC_Head *gc;
1655 PyObject *obj;
1656 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001657 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001658 obj = FROM_GC(gc);
1659 traverse = Py_TYPE(obj)->tp_traverse;
1660 if (obj == objs || obj == resultlist)
1661 continue;
1662 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1663 if (PyList_Append(resultlist, obj) < 0)
1664 return 0; /* error */
1665 }
1666 }
1667 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001668}
1669
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001670PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001671"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001672Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001673
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001674static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001675gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001676{
Victor Stinner67e0de62019-11-20 11:48:18 +01001677 PyThreadState *tstate = _PyThreadState_GET();
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001678 int i;
1679 PyObject *result = PyList_New(0);
Victor Stinner67e0de62019-11-20 11:48:18 +01001680 if (!result) {
1681 return NULL;
1682 }
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001683
Victor Stinner72474072019-11-20 12:25:50 +01001684 GCState *gcstate = &tstate->interp->gc;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001685 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001686 if (!(gc_referrers_for(args, GEN_HEAD(gcstate, i), result))) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001687 Py_DECREF(result);
1688 return NULL;
1689 }
1690 }
1691 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001692}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001693
Tim Peters0f81ab62003-04-08 16:39:48 +00001694/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001695static int
Tim Peters730f5532003-04-08 17:17:17 +00001696referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001697{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001698 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001699}
1700
Tim Peters730f5532003-04-08 17:17:17 +00001701PyDoc_STRVAR(gc_get_referents__doc__,
1702"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001703Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001704
1705static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001706gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001707{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001708 Py_ssize_t i;
1709 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001710
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001711 if (result == NULL)
1712 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001713
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001714 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1715 traverseproc traverse;
1716 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001717
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001718 if (! PyObject_IS_GC(obj))
1719 continue;
1720 traverse = Py_TYPE(obj)->tp_traverse;
1721 if (! traverse)
1722 continue;
1723 if (traverse(obj, (visitproc)referentsvisit, result)) {
1724 Py_DECREF(result);
1725 return NULL;
1726 }
1727 }
1728 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001729}
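
/* get_referents() reports exactly what an object's tp_traverse visits.  As a
 * hedged sketch, a hypothetical two-slot container type would implement its
 * traversal with the standard Py_VISIT pattern: */
#if 0
#include <Python.h>

typedef struct {
    PyObject_HEAD
    PyObject *first;    /* may be NULL */
    PyObject *second;   /* may be NULL */
} PairObject;

static int
pair_traverse(PairObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->first);    /* Py_VISIT returns early on a nonzero visit() */
    Py_VISIT(self->second);   /* NULL slots are skipped by the macro         */
    return 0;
}
#endif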
1730
Serhiy Storchaka93260282017-02-04 11:19:59 +02001731/*[clinic input]
1732gc.get_objects
Pablo Galindo175421b2019-02-23 03:02:06 +00001733 generation: Py_ssize_t(accept={int, NoneType}, c_default="-1") = None
1734 Generation to extract the objects from.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001735
1736Return a list of objects tracked by the collector (excluding the list returned).
Pablo Galindo175421b2019-02-23 03:02:06 +00001737
1738If generation is not None, return only the objects tracked by the collector
1739that are in that generation.
Serhiy Storchaka93260282017-02-04 11:19:59 +02001740[clinic start generated code]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001741
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001742static PyObject *
Pablo Galindo175421b2019-02-23 03:02:06 +00001743gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
1744/*[clinic end generated code: output=48b35fea4ba6cb0e input=ef7da9df9806754c]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001745{
Victor Stinner67e0de62019-11-20 11:48:18 +01001746 PyThreadState *tstate = _PyThreadState_GET();
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001747 int i;
1748 PyObject* result;
Victor Stinner72474072019-11-20 12:25:50 +01001749 GCState *gcstate = &tstate->interp->gc;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001750
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001751 result = PyList_New(0);
Pablo Galindo175421b2019-02-23 03:02:06 +00001752 if (result == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001753 return NULL;
Pablo Galindo175421b2019-02-23 03:02:06 +00001754 }
1755
1756 /* If generation is passed, we extract only that generation */
Victor Stinner0810fa72019-04-15 17:54:09 +02001757 if (generation != -1) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001758 if (generation >= NUM_GENERATIONS) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001759 _PyErr_Format(tstate, PyExc_ValueError,
1760 "generation parameter must be less than the number of "
1761 "available generations (%i)",
1762 NUM_GENERATIONS);
Pablo Galindo175421b2019-02-23 03:02:06 +00001763 goto error;
1764 }
1765
1766 if (generation < 0) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001767 _PyErr_SetString(tstate, PyExc_ValueError,
1768 "generation parameter cannot be negative");
Pablo Galindo175421b2019-02-23 03:02:06 +00001769 goto error;
1770 }
1771
Victor Stinner67e0de62019-11-20 11:48:18 +01001772 if (append_objects(result, GEN_HEAD(gcstate, generation))) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001773 goto error;
1774 }
1775
1776 return result;
1777 }
1778
1779 /* If generation is not passed or None, get all objects from all generations */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001780 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001781 if (append_objects(result, GEN_HEAD(gcstate, i))) {
Pablo Galindo175421b2019-02-23 03:02:06 +00001782 goto error;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001783 }
1784 }
1785 return result;
Pablo Galindo175421b2019-02-23 03:02:06 +00001786
1787error:
1788 Py_DECREF(result);
1789 return NULL;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001790}
1791
Serhiy Storchaka93260282017-02-04 11:19:59 +02001792/*[clinic input]
1793gc.get_stats
1794
1795Return a list of dictionaries containing per-generation statistics.
1796[clinic start generated code]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001797
1798static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001799gc_get_stats_impl(PyObject *module)
1800/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001801{
1802 int i;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001803 struct gc_generation_stats stats[NUM_GENERATIONS], *st;
Victor Stinner67e0de62019-11-20 11:48:18 +01001804 PyThreadState *tstate = _PyThreadState_GET();
Antoine Pitroud4156c12012-10-30 22:43:19 +01001805
1806 /* To get consistent values despite allocations while constructing
1807 the result list, we use a snapshot of the running stats. */
Victor Stinner72474072019-11-20 12:25:50 +01001808 GCState *gcstate = &tstate->interp->gc;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001809 for (i = 0; i < NUM_GENERATIONS; i++) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001810 stats[i] = gcstate->generation_stats[i];
Antoine Pitroud4156c12012-10-30 22:43:19 +01001811 }
1812
Victor Stinner9db03242019-04-26 02:32:01 +02001813 PyObject *result = PyList_New(0);
Antoine Pitroud4156c12012-10-30 22:43:19 +01001814 if (result == NULL)
1815 return NULL;
1816
1817 for (i = 0; i < NUM_GENERATIONS; i++) {
1818 PyObject *dict;
1819 st = &stats[i];
1820 dict = Py_BuildValue("{snsnsn}",
1821 "collections", st->collections,
1822 "collected", st->collected,
1823 "uncollectable", st->uncollectable
1824 );
1825 if (dict == NULL)
1826 goto error;
1827 if (PyList_Append(result, dict)) {
1828 Py_DECREF(dict);
1829 goto error;
1830 }
1831 Py_DECREF(dict);
1832 }
1833 return result;
1834
1835error:
1836 Py_XDECREF(result);
1837 return NULL;
1838}
1839
1840
Serhiy Storchaka93260282017-02-04 11:19:59 +02001841/*[clinic input]
1842gc.is_tracked
1843
1844 obj: object
1845 /
1846
1847Returns true if the object is tracked by the garbage collector.
1848
1849Simple atomic objects will return false.
1850[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001851
1852static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001853gc_is_tracked(PyObject *module, PyObject *obj)
1854/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001855{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001856 PyObject *result;
1857
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001858 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001859 result = Py_True;
1860 else
1861 result = Py_False;
1862 Py_INCREF(result);
1863 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001864}
1865
brainfvckc75edab2017-10-16 12:49:41 -07001866/*[clinic input]
Pablo Galindoa2ec3f02020-01-14 12:06:45 +00001867gc.is_finalized
1868
1869 obj: object
1870 /
1871
1872Returns true if the object has already been finalized by the GC.
1873[clinic start generated code]*/
1874
1875static PyObject *
1876gc_is_finalized(PyObject *module, PyObject *obj)
1877/*[clinic end generated code: output=e1516ac119a918ed input=201d0c58f69ae390]*/
1878{
1879 if (PyObject_IS_GC(obj) && _PyGCHead_FINALIZED(AS_GC(obj))) {
1880 Py_RETURN_TRUE;
1881 }
1882 Py_RETURN_FALSE;
1883}
1884
1885/*[clinic input]
brainfvckc75edab2017-10-16 12:49:41 -07001886gc.freeze
1887
1888Freeze all current tracked objects and ignore them for future collections.
1889
1890This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
1891Note: collection before a POSIX fork() call may free pages for future allocation
1892which can cause copy-on-write.
1893[clinic start generated code]*/
1894
1895static PyObject *
1896gc_freeze_impl(PyObject *module)
1897/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
1898{
Victor Stinner67e0de62019-11-20 11:48:18 +01001899 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001900 GCState *gcstate = &tstate->interp->gc;
brainfvckc75edab2017-10-16 12:49:41 -07001901 for (int i = 0; i < NUM_GENERATIONS; ++i) {
Victor Stinner67e0de62019-11-20 11:48:18 +01001902 gc_list_merge(GEN_HEAD(gcstate, i), &gcstate->permanent_generation.head);
1903 gcstate->generations[i].count = 0;
brainfvckc75edab2017-10-16 12:49:41 -07001904 }
1905 Py_RETURN_NONE;
1906}
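
/* A hedged sketch of the pre-fork pattern the docstring describes, driven
 * from an embedding application (the helper name is invented): collect first
 * so freed pages can be reused, then freeze the survivors so the child
 * process does not dirty their GC headers. */
#if 0
#include <Python.h>

static void
prepare_for_fork(void)
{
    PyRun_SimpleString(
        "import gc\n"
        "gc.collect()\n"
        "gc.freeze()\n");
}
#endif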
1907
1908/*[clinic input]
1909gc.unfreeze
1910
1911Unfreeze all objects in the permanent generation.
1912
1913Put all objects in the permanent generation back into the oldest generation.
1914[clinic start generated code]*/
1915
1916static PyObject *
1917gc_unfreeze_impl(PyObject *module)
1918/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
1919{
Victor Stinner67e0de62019-11-20 11:48:18 +01001920 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001921 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001922 gc_list_merge(&gcstate->permanent_generation.head,
1923 GEN_HEAD(gcstate, NUM_GENERATIONS-1));
brainfvckc75edab2017-10-16 12:49:41 -07001924 Py_RETURN_NONE;
1925}
1926
1927/*[clinic input]
Victor Stinner05d68a82018-01-18 11:15:25 +01001928gc.get_freeze_count -> Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001929
1930Return the number of objects in the permanent generation.
1931[clinic start generated code]*/
1932
Victor Stinner05d68a82018-01-18 11:15:25 +01001933static Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001934gc_get_freeze_count_impl(PyObject *module)
Victor Stinner05d68a82018-01-18 11:15:25 +01001935/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
brainfvckc75edab2017-10-16 12:49:41 -07001936{
Victor Stinner67e0de62019-11-20 11:48:18 +01001937 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01001938 GCState *gcstate = &tstate->interp->gc;
Victor Stinner67e0de62019-11-20 11:48:18 +01001939 return gc_list_size(&gcstate->permanent_generation.head);
brainfvckc75edab2017-10-16 12:49:41 -07001940}
1941
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001942
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001943PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001944"This module provides access to the garbage collector for reference cycles.\n"
1945"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001946"enable() -- Enable automatic garbage collection.\n"
1947"disable() -- Disable automatic garbage collection.\n"
1948"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001949"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001950"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001951"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001952"set_debug() -- Set debugging flags.\n"
1953"get_debug() -- Get debugging flags.\n"
1954"set_threshold() -- Set the collection thresholds.\n"
1955"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001956"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001957"is_tracked() -- Returns true if a given object is tracked.\n"
Pablo Galindob6791372020-01-14 17:38:15 +00001958"is_finalized() -- Returns true if a given object has already been finalized.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001959"get_referrers() -- Return the list of objects that refer to an object.\n"
brainfvckc75edab2017-10-16 12:49:41 -07001960"get_referents() -- Return the list of objects that an object refers to.\n"
1961"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
1962"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
1963"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001964
1965static PyMethodDef GcMethods[] = {
Serhiy Storchaka93260282017-02-04 11:19:59 +02001966 GC_ENABLE_METHODDEF
1967 GC_DISABLE_METHODDEF
1968 GC_ISENABLED_METHODDEF
1969 GC_SET_DEBUG_METHODDEF
1970 GC_GET_DEBUG_METHODDEF
1971 GC_GET_COUNT_METHODDEF
Victor Stinner9db03242019-04-26 02:32:01 +02001972 {"set_threshold", gc_set_threshold, METH_VARARGS, gc_set_thresh__doc__},
Serhiy Storchaka93260282017-02-04 11:19:59 +02001973 GC_GET_THRESHOLD_METHODDEF
1974 GC_COLLECT_METHODDEF
1975 GC_GET_OBJECTS_METHODDEF
1976 GC_GET_STATS_METHODDEF
1977 GC_IS_TRACKED_METHODDEF
Pablo Galindoa2ec3f02020-01-14 12:06:45 +00001978 GC_IS_FINALIZED_METHODDEF
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001979 {"get_referrers", gc_get_referrers, METH_VARARGS,
1980 gc_get_referrers__doc__},
1981 {"get_referents", gc_get_referents, METH_VARARGS,
1982 gc_get_referents__doc__},
brainfvckc75edab2017-10-16 12:49:41 -07001983 GC_FREEZE_METHODDEF
1984 GC_UNFREEZE_METHODDEF
1985 GC_GET_FREEZE_COUNT_METHODDEF
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001986 {NULL, NULL} /* Sentinel */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001987};
1988
Martin v. Löwis1a214512008-06-11 05:26:20 +00001989static struct PyModuleDef gcmodule = {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001990 PyModuleDef_HEAD_INIT,
Antoine Pitrou696e0352010-08-08 22:18:46 +00001991 "gc", /* m_name */
1992 gc__doc__, /* m_doc */
1993 -1, /* m_size */
1994 GcMethods, /* m_methods */
1995 NULL, /* m_reload */
1996 NULL, /* m_traverse */
1997 NULL, /* m_clear */
1998 NULL /* m_free */
Martin v. Löwis1a214512008-06-11 05:26:20 +00001999};
2000
Jason Tishler6bc06ec2003-09-04 11:59:50 +00002001PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00002002PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00002003{
Victor Stinner72474072019-11-20 12:25:50 +01002004 PyThreadState *tstate = _PyThreadState_GET();
2005 GCState *gcstate = &tstate->interp->gc;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00002006
Victor Stinner72474072019-11-20 12:25:50 +01002007 PyObject *m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00002008
Victor Stinner9db03242019-04-26 02:32:01 +02002009 if (m == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002010 return NULL;
Victor Stinner9db03242019-04-26 02:32:01 +02002011 }
Tim Peters11558872003-04-06 23:30:52 +00002012
Victor Stinner67e0de62019-11-20 11:48:18 +01002013 if (gcstate->garbage == NULL) {
2014 gcstate->garbage = PyList_New(0);
Victor Stinner72474072019-11-20 12:25:50 +01002015 if (gcstate->garbage == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002016 return NULL;
Victor Stinner72474072019-11-20 12:25:50 +01002017 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002018 }
Victor Stinner67e0de62019-11-20 11:48:18 +01002019 Py_INCREF(gcstate->garbage);
Victor Stinner72474072019-11-20 12:25:50 +01002020 if (PyModule_AddObject(m, "garbage", gcstate->garbage) < 0) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002021 return NULL;
Victor Stinner72474072019-11-20 12:25:50 +01002022 }
Thomas Wouters477c8d52006-05-27 19:21:47 +00002023
Victor Stinner67e0de62019-11-20 11:48:18 +01002024 if (gcstate->callbacks == NULL) {
2025 gcstate->callbacks = PyList_New(0);
Victor Stinner72474072019-11-20 12:25:50 +01002026 if (gcstate->callbacks == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00002027 return NULL;
Victor Stinner72474072019-11-20 12:25:50 +01002028 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00002029 }
Victor Stinner67e0de62019-11-20 11:48:18 +01002030 Py_INCREF(gcstate->callbacks);
Victor Stinner72474072019-11-20 12:25:50 +01002031 if (PyModule_AddObject(m, "callbacks", gcstate->callbacks) < 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00002032 return NULL;
Victor Stinner72474072019-11-20 12:25:50 +01002033 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00002034
Victor Stinner72474072019-11-20 12:25:50 +01002035#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) { return NULL; }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002036 ADD_INT(DEBUG_STATS);
2037 ADD_INT(DEBUG_COLLECTABLE);
2038 ADD_INT(DEBUG_UNCOLLECTABLE);
2039 ADD_INT(DEBUG_SAVEALL);
2040 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00002041#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002042 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00002043}
2044
Guido van Rossume13ddc92003-04-17 17:29:22 +00002045/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00002046Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00002047PyGC_Collect(void)
2048{
Victor Stinner2e969062019-11-20 01:49:32 +01002049 PyThreadState *tstate = _PyThreadState_GET();
Victor Stinner72474072019-11-20 12:25:50 +01002050 GCState *gcstate = &tstate->interp->gc;
Victor Stinner2e969062019-11-20 01:49:32 +01002051
Victor Stinner67e0de62019-11-20 11:48:18 +01002052 if (!gcstate->enabled) {
Victor Stinner9db03242019-04-26 02:32:01 +02002053 return 0;
2054 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00002055
Victor Stinner9db03242019-04-26 02:32:01 +02002056 Py_ssize_t n;
Victor Stinner67e0de62019-11-20 11:48:18 +01002057 if (gcstate->collecting) {
Victor Stinner9db03242019-04-26 02:32:01 +02002058 /* already collecting, don't do anything */
2059 n = 0;
2060 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002061 else {
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03002062 PyObject *exc, *value, *tb;
Victor Stinner67e0de62019-11-20 11:48:18 +01002063 gcstate->collecting = 1;
Victor Stinner2e969062019-11-20 01:49:32 +01002064 _PyErr_Fetch(tstate, &exc, &value, &tb);
Victor Stinner67e0de62019-11-20 11:48:18 +01002065 n = collect_with_callback(tstate, NUM_GENERATIONS - 1);
Victor Stinner2e969062019-11-20 01:49:32 +01002066 _PyErr_Restore(tstate, exc, value, tb);
Victor Stinner67e0de62019-11-20 11:48:18 +01002067 gcstate->collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002068 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00002069
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002070 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00002071}
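
/* Minimal embedding sketch (illustrative only, not part of this module):
 * create a reference cycle from Python, then force a full collection from C.
 * PyGC_Collect() returns the number of unreachable objects found. */
#if 0
#include <Python.h>
#include <stdio.h>

int
main(void)
{
    Py_Initialize();
    PyRun_SimpleString(
        "class C: pass\n"
        "a = C(); b = C()\n"
        "a.other = b; b.other = a\n"   /* build a reference cycle */
        "del a, b\n");
    Py_ssize_t found = PyGC_Collect();
    printf("unreachable objects found: %ld\n", (long)found);
    return Py_FinalizeEx();
}
#endif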
2072
Antoine Pitroufef34e32013-05-19 01:11:58 +02002073Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07002074_PyGC_CollectIfEnabled(void)
2075{
Łukasz Langafef7e942016-09-09 21:47:46 -07002076 return PyGC_Collect();
2077}
2078
2079Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02002080_PyGC_CollectNoFail(void)
2081{
Victor Stinner2e969062019-11-20 01:49:32 +01002082 PyThreadState *tstate = _PyThreadState_GET();
2083 assert(!_PyErr_Occurred(tstate));
Victor Stinner9db03242019-04-26 02:32:01 +02002084
Victor Stinner72474072019-11-20 12:25:50 +01002085 GCState *gcstate = &tstate->interp->gc;
Antoine Pitroufef34e32013-05-19 01:11:58 +02002086 Py_ssize_t n;
2087
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02002088 /* Ideally, this function is only called on interpreter shutdown,
2089 and therefore not recursively. Unfortunately, when there are daemon
2090 threads, a daemon thread can start a cyclic garbage collection
2091 during interpreter shutdown (and then never finish it).
2092 See http://bugs.python.org/issue8713#msg195178 for an example.
2093 */
Victor Stinner67e0de62019-11-20 11:48:18 +01002094 if (gcstate->collecting) {
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02002095 n = 0;
Victor Stinner9db03242019-04-26 02:32:01 +02002096 }
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02002097 else {
Victor Stinner67e0de62019-11-20 11:48:18 +01002098 gcstate->collecting = 1;
2099 n = collect(tstate, NUM_GENERATIONS - 1, NULL, NULL, 1);
2100 gcstate->collecting = 0;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02002101 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02002102 return n;
2103}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02002104
void
_PyGC_DumpShutdownStats(PyThreadState *tstate)
{
    GCState *gcstate = &tstate->interp->gc;
    if (!(gcstate->debug & DEBUG_SAVEALL)
        && gcstate->garbage != NULL && PyList_GET_SIZE(gcstate->garbage) > 0) {
        const char *message;
        if (gcstate->debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown;
           the warnings module's dependencies (e.g. linecache) may already
           be gone. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(gcstate->garbage)))
            PyErr_WriteUnraisable(NULL);
        if (gcstate->debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(gcstate->garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(gcstate->garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}

void
_PyGC_Fini(PyThreadState *tstate)
{
    GCState *gcstate = &tstate->interp->gc;
    Py_CLEAR(gcstate->garbage);
    Py_CLEAR(gcstate->callbacks);
}

/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}


#ifdef Py_DEBUG
static int
visit_validate(PyObject *op, void *parent_raw)
{
    PyObject *parent = _PyObject_CAST(parent_raw);
    if (_PyObject_IsFreed(op)) {
        _PyObject_ASSERT_FAILED_MSG(parent,
                                    "PyObject_GC_Track() object is not valid");
    }
    return 0;
}
#endif


/* extension modules might be compiled with GC support, so these
   functions must always be available */

void
PyObject_GC_Track(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_ASSERT_FAILED_MSG(op,
                                    "object already tracked "
                                    "by the garbage collector");
    }
    _PyObject_GC_TRACK(op);

#ifdef Py_DEBUG
    /* Check that the object is valid: validate objects traversed
       by tp_traverse() */
    traverseproc traverse = Py_TYPE(op)->tp_traverse;
    (void)traverse(op, visit_validate, op);
#endif
}
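
/* A minimal usage sketch (not part of this module, kept out of the
 * build with #if 0): a hypothetical extension type allocates itself,
 * initializes its fields, and only then calls PyObject_GC_Track(), so
 * the collector can never traverse a half-initialized object.  The
 * names MyObject, MyObject_Type and mo_item are illustrative
 * assumptions, not CPython API.
 */
#if 0
typedef struct {
    PyObject_HEAD
    PyObject *mo_item;          /* reference that may form a cycle */
} MyObject;

static PyTypeObject MyObject_Type;  /* hypothetical type, elided */

static PyObject *
myobject_new(void)
{
    MyObject *op = PyObject_GC_New(MyObject, &MyObject_Type);
    if (op == NULL) {
        return NULL;
    }
    op->mo_item = NULL;         /* initialize *before* tracking */
    PyObject_GC_Track(op);      /* collector may now call tp_traverse */
    return (PyObject *)op;
}
#endif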

void
PyObject_GC_UnTrack(void *op_raw)
{
    PyObject *op = _PyObject_CAST(op_raw);
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
}
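
/* The matching deallocator for the hypothetical MyObject sketch above,
 * again excluded from the build: untrack first (safe even if the
 * trashcan machinery already untracked the object), drop references,
 * then release the object together with its GC header.
 */
#if 0
static void
myobject_dealloc(MyObject *self)
{
    PyObject_GC_UnTrack(self);  /* idempotent, see the comment above */
    Py_CLEAR(self->mo_item);
    PyObject_GC_Del(self);
}
#endif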

static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyThreadState *tstate = _PyThreadState_GET();
    GCState *gcstate = &tstate->interp->gc;
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
        return _PyErr_NoMemory(tstate);
    }
    size_t size = sizeof(PyGC_Head) + basicsize;

    PyGC_Head *g;
    if (use_calloc) {
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    }
    else {
        g = (PyGC_Head *)PyObject_Malloc(size);
    }
    if (g == NULL) {
        return _PyErr_NoMemory(tstate);
    }
    assert(((uintptr_t)g & 3) == 0);  // g must be aligned to a 4-byte boundary

    g->_gc_next = 0;
    g->_gc_prev = 0;
    gcstate->generations[0].count++; /* number of allocated GC objects */
    if (gcstate->generations[0].count > gcstate->generations[0].threshold &&
        gcstate->enabled &&
        gcstate->generations[0].threshold &&
        !gcstate->collecting &&
        !_PyErr_Occurred(tstate))
    {
        gcstate->collecting = 1;
        collect_generations(tstate);
        gcstate->collecting = 0;
    }
    PyObject *op = FROM_GC(g);
    return op;
}
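
/* Layout sketch for the allocation above: the PyGC_Head bookkeeping
 * block sits immediately before the object, so the two views convert
 * with simple pointer arithmetic (AS_GC is ((PyGC_Head *)(o)-1)):
 *
 *     +------------+----------------------+
 *     | PyGC_Head  |  object (basicsize)  |
 *     +------------+----------------------+
 *     ^            ^
 *     g            FROM_GC(g), the PyObject * handed to callers
 */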

PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}

PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}

PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}

PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    size_t size;
    PyVarObject *op;

    if (nitems < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    size = _PyObject_VAR_SIZE(tp, nitems);
    op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}

PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
        return (PyVarObject *)PyErr_NoMemory();
    }

    PyGC_Head *g = AS_GC(op);
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
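
/* A minimal sketch (excluded from the build) of how a caller grows an
 * *untracked* variable-size object in place; the assertion above rules
 * out resizing a tracked one.  grow_untracked is a hypothetical helper,
 * not CPython API.
 */
#if 0
static int
grow_untracked(PyVarObject **pv, Py_ssize_t newsize)
{
    assert(!_PyObject_GC_IS_TRACKED(*pv));  /* caller untracked it first */
    PyVarObject *v = _PyObject_GC_Resize(*pv, newsize);
    if (v == NULL) {
        return -1;      /* *pv is still valid; a MemoryError is set */
    }
    *pv = v;            /* block may have moved; Py_SIZE(v) == newsize */
    return 0;
}
#endif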

void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (_PyObject_GC_IS_TRACKED(op)) {
        gc_list_remove(g);
    }
    PyThreadState *tstate = _PyThreadState_GET();
    GCState *gcstate = &tstate->interp->gc;
    if (gcstate->generations[0].count > 0) {
        gcstate->generations[0].count--;
    }
    PyObject_FREE(g);
}
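
/* The remaining pieces a cyclic-GC-aware type needs, completing the
 * hypothetical MyObject sketches above (excluded from the build):
 * tp_traverse reports every PyObject* the object owns, and tp_clear
 * breaks reference cycles by dropping them.
 */
#if 0
static int
myobject_traverse(MyObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->mo_item);    /* visit each owned reference */
    return 0;
}

static int
myobject_clear(MyObject *self)
{
    Py_CLEAR(self->mo_item);    /* break any cycle through mo_item */
    return 0;
}
#endif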