blob: e3e290cf97a1bd361ddbc25b02b049b2dd23707f [file] [log] [blame]
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001/*
Tim Peters88396172002-06-30 17:56:40 +00002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00003 Reference Cycle Garbage Collection
4 ==================================
5
Neil Schemenauerb2c2c9e2000-10-04 16:34:09 +00006 Neil Schemenauer <nas@arctrix.com>
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00007
8 Based on a post on the python-dev list. Ideas from Guido van Rossum,
9 Eric Tiedemann, and various others.
10
Neil Schemenauer43411b52001-08-30 00:05:51 +000011 http://www.arctrix.com/nas/python/gc/
Neil Schemenauera7024e92008-07-15 19:24:01 +000012
13 The following mailing list threads provide a historical perspective on
14 the design of this module. Note that a fair amount of refinement has
15 occurred since those discussions.
16
17 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19 http://mail.python.org/pipermail/python-dev/2000-March/002497.html
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000020
21 For a highlevel view of the collection process, read the collect
22 function.
23
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000024*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Yury Selivanovf23746a2018-01-22 19:11:18 -050027#include "internal/context.h"
Eric Snow2ebc5ce2017-09-07 23:51:28 -060028#include "internal/mem.h"
29#include "internal/pystate.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000030#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070031#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010032#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000033
Serhiy Storchaka93260282017-02-04 11:19:59 +020034/*[clinic input]
35module gc
36[clinic start generated code]*/
37/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
38
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090039#define GC_DEBUG (0) /* Enable more asserts */
40
41#define GC_NEXT _PyGCHead_NEXT
42#define GC_PREV _PyGCHead_PREV
43
// update_refs() sets this bit for all objects in the current generation.
// subtract_refs() and move_unreachable() use this to distinguish whether a
// visited object is part of the current collection or not.
47//
48// move_unreachable() removes this flag from reachable objects.
49// Only unreachable objects have this flag.
50//
51// No objects in interpreter have this flag after GC ends.
52#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING
53
54// Lowest bit of _gc_next is used for UNREACHABLE flag.
55//
56// This flag represents the object is in unreachable list in move_unreachable()
57//
58// Although this flag is used only in move_unreachable(), move_unreachable()
59// doesn't clear this flag to skip unnecessary iteration.
60// move_legacy_finalizers() removes this flag instead.
// Between those two calls, the unreachable list is not a normal list and we
// can not use most gc_list_* functions on it.
63#define NEXT_MASK_UNREACHABLE (1)
64
65static inline int
66gc_is_collecting(PyGC_Head *g)
67{
68 return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
69}
70
71static inline void
72gc_clear_collecting(PyGC_Head *g)
73{
74 g->_gc_prev &= ~PREV_MASK_COLLECTING;
75}
76
77static inline Py_ssize_t
78gc_get_refs(PyGC_Head *g)
79{
80 return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
81}
82
83static inline void
84gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
85{
86 g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
87 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
88}
89
90static inline void
91gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
92{
93 g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
94 | PREV_MASK_COLLECTING
95 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
96}
97
98static inline void
99gc_decref(PyGC_Head *g)
100{
101 assert(gc_get_refs(g) > 0);
102 g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
103}
104
Neil Schemenauer43411b52001-08-30 00:05:51 +0000105/* Get an object's GC head */
106#define AS_GC(o) ((PyGC_Head *)(o)-1)
107
108/* Get the object given the GC head */
109#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
110
Tim Peters6fc13d92002-07-02 18:12:35 +0000111/* Python string to use if unhandled exception occurs */
Tim Petersbf384c22003-04-06 00:11:39 +0000112static PyObject *gc_str = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +0000113
/* set for debugging information */
#define DEBUG_STATS         (1<<0) /* print collection statistics */
#define DEBUG_COLLECTABLE   (1<<1) /* print collectable objects */
#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
#define DEBUG_SAVEALL       (1<<5) /* save all garbage in gc.garbage */
/* Parenthesized so uses such as `debug & DEBUG_LEAK` group as intended;
 * the unparenthesized expansion grouped as
 * `(debug & DEBUG_COLLECTABLE) | DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL`,
 * which is non-zero regardless of `debug`. */
#define DEBUG_LEAK          (DEBUG_COLLECTABLE | \
                             DEBUG_UNCOLLECTABLE | \
                             DEBUG_SAVEALL)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000122
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600123#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
Antoine Pitroud4156c12012-10-30 22:43:19 +0100124
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600125void
126_PyGC_Initialize(struct _gc_runtime_state *state)
127{
128 state->enabled = 1; /* automatic collection enabled? */
129
130#define _GEN_HEAD(n) (&state->generations[n].head)
131 struct gc_generation generations[NUM_GENERATIONS] = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900132 /* PyGC_Head, threshold, count */
133 {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
134 {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0},
135 {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0},
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600136 };
137 for (int i = 0; i < NUM_GENERATIONS; i++) {
138 state->generations[i] = generations[i];
139 };
140 state->generation0 = GEN_HEAD(0);
brainfvckc75edab2017-10-16 12:49:41 -0700141 struct gc_generation permanent_generation = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900142 {(uintptr_t)&state->permanent_generation.head,
143 (uintptr_t)&state->permanent_generation.head}, 0, 0
brainfvckc75edab2017-10-16 12:49:41 -0700144 };
145 state->permanent_generation = permanent_generation;
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600146}
Antoine Pitroud4156c12012-10-30 22:43:19 +0100147
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900148/*
149_gc_prev values
150---------------
Neil Schemenauer43411b52001-08-30 00:05:51 +0000151
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900152Between collections, _gc_prev is used for doubly linked list.
Tim Peters6fc13d92002-07-02 18:12:35 +0000153
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900154Lowest two bits of _gc_prev are used for flags.
155PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends
156or _PyObject_GC_UNTRACK() is called.
Tim Peters6fc13d92002-07-02 18:12:35 +0000157
During a collection, _gc_prev is temporarily used for gc_refs, and the gc list
is singly linked until _gc_prev is restored.
Tim Peters6fc13d92002-07-02 18:12:35 +0000160
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900161gc_refs
Tim Peters6fc13d92002-07-02 18:12:35 +0000162 At the start of a collection, update_refs() copies the true refcount
163 to gc_refs, for each object in the generation being collected.
164 subtract_refs() then adjusts gc_refs so that it equals the number of
165 times an object is referenced directly from outside the generation
166 being collected.
Tim Peters6fc13d92002-07-02 18:12:35 +0000167
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900168PREV_MASK_COLLECTING
169 Objects in generation being collected are marked PREV_MASK_COLLECTING in
170 update_refs().
171
172
173_gc_next values
174---------------
175
176_gc_next takes these values:
177
1780
179 The object is not tracked
180
181!= 0
182 Pointer to the next object in the GC list.
    Additionally, the lowest bit is temporarily used for the
    NEXT_MASK_UNREACHABLE flag described below.
185
186NEXT_MASK_UNREACHABLE
Tim Peters6fc13d92002-07-02 18:12:35 +0000187 move_unreachable() then moves objects not reachable (whether directly or
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900188 indirectly) from outside the generation into an "unreachable" set and
189 set this flag.
Tim Peters6fc13d92002-07-02 18:12:35 +0000190
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900191 Objects that are found to be reachable have gc_refs set to 1.
192 When this flag is set for the reachable object, the object must be in
193 "unreachable" set.
194 The flag is unset and the object is moved back to "reachable" set.
195
196 move_legacy_finalizers() will remove this flag from "unreachable" set.
Tim Peters6fc13d92002-07-02 18:12:35 +0000197*/
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000198
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000199/*** list functions ***/
200
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900201static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000202gc_list_init(PyGC_Head *list)
203{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900204 // List header must not have flags.
205 // We can assign pointer by simple cast.
206 list->_gc_prev = (uintptr_t)list;
207 list->_gc_next = (uintptr_t)list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000208}
209
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900210static inline int
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000211gc_list_is_empty(PyGC_Head *list)
212{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900213 return (list->_gc_next == (uintptr_t)list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000214}
215
Tim Peterse2d59182004-11-01 01:39:08 +0000216/* Append `node` to `list`. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900217static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000218gc_list_append(PyGC_Head *node, PyGC_Head *list)
219{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900220 PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
221
222 // last <-> node
223 _PyGCHead_SET_PREV(node, last);
224 _PyGCHead_SET_NEXT(last, node);
225
226 // node <-> list
227 _PyGCHead_SET_NEXT(node, list);
228 list->_gc_prev = (uintptr_t)node;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000229}
230
Tim Peterse2d59182004-11-01 01:39:08 +0000231/* Remove `node` from the gc list it's currently in. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900232static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000233gc_list_remove(PyGC_Head *node)
234{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900235 PyGC_Head *prev = GC_PREV(node);
236 PyGC_Head *next = GC_NEXT(node);
237
238 _PyGCHead_SET_NEXT(prev, next);
239 _PyGCHead_SET_PREV(next, prev);
240
241 node->_gc_next = 0; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000242}
243
/* Move `node` from the gc list it's currently in (which is not explicitly
 * named here) to the end of `list`.  This is semantically the same as
 * gc_list_remove(node) followed by gc_list_append(node, list).
 */
static void
gc_list_move(PyGC_Head *node, PyGC_Head *list)
{
    /* Unlink from current list. */
    PyGC_Head *from_prev = GC_PREV(node);
    PyGC_Head *from_next = GC_NEXT(node);
    _PyGCHead_SET_NEXT(from_prev, from_next);
    _PyGCHead_SET_PREV(from_next, from_prev);

    /* Relink at end of new list. */
    // list must not have flags. So we can skip macros.
    PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
    // NOTE(review): node's own links go through the _PyGCHead_SET_* macros,
    // presumably so that any flag bits stored in node's words survive the
    // move — confirm against the macro definitions in the internal headers.
    _PyGCHead_SET_PREV(node, to_prev);
    _PyGCHead_SET_NEXT(to_prev, node);
    list->_gc_prev = (uintptr_t)node;
    _PyGCHead_SET_NEXT(node, list);
}
265
266/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000267static void
268gc_list_merge(PyGC_Head *from, PyGC_Head *to)
269{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000270 assert(from != to);
271 if (!gc_list_is_empty(from)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900272 PyGC_Head *to_tail = GC_PREV(to);
273 PyGC_Head *from_head = GC_NEXT(from);
274 PyGC_Head *from_tail = GC_PREV(from);
275 assert(from_head != from);
276 assert(from_tail != from);
277
278 _PyGCHead_SET_NEXT(to_tail, from_head);
279 _PyGCHead_SET_PREV(from_head, to_tail);
280
281 _PyGCHead_SET_NEXT(from_tail, to);
282 _PyGCHead_SET_PREV(to, from_tail);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000283 }
284 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000285}
286
Neal Norwitz7b216c52006-03-04 20:01:53 +0000287static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000288gc_list_size(PyGC_Head *list)
289{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000290 PyGC_Head *gc;
291 Py_ssize_t n = 0;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900292 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000293 n++;
294 }
295 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000296}
297
Tim Peters259272b2003-04-06 19:41:39 +0000298/* Append objects in a GC list to a Python list.
299 * Return 0 if all OK, < 0 if error (out of memory for list).
300 */
301static int
302append_objects(PyObject *py_list, PyGC_Head *gc_list)
303{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000304 PyGC_Head *gc;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900305 for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000306 PyObject *op = FROM_GC(gc);
307 if (op != py_list) {
308 if (PyList_Append(py_list, op)) {
309 return -1; /* exception */
310 }
311 }
312 }
313 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000314}
315
#if GC_DEBUG
// validate_list checks list consistency. And it works as document
// describing when expected_mask is set / unset.
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    // Walk the whole ring: every node's back pointer must point at the
    // node we just came from, and its PREV_MASK_COLLECTING bit must
    // equal expected_mask (either set for all nodes, or clear for all).
    while (gc != head) {
        assert(GC_NEXT(gc) != NULL);
        assert(GC_PREV(gc) == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
        prev = gc;
        gc = GC_NEXT(gc);
    }
    // Closing the loop: the header's back pointer must be the last node.
    assert(prev == GC_PREV(head));
}
#else
// Release builds: validation compiles away to nothing.
#define validate_list(x,y) do{}while(0)
#endif
336
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000337/*** end of list stuff ***/
338
339
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        // Pack the true refcount into _gc_prev and raise COLLECTING
        // (gc_reset_refs also discards the prev pointer, keeping only
        // the FINALIZED flag).
        gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something-- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        assert(gc_get_refs(gc) != 0);
    }
}
370
Tim Peters19b74c72002-07-01 03:52:19 +0000371/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000372static int
373visit_decref(PyObject *op, void *data)
374{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000375 assert(op != NULL);
376 if (PyObject_IS_GC(op)) {
377 PyGC_Head *gc = AS_GC(op);
378 /* We're only interested in gc_refs for objects in the
379 * generation being collected, which can be recognized
380 * because only they have positive gc_refs.
381 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900382 if (gc_is_collecting(gc)) {
383 gc_decref(gc);
384 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000385 }
386 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000387}
388
Tim Peters19b74c72002-07-01 03:52:19 +0000389/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
390 * for all objects in containers, and is GC_REACHABLE for all tracked gc
391 * objects not in containers. The ones with gc_refs > 0 are directly
392 * reachable from outside containers, and so can't be collected.
393 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000394static void
395subtract_refs(PyGC_Head *containers)
396{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000397 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900398 PyGC_Head *gc = GC_NEXT(containers);
399 for (; gc != containers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000400 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
401 (void) traverse(FROM_GC(gc),
402 (visitproc)visit_decref,
403 NULL);
404 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000405}
406
/* A traversal callback for move_unreachable.
 * `op` is a child of an object proven reachable; make sure `op` is (or
 * will be) treated as reachable too. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore untracked objects and objects in other generation.
    // (_gc_next == 0 means untracked; a clear COLLECTING flag means the
    // object is not part of the generation being collected.)
    if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
        return 0;
    }

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from unreachable list because the normal
        // gc_list_* helpers can't handle the NEXT_MASK_UNREACHABLE flag
        // stored in the _gc_next words of that list.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        assert(prev->_gc_next & NEXT_MASK_UNREACHABLE);
        assert(next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it.  All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        assert(gc_refs > 0);
    }
    return 0;
}
458
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young don't have PREV_MASK_COLLECTING flag and
 * unreachable have the flag.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 *
 * This function restores the _gc_prev pointer: young and unreachable are
 * proper doubly linked lists after this function.
 * But _gc_next in the unreachable list has the NEXT_MASK_UNREACHABLE flag,
 * so we can not use gc_list_* functions on unreachable until the flag is
 * removed (by move_legacy_finalizers()).
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous elem in the young list, used for restoring gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants:  all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(gc_get_refs(gc) > 0);
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                    (visitproc)visit_reachable,
                    (void *)young);
            // relink gc_prev to prev element.
            _PyGCHead_SET_PREV(gc, prev);
            // gc is not in COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need to gc->next->prev = prev because it is single linked.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we use
            // NEXT_MASK_UNREACHABLE here.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in the unreachable set have the
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may set the flag on the unreachable head too.
            // move_legacy_finalizers() should care about it.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must be the last element remaining in the list.
    young->_gc_prev = (uintptr_t)prev;
}
540
541static void
542untrack_tuples(PyGC_Head *head)
543{
544 PyGC_Head *next, *gc = GC_NEXT(head);
545 while (gc != head) {
546 PyObject *op = FROM_GC(gc);
547 next = GC_NEXT(gc);
548 if (PyTuple_CheckExact(op)) {
549 _PyTuple_MaybeUntrack(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000550 }
551 gc = next;
552 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000553}
554
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200555/* Try to untrack all currently tracked dictionaries */
556static void
557untrack_dicts(PyGC_Head *head)
558{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900559 PyGC_Head *next, *gc = GC_NEXT(head);
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200560 while (gc != head) {
561 PyObject *op = FROM_GC(gc);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900562 next = GC_NEXT(gc);
563 if (PyDict_CheckExact(op)) {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200564 _PyDict_MaybeUntrack(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900565 }
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200566 gc = next;
567 }
568}
569
Antoine Pitrou796564c2013-07-30 19:59:21 +0200570/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000571static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200572has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000573{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200574 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000575}
576
/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes NEXT_MASK_UNREACHABLE flag
 * from _gc_next in unreachable.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    // The unreachable head itself may carry the flag: move_unreachable()
    // sets it unconditionally on the tail link.  Clear it first.
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        // Every node in unreachable still carries the flag; strip it
        // before reading the raw successor pointer.
        assert(gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            // Rescued from this collection: drop COLLECTING and park
            // the object on the finalizers list.
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}
604
Antoine Pitrou796564c2013-07-30 19:59:21 +0200605/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000606static int
607visit_move(PyObject *op, PyGC_Head *tolist)
608{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000609 if (PyObject_IS_GC(op)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900610 PyGC_Head *gc = AS_GC(op);
611 if (gc_is_collecting(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000612 gc_list_move(gc, tolist);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900613 gc_clear_collecting(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000614 }
615 }
616 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000617}
618
619/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000620 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000621 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000622static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200623move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000624{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000625 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900626 PyGC_Head *gc = GC_NEXT(finalizers);
627 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000628 /* Note that the finalizers list may grow during this. */
629 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
630 (void) traverse(FROM_GC(gc),
631 (visitproc)visit_move,
632 (void *)finalizers);
633 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000634}
635
Tim Petersead8b7a2004-10-30 23:09:22 +0000636/* Clear all weakrefs to unreachable objects, and if such a weakref has a
637 * callback, invoke it if necessary. Note that it's possible for such
638 * weakrefs to be outside the unreachable set -- indeed, those are precisely
639 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
640 * overview & some details. Some weakrefs with callbacks may be reclaimed
641 * directly by this routine; the number reclaimed is the return value. Other
642 * weakrefs with callbacks may be moved into the `old` generation. Objects
643 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
644 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
645 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000646 */
647static int
Tim Petersead8b7a2004-10-30 23:09:22 +0000648handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
Tim Peters403a2032003-11-20 21:21:46 +0000649{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000650 PyGC_Head *gc;
651 PyObject *op; /* generally FROM_GC(gc) */
652 PyWeakReference *wr; /* generally a cast of op */
653 PyGC_Head wrcb_to_call; /* weakrefs with callbacks to call */
654 PyGC_Head *next;
655 int num_freed = 0;
Tim Peters403a2032003-11-20 21:21:46 +0000656
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000657 gc_list_init(&wrcb_to_call);
Tim Peters403a2032003-11-20 21:21:46 +0000658
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000659 /* Clear all weakrefs to the objects in unreachable. If such a weakref
660 * also has a callback, move it into `wrcb_to_call` if the callback
661 * needs to be invoked. Note that we cannot invoke any callbacks until
662 * all weakrefs to unreachable objects are cleared, lest the callback
663 * resurrect an unreachable object via a still-active weakref. We
664 * make another pass over wrcb_to_call, invoking callbacks, after this
665 * pass completes.
666 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900667 for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000668 PyWeakReference **wrlist;
Tim Petersead8b7a2004-10-30 23:09:22 +0000669
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000670 op = FROM_GC(gc);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900671 next = GC_NEXT(gc);
Tim Petersead8b7a2004-10-30 23:09:22 +0000672
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000673 if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
674 continue;
Tim Petersead8b7a2004-10-30 23:09:22 +0000675
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000676 /* It supports weakrefs. Does it have any? */
677 wrlist = (PyWeakReference **)
678 PyObject_GET_WEAKREFS_LISTPTR(op);
Tim Petersead8b7a2004-10-30 23:09:22 +0000679
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000680 /* `op` may have some weakrefs. March over the list, clear
681 * all the weakrefs, and move the weakrefs with callbacks
682 * that must be called into wrcb_to_call.
683 */
684 for (wr = *wrlist; wr != NULL; wr = *wrlist) {
685 PyGC_Head *wrasgc; /* AS_GC(wr) */
Tim Petersead8b7a2004-10-30 23:09:22 +0000686
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000687 /* _PyWeakref_ClearRef clears the weakref but leaves
688 * the callback pointer intact. Obscure: it also
689 * changes *wrlist.
690 */
691 assert(wr->wr_object == op);
692 _PyWeakref_ClearRef(wr);
693 assert(wr->wr_object == Py_None);
694 if (wr->wr_callback == NULL)
695 continue; /* no callback */
Tim Petersead8b7a2004-10-30 23:09:22 +0000696
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000697 /* Headache time. `op` is going away, and is weakly referenced by
698 * `wr`, which has a callback. Should the callback be invoked? If wr
699 * is also trash, no:
700 *
701 * 1. There's no need to call it. The object and the weakref are
702 * both going away, so it's legitimate to pretend the weakref is
703 * going away first. The user has to ensure a weakref outlives its
704 * referent if they want a guarantee that the wr callback will get
705 * invoked.
706 *
707 * 2. It may be catastrophic to call it. If the callback is also in
708 * cyclic trash (CT), then although the CT is unreachable from
709 * outside the current generation, CT may be reachable from the
710 * callback. Then the callback could resurrect insane objects.
711 *
712 * Since the callback is never needed and may be unsafe in this case,
713 * wr is simply left in the unreachable set. Note that because we
714 * already called _PyWeakref_ClearRef(wr), its callback will never
715 * trigger.
716 *
717 * OTOH, if wr isn't part of CT, we should invoke the callback: the
718 * weakref outlived the trash. Note that since wr isn't CT in this
719 * case, its callback can't be CT either -- wr acted as an external
720 * root to this generation, and therefore its callback did too. So
721 * nothing in CT is reachable from the callback either, so it's hard
722 * to imagine how calling it later could create a problem for us. wr
723 * is moved to wrcb_to_call in this case.
724 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900725 if (gc_is_collecting(AS_GC(wr))) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000726 continue;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900727 }
Tim Peterscc2a8662004-10-31 22:12:43 +0000728
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000729 /* Create a new reference so that wr can't go away
730 * before we can process it again.
731 */
732 Py_INCREF(wr);
Tim Petersead8b7a2004-10-30 23:09:22 +0000733
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000734 /* Move wr to wrcb_to_call, for the next pass. */
735 wrasgc = AS_GC(wr);
736 assert(wrasgc != next); /* wrasgc is reachable, but
737 next isn't, so they can't
738 be the same */
739 gc_list_move(wrasgc, &wrcb_to_call);
740 }
741 }
Tim Petersead8b7a2004-10-30 23:09:22 +0000742
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000743 /* Invoke the callbacks we decided to honor. It's safe to invoke them
744 * because they can't reference unreachable objects.
745 */
746 while (! gc_list_is_empty(&wrcb_to_call)) {
747 PyObject *temp;
748 PyObject *callback;
Tim Petersead8b7a2004-10-30 23:09:22 +0000749
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900750 gc = (PyGC_Head*)wrcb_to_call._gc_next;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000751 op = FROM_GC(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000752 assert(PyWeakref_Check(op));
753 wr = (PyWeakReference *)op;
754 callback = wr->wr_callback;
755 assert(callback != NULL);
Tim Petersead8b7a2004-10-30 23:09:22 +0000756
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000757 /* copy-paste of weakrefobject.c's handle_callback() */
Victor Stinnerde4ae3d2016-12-04 22:59:09 +0100758 temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000759 if (temp == NULL)
760 PyErr_WriteUnraisable(callback);
761 else
762 Py_DECREF(temp);
Tim Petersead8b7a2004-10-30 23:09:22 +0000763
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000764 /* Give up the reference we created in the first pass. When
765 * op's refcount hits 0 (which it may or may not do right now),
766 * op's tp_dealloc will decref op->wr_callback too. Note
767 * that the refcount probably will hit 0 now, and because this
768 * weakref was reachable to begin with, gc didn't already
769 * add it to its count of freed objects. Example: a reachable
770 * weak value dict maps some key to this reachable weakref.
771 * The callback removes this key->weakref mapping from the
772 * dict, leaving no other references to the weakref (excepting
773 * ours).
774 */
775 Py_DECREF(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900776 if (wrcb_to_call._gc_next == (uintptr_t)gc) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000777 /* object is still alive -- move it */
778 gc_list_move(gc, old);
779 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900780 else {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000781 ++num_freed;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900782 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000783 }
Tim Petersead8b7a2004-10-30 23:09:22 +0000784
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000785 return num_freed;
Tim Peters403a2032003-11-20 21:21:46 +0000786}
787
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000788static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200789debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000790{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100791 PySys_FormatStderr("gc: %s <%s %p>\n",
792 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000793}
794
Antoine Pitrou796564c2013-07-30 19:59:21 +0200795/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000796 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000797 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
798 * garbage list (a Python list), else only the objects in finalizers with
799 * __del__ methods are appended to garbage. All objects in finalizers are
800 * merged into the old list regardless.
Tim Petersbf384c22003-04-06 00:11:39 +0000801 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300802static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200803handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000804{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900805 PyGC_Head *gc = GC_NEXT(finalizers);
Tim Petersf6b80452003-04-07 19:21:15 +0000806
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300807 assert(!PyErr_Occurred());
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600808 if (_PyRuntime.gc.garbage == NULL) {
809 _PyRuntime.gc.garbage = PyList_New(0);
810 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000811 Py_FatalError("gc couldn't create gc.garbage list");
812 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900813 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000814 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000815
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600816 if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300817 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
818 PyErr_Clear();
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300819 break;
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300820 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000821 }
822 }
Tim Petersf6b80452003-04-07 19:21:15 +0000823
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000824 gc_list_merge(finalizers, old);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000825}
826
Tim Peters5fbc7b12014-05-08 17:42:19 -0500827/* Run first-time finalizers (if any) on all the objects in collectable.
828 * Note that this may remove some (or even all) of the objects from the
829 * list, due to refcounts falling to 0.
830 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200831static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500832finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200833{
834 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500835 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200836
Tim Peters5fbc7b12014-05-08 17:42:19 -0500837 /* While we're going through the loop, `finalize(op)` may cause op, or
838 * other objects, to be reclaimed via refcounts falling to zero. So
839 * there's little we can rely on about the structure of the input
840 * `collectable` list across iterations. For safety, we always take the
841 * first object in that list and move it to a temporary `seen` list.
842 * If objects vanish from the `collectable` and `seen` lists we don't
843 * care.
844 */
845 gc_list_init(&seen);
846
847 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900848 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200849 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500850 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200851 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500852 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
853 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900854 _PyGCHead_SET_FINALIZED(gc);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200855 Py_INCREF(op);
856 finalize(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300857 assert(!PyErr_Occurred());
Antoine Pitrou796564c2013-07-30 19:59:21 +0200858 Py_DECREF(op);
859 }
860 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500861 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200862}
863
/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer).
   Returns 0 if everything is still unreachable, -1 if any object was
   resurrected.  Either way, PREV_MASK_COLLECTING is cleared and the
   gc_prev links (temporarily borrowed to hold gc_refs) are restored. */
static int
check_garbage(PyGC_Head *collectable)
{
    int ret = 0;
    PyGC_Head *gc;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        // Use gc_refs and break gc_prev again.
        gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
        assert(gc_get_refs(gc) != 0);
    }
    /* After subtracting internal references, a nonzero count means some
     * reference from outside this list exists. */
    subtract_refs(collectable);
    PyGC_Head *prev = collectable;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        assert(gc_get_refs(gc) >= 0);
        if (gc_get_refs(gc) != 0) {
            /* External reference remains: object was resurrected. */
            ret = -1;
        }
        // Restore gc_prev here.
        _PyGCHead_SET_PREV(gc, prev);
        gc_clear_collecting(gc);
        prev = gc;
    }
    return ret;
}
891
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000892/* Break reference cycles by clearing the containers involved. This is
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000893 * tricky business as the lists can be changing and we don't know which
Tim Peters19b74c72002-07-01 03:52:19 +0000894 * objects may be freed. It is possible I screwed something up here.
895 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000896static void
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000897delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000898{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000899 inquiry clear;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000900
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300901 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000902 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900903 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000904 PyObject *op = FROM_GC(gc);
Tim Peters88396172002-06-30 17:56:40 +0000905
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900906 assert(Py_REFCNT(FROM_GC(gc)) > 0);
907
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600908 if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300909 assert(_PyRuntime.gc.garbage != NULL);
910 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
911 PyErr_Clear();
912 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000913 }
914 else {
915 if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
916 Py_INCREF(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300917 (void) clear(op);
918 if (PyErr_Occurred()) {
919 PySys_WriteStderr("Exception ignored in tp_clear of "
920 "%.50s\n", Py_TYPE(op)->tp_name);
921 PyErr_WriteUnraisable(NULL);
922 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000923 Py_DECREF(op);
924 }
925 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900926 if (GC_NEXT(collectable) == gc) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000927 /* object is still alive, move it, it may die later */
928 gc_list_move(gc, old);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000929 }
930 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000931}
932
Christian Heimesa156e092008-02-16 07:38:31 +0000933/* Clear all free lists
934 * All free lists are cleared during the collection of the highest generation.
935 * Allocated items in the free list may keep a pymalloc arena occupied.
936 * Clearing the free lists may give back memory to the OS earlier.
937 */
938static void
939clear_freelists(void)
940{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000941 (void)PyMethod_ClearFreeList();
942 (void)PyFrame_ClearFreeList();
943 (void)PyCFunction_ClearFreeList();
944 (void)PyTuple_ClearFreeList();
945 (void)PyUnicode_ClearFreeList();
946 (void)PyFloat_ClearFreeList();
Antoine Pitrou9a812cb2011-11-15 00:00:12 +0100947 (void)PyList_ClearFreeList();
948 (void)PyDict_ClearFreeList();
Antoine Pitrou093ce9c2011-12-16 11:24:27 +0100949 (void)PySet_ClearFreeList();
Yury Selivanoveb636452016-09-08 22:01:51 -0700950 (void)PyAsyncGen_ClearFreeLists();
Yury Selivanovf23746a2018-01-22 19:11:18 -0500951 (void)PyContext_ClearFreeList();
Christian Heimesa156e092008-02-16 07:38:31 +0000952}
953
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000954/* This is the main function. Read this to understand how the
955 * collection process works. */
Neal Norwitz7b216c52006-03-04 20:01:53 +0000956static Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +0200957collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
958 int nofail)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000959{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000960 int i;
961 Py_ssize_t m = 0; /* # objects collected */
962 Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
963 PyGC_Head *young; /* the generation we are examining */
964 PyGC_Head *old; /* next older generation */
965 PyGC_Head unreachable; /* non-problematic unreachable trash */
966 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
967 PyGC_Head *gc;
Victor Stinner7181dec2015-03-27 17:47:53 +0100968 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200969
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600970 struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000971
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600972 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000973 PySys_WriteStderr("gc: collecting generation %d...\n",
974 generation);
975 PySys_WriteStderr("gc: objects in each generation:");
976 for (i = 0; i < NUM_GENERATIONS; i++)
Antoine Pitrouded3c1b2014-05-24 19:24:40 +0200977 PySys_FormatStderr(" %zd",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000978 gc_list_size(GEN_HEAD(i)));
brainfvckc75edab2017-10-16 12:49:41 -0700979 PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
980 gc_list_size(&_PyRuntime.gc.permanent_generation.head));
Victor Stinner7181dec2015-03-27 17:47:53 +0100981 t1 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200982
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000983 PySys_WriteStderr("\n");
984 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000985
Łukasz Langaa785c872016-09-09 17:37:37 -0700986 if (PyDTrace_GC_START_ENABLED())
987 PyDTrace_GC_START(generation);
988
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000989 /* update collection and allocation counters */
990 if (generation+1 < NUM_GENERATIONS)
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600991 _PyRuntime.gc.generations[generation+1].count += 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000992 for (i = 0; i <= generation; i++)
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600993 _PyRuntime.gc.generations[i].count = 0;
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000994
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000995 /* merge younger generations with one we are currently collecting */
996 for (i = 0; i < generation; i++) {
997 gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
998 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000999
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001000 /* handy references */
1001 young = GEN_HEAD(generation);
1002 if (generation < NUM_GENERATIONS-1)
1003 old = GEN_HEAD(generation+1);
1004 else
1005 old = young;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001006
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001007 validate_list(young, 0);
1008 validate_list(old, 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001009 /* Using ob_refcnt and gc_refs, calculate which objects in the
1010 * container set are reachable from outside the set (i.e., have a
1011 * refcount greater than 0 when all the references within the
1012 * set are taken into account).
1013 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001014 update_refs(young); // gc_prev is used for gc_refs
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001015 subtract_refs(young);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001016
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001017 /* Leave everything reachable from outside young in young, and move
1018 * everything else (in young) to unreachable.
1019 * NOTE: This used to move the reachable objects into a reachable
1020 * set instead. But most things usually turn out to be reachable,
1021 * so it's more efficient to move the unreachable things.
1022 */
1023 gc_list_init(&unreachable);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001024 move_unreachable(young, &unreachable); // gc_prev is pointer again
1025 validate_list(young, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001026
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001027 untrack_tuples(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001028 /* Move reachable objects to next generation. */
1029 if (young != old) {
1030 if (generation == NUM_GENERATIONS - 2) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001031 _PyRuntime.gc.long_lived_pending += gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001032 }
1033 gc_list_merge(young, old);
1034 }
1035 else {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +02001036 /* We only untrack dicts in full collections, to avoid quadratic
1037 dict build-up. See issue #14775. */
1038 untrack_dicts(young);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001039 _PyRuntime.gc.long_lived_pending = 0;
1040 _PyRuntime.gc.long_lived_total = gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001041 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001042
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001043 /* All objects in unreachable are trash, but objects reachable from
Antoine Pitrou796564c2013-07-30 19:59:21 +02001044 * legacy finalizers (e.g. tp_del) can't safely be deleted.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001045 */
1046 gc_list_init(&finalizers);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001047 // NEXT_MASK_UNREACHABLE is cleared here.
1048 // After move_legacy_finalizers(), unreachable is normal list.
Antoine Pitrou796564c2013-07-30 19:59:21 +02001049 move_legacy_finalizers(&unreachable, &finalizers);
1050 /* finalizers contains the unreachable objects with a legacy finalizer;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001051 * unreachable objects reachable *from* those are also uncollectable,
1052 * and we move those into the finalizers list too.
1053 */
Antoine Pitrou796564c2013-07-30 19:59:21 +02001054 move_legacy_finalizer_reachable(&finalizers);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001055
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001056 validate_list(&finalizers, 0);
1057 validate_list(&unreachable, PREV_MASK_COLLECTING);
1058
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001059 /* Collect statistics on collectable objects found and print
1060 * debugging information.
1061 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001062 for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001063 m++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001064 if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001065 debug_cycle("collectable", FROM_GC(gc));
1066 }
1067 }
Tim Petersead8b7a2004-10-30 23:09:22 +00001068
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001069 /* Clear weakrefs and invoke callbacks as necessary. */
1070 m += handle_weakrefs(&unreachable, old);
Tim Petersead8b7a2004-10-30 23:09:22 +00001071
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001072 validate_list(old, 0);
1073 validate_list(&unreachable, PREV_MASK_COLLECTING);
1074
Antoine Pitrou796564c2013-07-30 19:59:21 +02001075 /* Call tp_finalize on objects which have one. */
Tim Peters5fbc7b12014-05-08 17:42:19 -05001076 finalize_garbage(&unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001077
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001078 if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
Antoine Pitrou796564c2013-07-30 19:59:21 +02001079 gc_list_merge(&unreachable, old);
1080 }
1081 else {
1082 /* Call tp_clear on objects in the unreachable set. This will cause
1083 * the reference cycles to be broken. It may also cause some objects
1084 * in finalizers to be freed.
1085 */
1086 delete_garbage(&unreachable, old);
1087 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001088
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001089 /* Collect statistics on uncollectable objects found and print
1090 * debugging information. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001091 for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001092 n++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001093 if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001094 debug_cycle("uncollectable", FROM_GC(gc));
1095 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001096 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Victor Stinner7181dec2015-03-27 17:47:53 +01001097 _PyTime_t t2 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +02001098
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001099 if (m == 0 && n == 0)
1100 PySys_WriteStderr("gc: done");
1101 else
Antoine Pitrouded3c1b2014-05-24 19:24:40 +02001102 PySys_FormatStderr(
1103 "gc: done, %zd unreachable, %zd uncollectable",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001104 n+m, n);
Victor Stinner7181dec2015-03-27 17:47:53 +01001105 PySys_WriteStderr(", %.4fs elapsed\n",
1106 _PyTime_AsSecondsDouble(t2 - t1));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001107 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001108
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001109 /* Append instances in the uncollectable set to a Python
1110 * reachable list of garbage. The programmer has to deal with
1111 * this if they insist on creating this type of structure.
1112 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001113 handle_legacy_finalizers(&finalizers, old);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001114 validate_list(old, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001115
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001116 /* Clear free list only during the collection of the highest
1117 * generation */
1118 if (generation == NUM_GENERATIONS-1) {
1119 clear_freelists();
1120 }
Christian Heimesa156e092008-02-16 07:38:31 +00001121
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001122 if (PyErr_Occurred()) {
Antoine Pitroufef34e32013-05-19 01:11:58 +02001123 if (nofail) {
1124 PyErr_Clear();
1125 }
1126 else {
1127 if (gc_str == NULL)
1128 gc_str = PyUnicode_FromString("garbage collection");
1129 PyErr_WriteUnraisable(gc_str);
1130 Py_FatalError("unexpected exception during garbage collection");
1131 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001132 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001133
Antoine Pitroud4156c12012-10-30 22:43:19 +01001134 /* Update stats */
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001135 if (n_collected)
1136 *n_collected = m;
1137 if (n_uncollectable)
1138 *n_uncollectable = n;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001139 stats->collections++;
1140 stats->collected += m;
1141 stats->uncollectable += n;
Łukasz Langaa785c872016-09-09 17:37:37 -07001142
1143 if (PyDTrace_GC_DONE_ENABLED())
1144 PyDTrace_GC_DONE(n+m);
1145
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001146 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001147 return n+m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001148}
1149
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001150/* Invoke progress callbacks to notify clients that garbage collection
1151 * is starting or stopping
1152 */
1153static void
1154invoke_gc_callback(const char *phase, int generation,
1155 Py_ssize_t collected, Py_ssize_t uncollectable)
1156{
1157 Py_ssize_t i;
1158 PyObject *info = NULL;
1159
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001160 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001161 /* we may get called very early */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001162 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001163 return;
1164 /* The local variable cannot be rebound, check it for sanity */
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001165 assert(PyList_CheckExact(_PyRuntime.gc.callbacks));
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001166 if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001167 info = Py_BuildValue("{sisnsn}",
1168 "generation", generation,
1169 "collected", collected,
1170 "uncollectable", uncollectable);
1171 if (info == NULL) {
1172 PyErr_WriteUnraisable(NULL);
1173 return;
1174 }
1175 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001176 for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
1177 PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001178 Py_INCREF(cb); /* make sure cb doesn't go away */
1179 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001180 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001181 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001182 }
1183 else {
1184 Py_DECREF(r);
1185 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001186 Py_DECREF(cb);
1187 }
1188 Py_XDECREF(info);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001189 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001190}
1191
1192/* Perform garbage collection of a generation and invoke
1193 * progress callbacks.
1194 */
1195static Py_ssize_t
1196collect_with_callback(int generation)
1197{
1198 Py_ssize_t result, collected, uncollectable;
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001199 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001200 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001201 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001202 invoke_gc_callback("stop", generation, collected, uncollectable);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001203 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001204 return result;
1205}
1206
Neal Norwitz7b216c52006-03-04 20:01:53 +00001207static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001208collect_generations(void)
1209{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001210 int i;
1211 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001212
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001213 /* Find the oldest generation (highest numbered) where the count
1214 * exceeds the threshold. Objects in the that generation and
1215 * generations younger than it will be collected. */
1216 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001217 if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001218 /* Avoid quadratic performance degradation in number
1219 of tracked objects. See comments at the beginning
1220 of this file, and issue #4074.
1221 */
1222 if (i == NUM_GENERATIONS - 1
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001223 && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001224 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001225 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001226 break;
1227 }
1228 }
1229 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001230}
1231
Serhiy Storchaka93260282017-02-04 11:19:59 +02001232#include "clinic/gcmodule.c.h"
1233
1234/*[clinic input]
1235gc.enable
1236
1237Enable automatic garbage collection.
1238[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001239
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001240static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001241gc_enable_impl(PyObject *module)
1242/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001243{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001244 _PyRuntime.gc.enabled = 1;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001245 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001246}
1247
Serhiy Storchaka93260282017-02-04 11:19:59 +02001248/*[clinic input]
1249gc.disable
1250
1251Disable automatic garbage collection.
1252[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001253
1254static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001255gc_disable_impl(PyObject *module)
1256/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001257{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001258 _PyRuntime.gc.enabled = 0;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001259 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001260}
1261
Serhiy Storchaka93260282017-02-04 11:19:59 +02001262/*[clinic input]
1263gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001264
Serhiy Storchaka93260282017-02-04 11:19:59 +02001265Returns true if automatic garbage collection is enabled.
1266[clinic start generated code]*/
1267
1268static int
1269gc_isenabled_impl(PyObject *module)
1270/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001271{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001272 return _PyRuntime.gc.enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001273}
1274
Serhiy Storchaka93260282017-02-04 11:19:59 +02001275/*[clinic input]
1276gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001277
Serhiy Storchaka93260282017-02-04 11:19:59 +02001278 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1279
1280Run the garbage collector.
1281
1282With no arguments, run a full collection. The optional argument
1283may be an integer specifying which generation to collect. A ValueError
1284is raised if the generation number is invalid.
1285
1286The number of unreachable objects is returned.
1287[clinic start generated code]*/
1288
1289static Py_ssize_t
1290gc_collect_impl(PyObject *module, int generation)
1291/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001292{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001293 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001294
Serhiy Storchaka93260282017-02-04 11:19:59 +02001295 if (generation < 0 || generation >= NUM_GENERATIONS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001296 PyErr_SetString(PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001297 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001298 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001299
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001300 if (_PyRuntime.gc.collecting)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001301 n = 0; /* already collecting, don't do anything */
1302 else {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001303 _PyRuntime.gc.collecting = 1;
Serhiy Storchaka93260282017-02-04 11:19:59 +02001304 n = collect_with_callback(generation);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001305 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001306 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001307
Serhiy Storchaka93260282017-02-04 11:19:59 +02001308 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001309}
1310
/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    /* Store the bitmask runtime-wide; it is consulted elsewhere, e.g. by
       _PyGC_DumpShutdownStats (DEBUG_SAVEALL / DEBUG_UNCOLLECTABLE). */
    _PyRuntime.gc.debug = flags;

    Py_RETURN_NONE;
}
1337
/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    /* Return the bitmask last stored by gc.set_debug(). */
    return _PyRuntime.gc.debug;
}
1350
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001351PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001352"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001353"\n"
1354"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001355"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001356
1357static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001358gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001359{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001360 int i;
1361 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001362 &_PyRuntime.gc.generations[0].threshold,
1363 &_PyRuntime.gc.generations[1].threshold,
1364 &_PyRuntime.gc.generations[2].threshold))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001365 return NULL;
1366 for (i = 2; i < NUM_GENERATIONS; i++) {
1367 /* generations higher than 2 get the same threshold */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001368 _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001369 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001370
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001371 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001372}
1373
/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    /* Build an (int, int, int) tuple of the three generation thresholds. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].threshold,
                         _PyRuntime.gc.generations[1].threshold,
                         _PyRuntime.gc.generations[2].threshold);
}
1389
/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    /* Build an (int, int, int) tuple of the per-generation counters. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].count,
                         _PyRuntime.gc.generations[1].count,
                         _PyRuntime.gc.generations[2].count);
}
1405
Neil Schemenauer48c70342001-08-09 15:38:31 +00001406static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001407referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001408{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001409 Py_ssize_t i;
1410 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1411 if (PyTuple_GET_ITEM(objs, i) == obj)
1412 return 1;
1413 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001414}
1415
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001416static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001417gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001418{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001419 PyGC_Head *gc;
1420 PyObject *obj;
1421 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001422 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001423 obj = FROM_GC(gc);
1424 traverse = Py_TYPE(obj)->tp_traverse;
1425 if (obj == objs || obj == resultlist)
1426 continue;
1427 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1428 if (PyList_Append(resultlist, obj) < 0)
1429 return 0; /* error */
1430 }
1431 }
1432 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001433}
1434
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001435PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001436"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001437Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001438
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001439static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001440gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001441{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001442 int i;
1443 PyObject *result = PyList_New(0);
1444 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001445
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001446 for (i = 0; i < NUM_GENERATIONS; i++) {
1447 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1448 Py_DECREF(result);
1449 return NULL;
1450 }
1451 }
1452 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001453}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001454
Tim Peters0f81ab62003-04-08 16:39:48 +00001455/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001456static int
Tim Peters730f5532003-04-08 17:17:17 +00001457referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001458{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001459 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001460}
1461
Tim Peters730f5532003-04-08 17:17:17 +00001462PyDoc_STRVAR(gc_get_referents__doc__,
1463"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001464Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001465
1466static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001467gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001468{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001469 Py_ssize_t i;
1470 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001471
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001472 if (result == NULL)
1473 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001474
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001475 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1476 traverseproc traverse;
1477 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001478
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001479 if (! PyObject_IS_GC(obj))
1480 continue;
1481 traverse = Py_TYPE(obj)->tp_traverse;
1482 if (! traverse)
1483 continue;
1484 if (traverse(obj, (visitproc)referentsvisit, result)) {
1485 Py_DECREF(result);
1486 return NULL;
1487 }
1488 }
1489 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001490}
1491
Serhiy Storchaka93260282017-02-04 11:19:59 +02001492/*[clinic input]
1493gc.get_objects
1494
1495Return a list of objects tracked by the collector (excluding the list returned).
1496[clinic start generated code]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001497
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001498static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001499gc_get_objects_impl(PyObject *module)
1500/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001501{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001502 int i;
1503 PyObject* result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001504
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001505 result = PyList_New(0);
1506 if (result == NULL)
1507 return NULL;
1508 for (i = 0; i < NUM_GENERATIONS; i++) {
1509 if (append_objects(result, GEN_HEAD(i))) {
1510 Py_DECREF(result);
1511 return NULL;
1512 }
1513 }
1514 return result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001515}
1516
/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = _PyRuntime.gc.generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        /* "sn" format pairs: C string key, Py_ssize_t value. */
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1563
1564
Serhiy Storchaka93260282017-02-04 11:19:59 +02001565/*[clinic input]
1566gc.is_tracked
1567
1568 obj: object
1569 /
1570
1571Returns true if the object is tracked by the garbage collector.
1572
1573Simple atomic objects will return false.
1574[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001575
1576static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001577gc_is_tracked(PyObject *module, PyObject *obj)
1578/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001579{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001580 PyObject *result;
1581
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001582 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001583 result = Py_True;
1584 else
1585 result = Py_False;
1586 Py_INCREF(result);
1587 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001588}
1589
brainfvckc75edab2017-10-16 12:49:41 -07001590/*[clinic input]
1591gc.freeze
1592
1593Freeze all current tracked objects and ignore them for future collections.
1594
1595This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
1596Note: collection before a POSIX fork() call may free pages for future allocation
1597which can cause copy-on-write.
1598[clinic start generated code]*/
1599
1600static PyObject *
1601gc_freeze_impl(PyObject *module)
1602/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
1603{
1604 for (int i = 0; i < NUM_GENERATIONS; ++i) {
1605 gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
1606 _PyRuntime.gc.generations[i].count = 0;
1607 }
1608 Py_RETURN_NONE;
1609}
1610
/*[clinic input]
gc.unfreeze

Unfreeze all objects in the permanent generation.

Put all objects in the permanent generation back into oldest generation.
[clinic start generated code]*/

static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
    /* Inverse of gc.freeze(): merge the permanent-generation list back
       into the oldest regular generation. */
    gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
    Py_RETURN_NONE;
}
1626
/*[clinic input]
gc.get_freeze_count -> Py_ssize_t

Return the number of objects in the permanent generation.
[clinic start generated code]*/

static Py_ssize_t
gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
    /* Count the objects currently parked by gc.freeze(). */
    return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
}
1639
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001640
/* Module docstring, exposed as gc.__doc__ (see gcmodule below). */
PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current the collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n"
"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001661
/* Method table.  The *_METHODDEF entries are generated by Argument Clinic
   (see the [clinic input] blocks above); set_threshold, get_referrers and
   get_referents still use hand-written METH_VARARGS wrappers. */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold", gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers", gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents", gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL, NULL} /* Sentinel */
};
1684
/* Module definition.  m_size is -1: the module keeps no per-module state;
   all collector state lives in _PyRuntime.gc. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc", /* m_name */
    gc__doc__, /* m_doc */
    -1, /* m_size */
    GcMethods, /* m_methods */
    NULL, /* m_reload */
    NULL, /* m_traverse */
    NULL, /* m_clear */
    NULL /* m_free */
};
1696
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001697PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001698PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001699{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001700 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001701
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001702 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001703
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001704 if (m == NULL)
1705 return NULL;
Tim Peters11558872003-04-06 23:30:52 +00001706
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001707 if (_PyRuntime.gc.garbage == NULL) {
1708 _PyRuntime.gc.garbage = PyList_New(0);
1709 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001710 return NULL;
1711 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001712 Py_INCREF(_PyRuntime.gc.garbage);
1713 if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001714 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001715
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001716 if (_PyRuntime.gc.callbacks == NULL) {
1717 _PyRuntime.gc.callbacks = PyList_New(0);
1718 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001719 return NULL;
1720 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001721 Py_INCREF(_PyRuntime.gc.callbacks);
1722 if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001723 return NULL;
1724
Martin v. Löwis1a214512008-06-11 05:26:20 +00001725#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001726 ADD_INT(DEBUG_STATS);
1727 ADD_INT(DEBUG_COLLECTABLE);
1728 ADD_INT(DEBUG_UNCOLLECTABLE);
1729 ADD_INT(DEBUG_SAVEALL);
1730 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001731#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001732 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001733}
1734
Guido van Rossume13ddc92003-04-17 17:29:22 +00001735/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001736Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001737PyGC_Collect(void)
1738{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001739 Py_ssize_t n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001740
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001741 if (_PyRuntime.gc.collecting)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001742 n = 0; /* already collecting, don't do anything */
1743 else {
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001744 PyObject *exc, *value, *tb;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001745 _PyRuntime.gc.collecting = 1;
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001746 PyErr_Fetch(&exc, &value, &tb);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001747 n = collect_with_callback(NUM_GENERATIONS - 1);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001748 PyErr_Restore(exc, value, tb);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001749 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001750 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001751
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001752 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001753}
1754
Antoine Pitroufef34e32013-05-19 01:11:58 +02001755Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001756_PyGC_CollectIfEnabled(void)
1757{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001758 if (!_PyRuntime.gc.enabled)
Łukasz Langafef7e942016-09-09 21:47:46 -07001759 return 0;
1760
1761 return PyGC_Collect();
1762}
1763
1764Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02001765_PyGC_CollectNoFail(void)
1766{
1767 Py_ssize_t n;
1768
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001769 assert(!PyErr_Occurred());
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001770 /* Ideally, this function is only called on interpreter shutdown,
1771 and therefore not recursively. Unfortunately, when there are daemon
1772 threads, a daemon thread can start a cyclic garbage collection
1773 during interpreter shutdown (and then never finish it).
1774 See http://bugs.python.org/issue8713#msg195178 for an example.
1775 */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001776 if (_PyRuntime.gc.collecting)
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001777 n = 0;
1778 else {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001779 _PyRuntime.gc.collecting = 1;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001780 n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001781 _PyRuntime.gc.collecting = 0;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001782 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001783 return n;
1784}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001785
/* At interpreter shutdown, warn about objects left in gc.garbage.
   Emits a ResourceWarning (and, with DEBUG_UNCOLLECTABLE, the repr of
   gc.garbage) directly, because the warnings machinery may already be
   torn down at this point. */
void
_PyGC_DumpShutdownStats(void)
{
    /* With DEBUG_SAVEALL the garbage list is populated deliberately,
       so there is nothing to warn about. */
    if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
        && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
        const char *message;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(_PyRuntime.gc.garbage)))
            PyErr_WriteUnraisable(NULL);
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            repr = PyObject_Repr(_PyRuntime.gc.garbage);
            /* Encode to the filesystem encoding so raw bytes can be
               written to stderr without further conversions. */
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
            else {
                PySys_WriteStderr(
                    " %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}
1821
/* Finalize the collector's module-level state: drop the reference to the
   callbacks list created in PyInit_gc. */
void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}
1827
/* for debugging */
/* Dump (via _PyObject_Dump) the object whose GC header is g. */
void
_PyGC_Dump(PyGC_Head *g)
{
    _PyObject_Dump(FROM_GC(g));
}
1834
Neil Schemenauer43411b52001-08-30 00:05:51 +00001835/* extension modules might be compiled with GC support so these
1836 functions must always be available */
1837
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001838#undef PyObject_GC_Track
1839#undef PyObject_GC_UnTrack
1840#undef PyObject_GC_Del
1841#undef _PyObject_GC_Malloc
1842
/* Public, linkable wrapper around the _PyObject_GC_TRACK macro (the macro
   version is #undef'd above so extension modules get a real function). */
void
PyObject_GC_Track(void *op)
{
    _PyObject_GC_TRACK(op);
}
1848
/* Public wrapper around _PyObject_GC_UNTRACK; safe to call on an object
   that is already untracked (see the trashcan note below). */
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
}
1859
/* Core allocator for GC objects: allocate a PyGC_Head immediately followed
   by basicsize bytes for the object proper, and return a pointer to the
   object part.  use_calloc selects zeroed (calloc) vs uninitialized
   (malloc) memory.  May trigger a collection when generation 0 exceeds
   its threshold.  Returns NULL with MemoryError on failure. */
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    /* Guard the size addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    assert(((uintptr_t)g & 3) == 0); // g must be aligned 4bytes boundary
    g->_gc_next = 0;
    g->_gc_prev = 0;
    _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
    /* Trigger a collection pass when the youngest generation exceeds its
       threshold, unless collection is disabled, already running, or an
       exception is pending. */
    if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
        _PyRuntime.gc.enabled &&
        _PyRuntime.gc.generations[0].threshold &&
        !_PyRuntime.gc.collecting &&
        !PyErr_Occurred()) {
        _PyRuntime.gc.collecting = 1;
        collect_generations();
        _PyRuntime.gc.collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
1891
/* Allocate a GC object with uninitialized memory (malloc variant). */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1897
/* Allocate a GC object with zero-initialized memory (calloc variant). */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1903
/* Allocate and initialize a fixed-size GC object of type tp.  The object
   is not tracked by the collector yet.  Returns NULL on failure. */
PyObject *
_PyObject_GC_New(PyTypeObject *tp)
{
    PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
    if (op != NULL)
        op = PyObject_INIT(op, tp);
    return op;
}
1912
/* Allocate and initialize a variable-size GC object of type tp holding
   nitems items.  Not yet tracked.  Returns NULL on failure (with
   SystemError for negative nitems, MemoryError otherwise). */
PyVarObject *
_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{
    size_t size;
    PyVarObject *op;

    if (nitems < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    size = _PyObject_VAR_SIZE(tp, nitems);
    op = (PyVarObject *) _PyObject_GC_Malloc(size);
    if (op != NULL)
        op = PyObject_INIT_VAR(op, tp, nitems);
    return op;
}
1929
/* Resize a variable-size GC object in place to hold nitems items.
   The object must NOT be tracked (resizing would otherwise move memory
   that the generation lists point into).  On failure returns NULL with
   MemoryError; the original object is left untouched by a failed realloc. */
PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    assert(!_PyObject_GC_IS_TRACKED(op));
    /* Guard the size addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
1945
/* Free a GC-allocated object: unlink it from its generation list if it is
   still tracked, balance the generation-0 allocation counter bumped in
   _PyObject_GC_Alloc, and release the PyGC_Head+object block. */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (_PyObject_GC_IS_TRACKED(op)) {
        gc_list_remove(g);
    }
    if (_PyRuntime.gc.generations[0].count > 0) {
        _PyRuntime.gc.generations[0].count--;
    }
    PyObject_FREE(g);
}