blob: 2e19fe4b36460d8f5cf735b12ab93a9a52fa4ba8 [file] [log] [blame]
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001/*
Tim Peters88396172002-06-30 17:56:40 +00002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00003 Reference Cycle Garbage Collection
4 ==================================
5
Neil Schemenauerb2c2c9e2000-10-04 16:34:09 +00006 Neil Schemenauer <nas@arctrix.com>
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00007
8 Based on a post on the python-dev list. Ideas from Guido van Rossum,
9 Eric Tiedemann, and various others.
10
Neil Schemenauer43411b52001-08-30 00:05:51 +000011 http://www.arctrix.com/nas/python/gc/
Neil Schemenauera7024e92008-07-15 19:24:01 +000012
13 The following mailing list threads provide a historical perspective on
14 the design of this module. Note that a fair amount of refinement has
15 occurred since those discussions.
16
17 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19 http://mail.python.org/pipermail/python-dev/2000-March/002497.html
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000020
21 For a highlevel view of the collection process, read the collect
22 function.
23
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000024*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Yury Selivanovf23746a2018-01-22 19:11:18 -050027#include "internal/context.h"
Eric Snow2ebc5ce2017-09-07 23:51:28 -060028#include "internal/mem.h"
29#include "internal/pystate.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000030#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070031#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010032#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000033
Serhiy Storchaka93260282017-02-04 11:19:59 +020034/*[clinic input]
35module gc
36[clinic start generated code]*/
37/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
38
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090039#define GC_DEBUG (0) /* Enable more asserts */
40
41#define GC_NEXT _PyGCHead_NEXT
42#define GC_PREV _PyGCHead_PREV
43
44// update_refs() set this bit for all objects in current generation.
// subtract_refs() and move_unreachable() use this to distinguish
// whether a visited object belongs to the generation being collected.
47//
48// move_unreachable() removes this flag from reachable objects.
49// Only unreachable objects have this flag.
50//
51// No objects in interpreter have this flag after GC ends.
52#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING
53
54// Lowest bit of _gc_next is used for UNREACHABLE flag.
55//
// This flag indicates that the object is in the unreachable list built by move_unreachable().
57//
58// Although this flag is used only in move_unreachable(), move_unreachable()
59// doesn't clear this flag to skip unnecessary iteration.
60// move_legacy_finalizers() removes this flag instead.
// Between those two calls, the unreachable list is not a normal list and
// most gc_list_* functions cannot be used on it.
63#define NEXT_MASK_UNREACHABLE (1)
64
Victor Stinner626bff82018-10-25 17:31:10 +020065/* Get an object's GC head */
66#define AS_GC(o) ((PyGC_Head *)(o)-1)
67
68/* Get the object given the GC head */
69#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
70
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090071static inline int
72gc_is_collecting(PyGC_Head *g)
73{
74 return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
75}
76
77static inline void
78gc_clear_collecting(PyGC_Head *g)
79{
80 g->_gc_prev &= ~PREV_MASK_COLLECTING;
81}
82
83static inline Py_ssize_t
84gc_get_refs(PyGC_Head *g)
85{
86 return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
87}
88
89static inline void
90gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
91{
92 g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
93 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
94}
95
96static inline void
97gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
98{
99 g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
100 | PREV_MASK_COLLECTING
101 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
102}
103
104static inline void
105gc_decref(PyGC_Head *g)
106{
Victor Stinner626bff82018-10-25 17:31:10 +0200107 _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
108 gc_get_refs(g) > 0,
109 "refcount is too small");
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900110 g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
111}
112
Tim Peters6fc13d92002-07-02 18:12:35 +0000113/* Python string to use if unhandled exception occurs */
Tim Petersbf384c22003-04-06 00:11:39 +0000114static PyObject *gc_str = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +0000115
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000116/* set for debugging information */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000117#define DEBUG_STATS (1<<0) /* print collection statistics */
118#define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
119#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
120#define DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
121#define DEBUG_LEAK DEBUG_COLLECTABLE | \
122 DEBUG_UNCOLLECTABLE | \
123 DEBUG_SAVEALL
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000124
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600125#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
Antoine Pitroud4156c12012-10-30 22:43:19 +0100126
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600127void
128_PyGC_Initialize(struct _gc_runtime_state *state)
129{
130 state->enabled = 1; /* automatic collection enabled? */
131
132#define _GEN_HEAD(n) (&state->generations[n].head)
133 struct gc_generation generations[NUM_GENERATIONS] = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900134 /* PyGC_Head, threshold, count */
135 {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
136 {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0},
137 {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0},
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600138 };
139 for (int i = 0; i < NUM_GENERATIONS; i++) {
140 state->generations[i] = generations[i];
141 };
142 state->generation0 = GEN_HEAD(0);
brainfvckc75edab2017-10-16 12:49:41 -0700143 struct gc_generation permanent_generation = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900144 {(uintptr_t)&state->permanent_generation.head,
145 (uintptr_t)&state->permanent_generation.head}, 0, 0
brainfvckc75edab2017-10-16 12:49:41 -0700146 };
147 state->permanent_generation = permanent_generation;
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600148}
Antoine Pitroud4156c12012-10-30 22:43:19 +0100149
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900150/*
151_gc_prev values
152---------------
Neil Schemenauer43411b52001-08-30 00:05:51 +0000153
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900154Between collections, _gc_prev is used for doubly linked list.
Tim Peters6fc13d92002-07-02 18:12:35 +0000155
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900156Lowest two bits of _gc_prev are used for flags.
157PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends
158or _PyObject_GC_UNTRACK() is called.
Tim Peters6fc13d92002-07-02 18:12:35 +0000159
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900160During a collection, _gc_prev is temporary used for gc_refs, and the gc list
161is singly linked until _gc_prev is restored.
Tim Peters6fc13d92002-07-02 18:12:35 +0000162
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900163gc_refs
Tim Peters6fc13d92002-07-02 18:12:35 +0000164 At the start of a collection, update_refs() copies the true refcount
165 to gc_refs, for each object in the generation being collected.
166 subtract_refs() then adjusts gc_refs so that it equals the number of
167 times an object is referenced directly from outside the generation
168 being collected.
Tim Peters6fc13d92002-07-02 18:12:35 +0000169
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900170PREV_MASK_COLLECTING
171 Objects in generation being collected are marked PREV_MASK_COLLECTING in
172 update_refs().
173
174
175_gc_next values
176---------------
177
178_gc_next takes these values:
179
1800
181 The object is not tracked
182
183!= 0
184 Pointer to the next object in the GC list.
185 Additionally, lowest bit is used temporary for
186 NEXT_MASK_UNREACHABLE flag described below.
187
188NEXT_MASK_UNREACHABLE
Tim Peters6fc13d92002-07-02 18:12:35 +0000189 move_unreachable() then moves objects not reachable (whether directly or
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900190 indirectly) from outside the generation into an "unreachable" set and
191 set this flag.
Tim Peters6fc13d92002-07-02 18:12:35 +0000192
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900193 Objects that are found to be reachable have gc_refs set to 1.
194 When this flag is set for the reachable object, the object must be in
195 "unreachable" set.
196 The flag is unset and the object is moved back to "reachable" set.
197
198 move_legacy_finalizers() will remove this flag from "unreachable" set.
Tim Peters6fc13d92002-07-02 18:12:35 +0000199*/
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000200
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000201/*** list functions ***/
202
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900203static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000204gc_list_init(PyGC_Head *list)
205{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900206 // List header must not have flags.
207 // We can assign pointer by simple cast.
208 list->_gc_prev = (uintptr_t)list;
209 list->_gc_next = (uintptr_t)list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000210}
211
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900212static inline int
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000213gc_list_is_empty(PyGC_Head *list)
214{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900215 return (list->_gc_next == (uintptr_t)list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000216}
217
Tim Peterse2d59182004-11-01 01:39:08 +0000218/* Append `node` to `list`. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900219static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000220gc_list_append(PyGC_Head *node, PyGC_Head *list)
221{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900222 PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
223
224 // last <-> node
225 _PyGCHead_SET_PREV(node, last);
226 _PyGCHead_SET_NEXT(last, node);
227
228 // node <-> list
229 _PyGCHead_SET_NEXT(node, list);
230 list->_gc_prev = (uintptr_t)node;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000231}
232
Tim Peterse2d59182004-11-01 01:39:08 +0000233/* Remove `node` from the gc list it's currently in. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900234static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000235gc_list_remove(PyGC_Head *node)
236{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900237 PyGC_Head *prev = GC_PREV(node);
238 PyGC_Head *next = GC_NEXT(node);
239
240 _PyGCHead_SET_NEXT(prev, next);
241 _PyGCHead_SET_PREV(next, prev);
242
243 node->_gc_next = 0; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000244}
245
Tim Peterse2d59182004-11-01 01:39:08 +0000246/* Move `node` from the gc list it's currently in (which is not explicitly
247 * named here) to the end of `list`. This is semantically the same as
248 * gc_list_remove(node) followed by gc_list_append(node, list).
249 */
250static void
251gc_list_move(PyGC_Head *node, PyGC_Head *list)
252{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000253 /* Unlink from current list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900254 PyGC_Head *from_prev = GC_PREV(node);
255 PyGC_Head *from_next = GC_NEXT(node);
256 _PyGCHead_SET_NEXT(from_prev, from_next);
257 _PyGCHead_SET_PREV(from_next, from_prev);
258
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000259 /* Relink at end of new list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900260 // list must not have flags. So we can skip macros.
261 PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
262 _PyGCHead_SET_PREV(node, to_prev);
263 _PyGCHead_SET_NEXT(to_prev, node);
264 list->_gc_prev = (uintptr_t)node;
265 _PyGCHead_SET_NEXT(node, list);
Tim Peterse2d59182004-11-01 01:39:08 +0000266}
267
268/* append list `from` onto list `to`; `from` becomes an empty list */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000269static void
270gc_list_merge(PyGC_Head *from, PyGC_Head *to)
271{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000272 assert(from != to);
273 if (!gc_list_is_empty(from)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900274 PyGC_Head *to_tail = GC_PREV(to);
275 PyGC_Head *from_head = GC_NEXT(from);
276 PyGC_Head *from_tail = GC_PREV(from);
277 assert(from_head != from);
278 assert(from_tail != from);
279
280 _PyGCHead_SET_NEXT(to_tail, from_head);
281 _PyGCHead_SET_PREV(from_head, to_tail);
282
283 _PyGCHead_SET_NEXT(from_tail, to);
284 _PyGCHead_SET_PREV(to, from_tail);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000285 }
286 gc_list_init(from);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000287}
288
Neal Norwitz7b216c52006-03-04 20:01:53 +0000289static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000290gc_list_size(PyGC_Head *list)
291{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000292 PyGC_Head *gc;
293 Py_ssize_t n = 0;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900294 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000295 n++;
296 }
297 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000298}
299
Tim Peters259272b2003-04-06 19:41:39 +0000300/* Append objects in a GC list to a Python list.
301 * Return 0 if all OK, < 0 if error (out of memory for list).
302 */
303static int
304append_objects(PyObject *py_list, PyGC_Head *gc_list)
305{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000306 PyGC_Head *gc;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900307 for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000308 PyObject *op = FROM_GC(gc);
309 if (op != py_list) {
310 if (PyList_Append(py_list, op)) {
311 return -1; /* exception */
312 }
313 }
314 }
315 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000316}
317
#if GC_DEBUG
// validate_list checks list consistency. And it works as document
// describing when expected_mask is set / unset.
// `expected_mask` is either 0 or PREV_MASK_COLLECTING; every node in the
// list must carry exactly that COLLECTING state.
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    while (gc != head) {
        // Forward link must be valid, and the back link must agree with
        // the node we just came from (i.e. the list is doubly linked).
        assert(GC_NEXT(gc) != NULL);
        assert(GC_PREV(gc) == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
        prev = gc;
        gc = GC_NEXT(gc);
    }
    // After a full traversal we are back at the header; its back link
    // must point at the last real node visited.
    assert(prev == GC_PREV(head));
}
#else
// In non-debug builds, validation compiles away to nothing.
#define validate_list(x,y) do{}while(0)
#endif
338
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000339/*** end of list stuff ***/
340
341
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900342/* Set all gc_refs = ob_refcnt. After this, gc_refs is > 0 and
343 * PREV_MASK_COLLECTING bit is set for all objects in containers.
Tim Peters88396172002-06-30 17:56:40 +0000344 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000345static void
346update_refs(PyGC_Head *containers)
347{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900348 PyGC_Head *gc = GC_NEXT(containers);
349 for (; gc != containers; gc = GC_NEXT(gc)) {
350 gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000351 /* Python's cyclic gc should never see an incoming refcount
352 * of 0: if something decref'ed to 0, it should have been
353 * deallocated immediately at that time.
354 * Possible cause (if the assert triggers): a tp_dealloc
355 * routine left a gc-aware object tracked during its teardown
356 * phase, and did something-- or allowed something to happen --
357 * that called back into Python. gc can trigger then, and may
358 * see the still-tracked dying object. Before this assert
359 * was added, such mistakes went on to allow gc to try to
360 * delete the object again. In a debug build, that caused
361 * a mysterious segfault, when _Py_ForgetReference tried
362 * to remove the object from the doubly-linked list of all
363 * objects a second time. In a release build, an actual
364 * double deallocation occurred, which leads to corruption
365 * of the allocator's internal bookkeeping pointers. That's
366 * so serious that maybe this should be a release-build
367 * check instead of an assert?
368 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900369 assert(gc_get_refs(gc) != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000370 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000371}
372
Tim Peters19b74c72002-07-01 03:52:19 +0000373/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000374static int
375visit_decref(PyObject *op, void *data)
376{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000377 assert(op != NULL);
378 if (PyObject_IS_GC(op)) {
379 PyGC_Head *gc = AS_GC(op);
380 /* We're only interested in gc_refs for objects in the
381 * generation being collected, which can be recognized
382 * because only they have positive gc_refs.
383 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900384 if (gc_is_collecting(gc)) {
385 gc_decref(gc);
386 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000387 }
388 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000389}
390
/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
 * for all objects in containers.  The objects with gc_refs > 0 are
 * directly reachable from outside the generation being collected, and so
 * can't be collected.  Objects outside the generation (no COLLECTING
 * flag) are ignored entirely by visit_decref.
 */
static void
subtract_refs(PyGC_Head *containers)
{
    traverseproc traverse;
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        /* tp_traverse calls visit_decref once per reference this object
         * holds, cancelling out references internal to the generation. */
        traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
        (void) traverse(FROM_GC(gc),
                       (visitproc)visit_decref,
                       NULL);
    }
}
408
/* A traversal callback for move_unreachable. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore untracked objects and objects in other generation.
    // (_gc_next == 0 means untracked; a clear COLLECTING flag means the
    // object is not part of the generation being collected.)
    if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
        return 0;
    }

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from unreachable list because the generic
        // list functions cannot be used: _gc_next entries in the
        // unreachable list still carry the NEXT_MASK_UNREACHABLE bit.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        assert(prev->_gc_next & NEXT_MASK_UNREACHABLE);
        assert(next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next; // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it. All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        assert(gc_refs > 0);
    }
    return 0;
}
460
/* Move the unreachable objects from young to unreachable. After this,
 * all objects in young don't have PREV_MASK_COLLECTING flag and
 * unreachable have the flag.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 *
 * This function restores _gc_prev pointer. young and unreachable are
 * doubly linked list after this function.
 * But _gc_next in unreachable list has NEXT_MASK_UNREACHABLE flag.
 * So we can not use most gc_list_* functions for unreachable until we
 * remove the flag.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous elem in the young list, used for restoring gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants: all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE. All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'. Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it. Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            assert(gc_get_refs(gc) > 0);
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc. Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                    (visitproc)visit_reachable,
                    (void *)young);
            // relink gc_prev to prev element (gc_refs clobbered it).
            _PyGCHead_SET_PREV(gc, prev);
            // gc is not COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable. To make progress,
             * assume it is. gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need to gc->next->prev = prev because it is single linked.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we use
            // NEXT_MASK_UNREACHABLE here.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in unreachable set has
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may set the flag on the unreachable head too.
            // move_legacy_finalizers() should care about it.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must be last element remained in the list.
    young->_gc_prev = (uintptr_t)prev;
}
542
543static void
544untrack_tuples(PyGC_Head *head)
545{
546 PyGC_Head *next, *gc = GC_NEXT(head);
547 while (gc != head) {
548 PyObject *op = FROM_GC(gc);
549 next = GC_NEXT(gc);
550 if (PyTuple_CheckExact(op)) {
551 _PyTuple_MaybeUntrack(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000552 }
553 gc = next;
554 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000555}
556
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200557/* Try to untrack all currently tracked dictionaries */
558static void
559untrack_dicts(PyGC_Head *head)
560{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900561 PyGC_Head *next, *gc = GC_NEXT(head);
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200562 while (gc != head) {
563 PyObject *op = FROM_GC(gc);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900564 next = GC_NEXT(gc);
565 if (PyDict_CheckExact(op)) {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200566 _PyDict_MaybeUntrack(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900567 }
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200568 gc = next;
569 }
570}
571
Antoine Pitrou796564c2013-07-30 19:59:21 +0200572/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000573static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200574has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000575{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200576 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000577}
578
/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes NEXT_MASK_UNREACHABLE flag
 * from _gc_next in unreachable, turning it back into a normal list.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    // The list head itself may carry the flag too (set unconditionally
    // by move_unreachable); clear it before walking the list.
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;

    /* March over unreachable. Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        // Strip the flag before computing the real next pointer.
        assert(gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            // Object leaves the collection set; it won't be torn down.
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}
606
Antoine Pitrou796564c2013-07-30 19:59:21 +0200607/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000608static int
609visit_move(PyObject *op, PyGC_Head *tolist)
610{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000611 if (PyObject_IS_GC(op)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900612 PyGC_Head *gc = AS_GC(op);
613 if (gc_is_collecting(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000614 gc_list_move(gc, tolist);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900615 gc_clear_collecting(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000616 }
617 }
618 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000619}
620
621/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000622 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000623 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000624static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200625move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000626{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000627 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900628 PyGC_Head *gc = GC_NEXT(finalizers);
629 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000630 /* Note that the finalizers list may grow during this. */
631 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
632 (void) traverse(FROM_GC(gc),
633 (visitproc)visit_move,
634 (void *)finalizers);
635 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000636}
637
Tim Petersead8b7a2004-10-30 23:09:22 +0000638/* Clear all weakrefs to unreachable objects, and if such a weakref has a
639 * callback, invoke it if necessary. Note that it's possible for such
640 * weakrefs to be outside the unreachable set -- indeed, those are precisely
641 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
642 * overview & some details. Some weakrefs with callbacks may be reclaimed
643 * directly by this routine; the number reclaimed is the return value. Other
644 * weakrefs with callbacks may be moved into the `old` generation. Objects
645 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
646 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
647 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000648 */
/* Clear all weakrefs pointing at objects in `unreachable`, and invoke the
 * weakref callbacks that are safe to run.  `old` is the list that objects
 * which survive (here: weakref objects kept alive by their callbacks) are
 * moved to.  Returns the number of weakref objects that were freed as a
 * side effect of invoking callbacks, so collect() can account for them.
 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;

    gc_list_init(&wrcb_to_call);

    /* Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        next = GC_NEXT(gc);     /* grab now: gc may be unlinked below */

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                                PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;                  /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            assert(wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            assert(wr->wr_object == Py_None);
            if (wr->wr_callback == NULL)
                continue;                       /* no callback */

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (gc_is_collecting(AS_GC(wr))) {
                /* wr is itself in the unreachable set: skip its callback
                 * per the reasoning above. */
                continue;
            }

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        gc = (PyGC_Head*)wrcb_to_call._gc_next;
        op = FROM_GC(gc);
        assert(PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        assert(callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else {
            /* the Py_DECREF above deallocated the weakref */
            ++num_freed;
        }
    }

    return num_freed;
}
789
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000790static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200791debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000792{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100793 PySys_FormatStderr("gc: %s <%s %p>\n",
794 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000795}
796
Antoine Pitrou796564c2013-07-30 19:59:21 +0200797/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000798 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000799 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
800 * garbage list (a Python list), else only the objects in finalizers with
801 * __del__ methods are appended to garbage. All objects in finalizers are
802 * merged into the old list regardless.
Tim Petersbf384c22003-04-06 00:11:39 +0000803 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300804static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200805handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000806{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900807 PyGC_Head *gc = GC_NEXT(finalizers);
Tim Petersf6b80452003-04-07 19:21:15 +0000808
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300809 assert(!PyErr_Occurred());
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600810 if (_PyRuntime.gc.garbage == NULL) {
811 _PyRuntime.gc.garbage = PyList_New(0);
812 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000813 Py_FatalError("gc couldn't create gc.garbage list");
814 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900815 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000816 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000817
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600818 if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300819 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
820 PyErr_Clear();
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300821 break;
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300822 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000823 }
824 }
Tim Petersf6b80452003-04-07 19:21:15 +0000825
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000826 gc_list_merge(finalizers, old);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000827}
828
Tim Peters5fbc7b12014-05-08 17:42:19 -0500829/* Run first-time finalizers (if any) on all the objects in collectable.
830 * Note that this may remove some (or even all) of the objects from the
831 * list, due to refcounts falling to 0.
832 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200833static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500834finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200835{
836 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500837 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200838
Tim Peters5fbc7b12014-05-08 17:42:19 -0500839 /* While we're going through the loop, `finalize(op)` may cause op, or
840 * other objects, to be reclaimed via refcounts falling to zero. So
841 * there's little we can rely on about the structure of the input
842 * `collectable` list across iterations. For safety, we always take the
843 * first object in that list and move it to a temporary `seen` list.
844 * If objects vanish from the `collectable` and `seen` lists we don't
845 * care.
846 */
847 gc_list_init(&seen);
848
849 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900850 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200851 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500852 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200853 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500854 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
855 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900856 _PyGCHead_SET_FINALIZED(gc);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200857 Py_INCREF(op);
858 finalize(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300859 assert(!PyErr_Occurred());
Antoine Pitrou796564c2013-07-30 19:59:21 +0200860 Py_DECREF(op);
861 }
862 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500863 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200864}
865
/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer).
   Returns 0 if everything is still unreachable, -1 if any object gained
   an external reference.  As a side effect, clears the COLLECTING flag
   on every object in the list (temporarily reusing gc_prev for refcount
   bookkeeping, then restoring it). */
static int
check_garbage(PyGC_Head *collectable)
{
    int ret = 0;
    PyGC_Head *gc;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        // Use gc_refs and break gc_prev again.
        gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
        assert(gc_get_refs(gc) != 0);
    }
    /* Subtract internal references; anything left over is external. */
    subtract_refs(collectable);
    PyGC_Head *prev = collectable;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        assert(gc_get_refs(gc) >= 0);
        if (gc_get_refs(gc) != 0) {
            /* external reference found: the whole set must be kept */
            ret = -1;
        }
        // Restore gc_prev here.
        _PyGCHead_SET_PREV(gc, prev);
        gc_clear_collecting(gc);
        prev = gc;
    }
    return ret;
}
893
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000894/* Break reference cycles by clearing the containers involved. This is
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000895 * tricky business as the lists can be changing and we don't know which
Tim Peters19b74c72002-07-01 03:52:19 +0000896 * objects may be freed. It is possible I screwed something up here.
897 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000898static void
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000899delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000900{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000901 inquiry clear;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000902
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300903 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000904 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900905 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000906 PyObject *op = FROM_GC(gc);
Tim Peters88396172002-06-30 17:56:40 +0000907
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900908 assert(Py_REFCNT(FROM_GC(gc)) > 0);
909
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600910 if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300911 assert(_PyRuntime.gc.garbage != NULL);
912 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
913 PyErr_Clear();
914 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000915 }
916 else {
917 if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
918 Py_INCREF(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300919 (void) clear(op);
920 if (PyErr_Occurred()) {
921 PySys_WriteStderr("Exception ignored in tp_clear of "
922 "%.50s\n", Py_TYPE(op)->tp_name);
923 PyErr_WriteUnraisable(NULL);
924 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000925 Py_DECREF(op);
926 }
927 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900928 if (GC_NEXT(collectable) == gc) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000929 /* object is still alive, move it, it may die later */
930 gc_list_move(gc, old);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000931 }
932 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000933}
934
Christian Heimesa156e092008-02-16 07:38:31 +0000935/* Clear all free lists
936 * All free lists are cleared during the collection of the highest generation.
937 * Allocated items in the free list may keep a pymalloc arena occupied.
938 * Clearing the free lists may give back memory to the OS earlier.
939 */
940static void
941clear_freelists(void)
942{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000943 (void)PyMethod_ClearFreeList();
944 (void)PyFrame_ClearFreeList();
945 (void)PyCFunction_ClearFreeList();
946 (void)PyTuple_ClearFreeList();
947 (void)PyUnicode_ClearFreeList();
948 (void)PyFloat_ClearFreeList();
Antoine Pitrou9a812cb2011-11-15 00:00:12 +0100949 (void)PyList_ClearFreeList();
950 (void)PyDict_ClearFreeList();
Antoine Pitrou093ce9c2011-12-16 11:24:27 +0100951 (void)PySet_ClearFreeList();
Yury Selivanoveb636452016-09-08 22:01:51 -0700952 (void)PyAsyncGen_ClearFreeLists();
Yury Selivanovf23746a2018-01-22 19:11:18 -0500953 (void)PyContext_ClearFreeList();
Christian Heimesa156e092008-02-16 07:38:31 +0000954}
955
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000956/* This is the main function. Read this to understand how the
957 * collection process works. */
Neal Norwitz7b216c52006-03-04 20:01:53 +0000958static Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +0200959collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
960 int nofail)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000961{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000962 int i;
963 Py_ssize_t m = 0; /* # objects collected */
964 Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
965 PyGC_Head *young; /* the generation we are examining */
966 PyGC_Head *old; /* next older generation */
967 PyGC_Head unreachable; /* non-problematic unreachable trash */
968 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
969 PyGC_Head *gc;
Victor Stinner7181dec2015-03-27 17:47:53 +0100970 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200971
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600972 struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000973
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600974 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000975 PySys_WriteStderr("gc: collecting generation %d...\n",
976 generation);
977 PySys_WriteStderr("gc: objects in each generation:");
978 for (i = 0; i < NUM_GENERATIONS; i++)
Antoine Pitrouded3c1b2014-05-24 19:24:40 +0200979 PySys_FormatStderr(" %zd",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000980 gc_list_size(GEN_HEAD(i)));
brainfvckc75edab2017-10-16 12:49:41 -0700981 PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
982 gc_list_size(&_PyRuntime.gc.permanent_generation.head));
Victor Stinner7181dec2015-03-27 17:47:53 +0100983 t1 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200984
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000985 PySys_WriteStderr("\n");
986 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000987
Łukasz Langaa785c872016-09-09 17:37:37 -0700988 if (PyDTrace_GC_START_ENABLED())
989 PyDTrace_GC_START(generation);
990
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000991 /* update collection and allocation counters */
992 if (generation+1 < NUM_GENERATIONS)
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600993 _PyRuntime.gc.generations[generation+1].count += 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000994 for (i = 0; i <= generation; i++)
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600995 _PyRuntime.gc.generations[i].count = 0;
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000996
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000997 /* merge younger generations with one we are currently collecting */
998 for (i = 0; i < generation; i++) {
999 gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
1000 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001001
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001002 /* handy references */
1003 young = GEN_HEAD(generation);
1004 if (generation < NUM_GENERATIONS-1)
1005 old = GEN_HEAD(generation+1);
1006 else
1007 old = young;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001008
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001009 validate_list(young, 0);
1010 validate_list(old, 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001011 /* Using ob_refcnt and gc_refs, calculate which objects in the
1012 * container set are reachable from outside the set (i.e., have a
1013 * refcount greater than 0 when all the references within the
1014 * set are taken into account).
1015 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001016 update_refs(young); // gc_prev is used for gc_refs
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001017 subtract_refs(young);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001018
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001019 /* Leave everything reachable from outside young in young, and move
1020 * everything else (in young) to unreachable.
1021 * NOTE: This used to move the reachable objects into a reachable
1022 * set instead. But most things usually turn out to be reachable,
1023 * so it's more efficient to move the unreachable things.
1024 */
1025 gc_list_init(&unreachable);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001026 move_unreachable(young, &unreachable); // gc_prev is pointer again
1027 validate_list(young, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001028
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001029 untrack_tuples(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001030 /* Move reachable objects to next generation. */
1031 if (young != old) {
1032 if (generation == NUM_GENERATIONS - 2) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001033 _PyRuntime.gc.long_lived_pending += gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001034 }
1035 gc_list_merge(young, old);
1036 }
1037 else {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +02001038 /* We only untrack dicts in full collections, to avoid quadratic
1039 dict build-up. See issue #14775. */
1040 untrack_dicts(young);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001041 _PyRuntime.gc.long_lived_pending = 0;
1042 _PyRuntime.gc.long_lived_total = gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001043 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001044
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001045 /* All objects in unreachable are trash, but objects reachable from
Antoine Pitrou796564c2013-07-30 19:59:21 +02001046 * legacy finalizers (e.g. tp_del) can't safely be deleted.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001047 */
1048 gc_list_init(&finalizers);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001049 // NEXT_MASK_UNREACHABLE is cleared here.
1050 // After move_legacy_finalizers(), unreachable is normal list.
Antoine Pitrou796564c2013-07-30 19:59:21 +02001051 move_legacy_finalizers(&unreachable, &finalizers);
1052 /* finalizers contains the unreachable objects with a legacy finalizer;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001053 * unreachable objects reachable *from* those are also uncollectable,
1054 * and we move those into the finalizers list too.
1055 */
Antoine Pitrou796564c2013-07-30 19:59:21 +02001056 move_legacy_finalizer_reachable(&finalizers);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001057
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001058 validate_list(&finalizers, 0);
1059 validate_list(&unreachable, PREV_MASK_COLLECTING);
1060
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001061 /* Collect statistics on collectable objects found and print
1062 * debugging information.
1063 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001064 for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001065 m++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001066 if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001067 debug_cycle("collectable", FROM_GC(gc));
1068 }
1069 }
Tim Petersead8b7a2004-10-30 23:09:22 +00001070
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001071 /* Clear weakrefs and invoke callbacks as necessary. */
1072 m += handle_weakrefs(&unreachable, old);
Tim Petersead8b7a2004-10-30 23:09:22 +00001073
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001074 validate_list(old, 0);
1075 validate_list(&unreachable, PREV_MASK_COLLECTING);
1076
Antoine Pitrou796564c2013-07-30 19:59:21 +02001077 /* Call tp_finalize on objects which have one. */
Tim Peters5fbc7b12014-05-08 17:42:19 -05001078 finalize_garbage(&unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001079
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001080 if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
Antoine Pitrou796564c2013-07-30 19:59:21 +02001081 gc_list_merge(&unreachable, old);
1082 }
1083 else {
1084 /* Call tp_clear on objects in the unreachable set. This will cause
1085 * the reference cycles to be broken. It may also cause some objects
1086 * in finalizers to be freed.
1087 */
1088 delete_garbage(&unreachable, old);
1089 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001090
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001091 /* Collect statistics on uncollectable objects found and print
1092 * debugging information. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001093 for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001094 n++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001095 if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001096 debug_cycle("uncollectable", FROM_GC(gc));
1097 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001098 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Victor Stinner7181dec2015-03-27 17:47:53 +01001099 _PyTime_t t2 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +02001100
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001101 if (m == 0 && n == 0)
1102 PySys_WriteStderr("gc: done");
1103 else
Antoine Pitrouded3c1b2014-05-24 19:24:40 +02001104 PySys_FormatStderr(
1105 "gc: done, %zd unreachable, %zd uncollectable",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001106 n+m, n);
Victor Stinner7181dec2015-03-27 17:47:53 +01001107 PySys_WriteStderr(", %.4fs elapsed\n",
1108 _PyTime_AsSecondsDouble(t2 - t1));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001109 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001110
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001111 /* Append instances in the uncollectable set to a Python
1112 * reachable list of garbage. The programmer has to deal with
1113 * this if they insist on creating this type of structure.
1114 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001115 handle_legacy_finalizers(&finalizers, old);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001116 validate_list(old, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001117
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001118 /* Clear free list only during the collection of the highest
1119 * generation */
1120 if (generation == NUM_GENERATIONS-1) {
1121 clear_freelists();
1122 }
Christian Heimesa156e092008-02-16 07:38:31 +00001123
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001124 if (PyErr_Occurred()) {
Antoine Pitroufef34e32013-05-19 01:11:58 +02001125 if (nofail) {
1126 PyErr_Clear();
1127 }
1128 else {
1129 if (gc_str == NULL)
1130 gc_str = PyUnicode_FromString("garbage collection");
1131 PyErr_WriteUnraisable(gc_str);
1132 Py_FatalError("unexpected exception during garbage collection");
1133 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001134 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001135
Antoine Pitroud4156c12012-10-30 22:43:19 +01001136 /* Update stats */
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001137 if (n_collected)
1138 *n_collected = m;
1139 if (n_uncollectable)
1140 *n_uncollectable = n;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001141 stats->collections++;
1142 stats->collected += m;
1143 stats->uncollectable += n;
Łukasz Langaa785c872016-09-09 17:37:37 -07001144
1145 if (PyDTrace_GC_DONE_ENABLED())
1146 PyDTrace_GC_DONE(n+m);
1147
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001148 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001149 return n+m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001150}
1151
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001152/* Invoke progress callbacks to notify clients that garbage collection
1153 * is starting or stopping
1154 */
1155static void
1156invoke_gc_callback(const char *phase, int generation,
1157 Py_ssize_t collected, Py_ssize_t uncollectable)
1158{
1159 Py_ssize_t i;
1160 PyObject *info = NULL;
1161
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001162 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001163 /* we may get called very early */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001164 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001165 return;
1166 /* The local variable cannot be rebound, check it for sanity */
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001167 assert(PyList_CheckExact(_PyRuntime.gc.callbacks));
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001168 if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001169 info = Py_BuildValue("{sisnsn}",
1170 "generation", generation,
1171 "collected", collected,
1172 "uncollectable", uncollectable);
1173 if (info == NULL) {
1174 PyErr_WriteUnraisable(NULL);
1175 return;
1176 }
1177 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001178 for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
1179 PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001180 Py_INCREF(cb); /* make sure cb doesn't go away */
1181 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001182 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001183 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001184 }
1185 else {
1186 Py_DECREF(r);
1187 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001188 Py_DECREF(cb);
1189 }
1190 Py_XDECREF(info);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001191 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001192}
1193
1194/* Perform garbage collection of a generation and invoke
1195 * progress callbacks.
1196 */
1197static Py_ssize_t
1198collect_with_callback(int generation)
1199{
1200 Py_ssize_t result, collected, uncollectable;
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001201 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001202 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001203 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001204 invoke_gc_callback("stop", generation, collected, uncollectable);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001205 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001206 return result;
1207}
1208
Neal Norwitz7b216c52006-03-04 20:01:53 +00001209static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001210collect_generations(void)
1211{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001212 int i;
1213 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001214
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001215 /* Find the oldest generation (highest numbered) where the count
1216 * exceeds the threshold. Objects in the that generation and
1217 * generations younger than it will be collected. */
1218 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001219 if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001220 /* Avoid quadratic performance degradation in number
1221 of tracked objects. See comments at the beginning
1222 of this file, and issue #4074.
1223 */
1224 if (i == NUM_GENERATIONS - 1
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001225 && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001226 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001227 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001228 break;
1229 }
1230 }
1231 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001232}
1233
Serhiy Storchaka93260282017-02-04 11:19:59 +02001234#include "clinic/gcmodule.c.h"
1235
/*[clinic input]
gc.enable

Enable automatic garbage collection.
[clinic start generated code]*/

static PyObject *
gc_enable_impl(PyObject *module)
/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
{
    /* Set the runtime-wide flag; gc_isenabled_impl reads the same field. */
    _PyRuntime.gc.enabled = 1;
    Py_RETURN_NONE;
}
1249
Serhiy Storchaka93260282017-02-04 11:19:59 +02001250/*[clinic input]
1251gc.disable
1252
1253Disable automatic garbage collection.
1254[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001255
1256static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001257gc_disable_impl(PyObject *module)
1258/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001259{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001260 _PyRuntime.gc.enabled = 0;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001261 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001262}
1263
Serhiy Storchaka93260282017-02-04 11:19:59 +02001264/*[clinic input]
1265gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001266
Serhiy Storchaka93260282017-02-04 11:19:59 +02001267Returns true if automatic garbage collection is enabled.
1268[clinic start generated code]*/
1269
1270static int
1271gc_isenabled_impl(PyObject *module)
1272/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001273{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001274 return _PyRuntime.gc.enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001275}
1276
Serhiy Storchaka93260282017-02-04 11:19:59 +02001277/*[clinic input]
1278gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001279
Serhiy Storchaka93260282017-02-04 11:19:59 +02001280 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1281
1282Run the garbage collector.
1283
1284With no arguments, run a full collection. The optional argument
1285may be an integer specifying which generation to collect. A ValueError
1286is raised if the generation number is invalid.
1287
1288The number of unreachable objects is returned.
1289[clinic start generated code]*/
1290
1291static Py_ssize_t
1292gc_collect_impl(PyObject *module, int generation)
1293/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001294{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001295 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001296
Serhiy Storchaka93260282017-02-04 11:19:59 +02001297 if (generation < 0 || generation >= NUM_GENERATIONS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001298 PyErr_SetString(PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001299 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001300 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001301
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001302 if (_PyRuntime.gc.collecting)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001303 n = 0; /* already collecting, don't do anything */
1304 else {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001305 _PyRuntime.gc.collecting = 1;
Serhiy Storchaka93260282017-02-04 11:19:59 +02001306 n = collect_with_callback(generation);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001307 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001308 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001309
Serhiy Storchaka93260282017-02-04 11:19:59 +02001310 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001311}
1312
/*[clinic input]
gc.set_debug

    flags: int
        An integer that can have the following bits turned on:
          DEBUG_STATS - Print statistics during collection.
          DEBUG_COLLECTABLE - Print collectable objects found.
          DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
            found.
          DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
          DEBUG_LEAK - Debug leaking programs (everything but STATS).
    /

Set the garbage collection debugging flags.

Debugging information is written to sys.stderr.
[clinic start generated code]*/

static PyObject *
gc_set_debug_impl(PyObject *module, int flags)
/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
{
    /* Stored in the global runtime state; consulted by the collector and
       by the shutdown reporting code. */
    _PyRuntime.gc.debug = flags;

    Py_RETURN_NONE;
}
1339
/*[clinic input]
gc.get_debug -> int

Get the garbage collection debugging flags.
[clinic start generated code]*/

static int
gc_get_debug_impl(PyObject *module)
/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
{
    /* Mirror of gc_set_debug_impl(); the clinic wrapper boxes the int. */
    return _PyRuntime.gc.debug;
}
1352
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001353PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001354"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001355"\n"
1356"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001357"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001358
1359static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001360gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001361{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001362 int i;
1363 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001364 &_PyRuntime.gc.generations[0].threshold,
1365 &_PyRuntime.gc.generations[1].threshold,
1366 &_PyRuntime.gc.generations[2].threshold))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001367 return NULL;
1368 for (i = 2; i < NUM_GENERATIONS; i++) {
1369 /* generations higher than 2 get the same threshold */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001370 _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001371 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001372
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001373 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001374}
1375
/*[clinic input]
gc.get_threshold

Return the current collection thresholds.
[clinic start generated code]*/

static PyObject *
gc_get_threshold_impl(PyObject *module)
/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
{
    /* Build a 3-tuple of ints, one threshold per generation. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].threshold,
                         _PyRuntime.gc.generations[1].threshold,
                         _PyRuntime.gc.generations[2].threshold);
}
1391
/*[clinic input]
gc.get_count

Return a three-tuple of the current collection counts.
[clinic start generated code]*/

static PyObject *
gc_get_count_impl(PyObject *module)
/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
{
    /* Counts are live values, not a snapshot; they may already be stale
       by the time the caller inspects them. */
    return Py_BuildValue("(iii)",
                         _PyRuntime.gc.generations[0].count,
                         _PyRuntime.gc.generations[1].count,
                         _PyRuntime.gc.generations[2].count);
}
1407
Neil Schemenauer48c70342001-08-09 15:38:31 +00001408static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001409referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001410{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001411 Py_ssize_t i;
1412 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1413 if (PyTuple_GET_ITEM(objs, i) == obj)
1414 return 1;
1415 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001416}
1417
/* Append to `resultlist` every object on the GC list `list` whose
   tp_traverse visits any member of the tuple `objs`.
   Returns 1 on success, 0 on error (with an exception set). */
static int
gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
{
    PyGC_Head *gc;
    PyObject *obj;
    traverseproc traverse;
    for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
        obj = FROM_GC(gc);
        traverse = Py_TYPE(obj)->tp_traverse;
        /* Skip the argument tuple and the result list themselves so the
           caller's own bookkeeping never shows up as a "referrer". */
        if (obj == objs || obj == resultlist)
            continue;
        /* referrersvisit() returns non-zero on the first match, which
           propagates out of traverse() here. */
        if (traverse(obj, (visitproc)referrersvisit, objs)) {
            if (PyList_Append(resultlist, obj) < 0)
                return 0; /* error */
        }
    }
    return 1; /* no error */
}
1436
PyDoc_STRVAR(gc_get_referrers__doc__,
"get_referrers(*objs) -> list\n\
Return the list of objects that directly refer to any of objs.");

/* Implementation of gc.get_referrers(): scan every generation for
   tracked objects whose traversal reaches one of the arguments. */
static PyObject *
gc_get_referrers(PyObject *self, PyObject *args)
{
    int i;
    PyObject *result = PyList_New(0);
    if (!result) return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001456
Tim Peters0f81ab62003-04-08 16:39:48 +00001457/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001458static int
Tim Peters730f5532003-04-08 17:17:17 +00001459referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001460{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001461 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001462}
1463
PyDoc_STRVAR(gc_get_referents__doc__,
"get_referents(*objs) -> list\n\
Return the list of objects that are directly referred to by objs.");

/* Implementation of gc.get_referents(): collect, via tp_traverse, every
   object directly reachable from each argument. */
static PyObject *
gc_get_referents(PyObject *self, PyObject *args)
{
    Py_ssize_t i;
    PyObject *result = PyList_New(0);

    if (result == NULL)
        return NULL;

    for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
        traverseproc traverse;
        PyObject *obj = PyTuple_GET_ITEM(args, i);

        /* Objects without GC support or without tp_traverse have no
           referents from the collector's point of view. */
        if (! PyObject_IS_GC(obj))
            continue;
        traverse = Py_TYPE(obj)->tp_traverse;
        if (! traverse)
            continue;
        /* Non-zero from traverse() means referentsvisit()'s
           PyList_Append failed (out of memory). */
        if (traverse(obj, (visitproc)referentsvisit, result)) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
1493
/*[clinic input]
gc.get_objects

Return a list of objects tracked by the collector (excluding the list returned).
[clinic start generated code]*/

static PyObject *
gc_get_objects_impl(PyObject *module)
/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
{
    int i;
    PyObject* result;

    result = PyList_New(0);
    if (result == NULL)
        return NULL;
    /* NOTE(review): the docstring says `result` itself is excluded —
       presumably append_objects() skips it; confirm in its definition. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        if (append_objects(result, GEN_HEAD(i))) {
            Py_DECREF(result);
            return NULL;
        }
    }
    return result;
}
1518
/*[clinic input]
gc.get_stats

Return a list of dictionaries containing per-generation statistics.
[clinic start generated code]*/

static PyObject *
gc_get_stats_impl(PyObject *module)
/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
{
    int i;
    PyObject *result;
    struct gc_generation_stats stats[NUM_GENERATIONS], *st;

    /* To get consistent values despite allocations while constructing
       the result list, we use a snapshot of the running stats. */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        stats[i] = _PyRuntime.gc.generation_stats[i];
    }

    result = PyList_New(0);
    if (result == NULL)
        return NULL;

    for (i = 0; i < NUM_GENERATIONS; i++) {
        PyObject *dict;
        st = &stats[i];
        /* "sn" pairs in the format: const char * key, Py_ssize_t value. */
        dict = Py_BuildValue("{snsnsn}",
                             "collections", st->collections,
                             "collected", st->collected,
                             "uncollectable", st->uncollectable
                            );
        if (dict == NULL)
            goto error;
        /* PyList_Append does not steal the reference, so drop ours on
           both the success and failure paths. */
        if (PyList_Append(result, dict)) {
            Py_DECREF(dict);
            goto error;
        }
        Py_DECREF(dict);
    }
    return result;

error:
    Py_XDECREF(result);
    return NULL;
}
1565
1566
Serhiy Storchaka93260282017-02-04 11:19:59 +02001567/*[clinic input]
1568gc.is_tracked
1569
1570 obj: object
1571 /
1572
1573Returns true if the object is tracked by the garbage collector.
1574
1575Simple atomic objects will return false.
1576[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001577
1578static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001579gc_is_tracked(PyObject *module, PyObject *obj)
1580/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001581{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001582 PyObject *result;
1583
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001584 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001585 result = Py_True;
1586 else
1587 result = Py_False;
1588 Py_INCREF(result);
1589 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001590}
1591
/*[clinic input]
gc.freeze

Freeze all current tracked objects and ignore them for future collections.

This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
Note: collection before a POSIX fork() call may free pages for future allocation
which can cause copy-on-write.
[clinic start generated code]*/

static PyObject *
gc_freeze_impl(PyObject *module)
/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
{
    /* Move every generation's objects onto the permanent generation's
       list (which collections ignore) and reset the per-generation
       allocation counters. */
    for (int i = 0; i < NUM_GENERATIONS; ++i) {
        gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
        _PyRuntime.gc.generations[i].count = 0;
    }
    Py_RETURN_NONE;
}
1612
/*[clinic input]
gc.unfreeze

Unfreeze all objects in the permanent generation.

Put all objects in the permanent generation back into oldest generation.
[clinic start generated code]*/

static PyObject *
gc_unfreeze_impl(PyObject *module)
/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
{
    /* Inverse of gc.freeze(): splice the permanent generation back onto
       the oldest regular generation so it is collected again. */
    gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
    Py_RETURN_NONE;
}
1628
/*[clinic input]
gc.get_freeze_count -> Py_ssize_t

Return the number of objects in the permanent generation.
[clinic start generated code]*/

static Py_ssize_t
gc_get_freeze_count_impl(PyObject *module)
/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
{
    /* Walks the permanent generation's linked list; O(n) in the number
       of frozen objects. */
    return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
}
1641
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001642
/* Module docstring.  Each line summarizes one entry of GcMethods below.
   (The string is user-visible at runtime and is left byte-for-byte as-is.) */
PyDoc_STRVAR(gc__doc__,
"This module provides access to the garbage collector for reference cycles.\n"
"\n"
"enable() -- Enable automatic garbage collection.\n"
"disable() -- Disable automatic garbage collection.\n"
"isenabled() -- Returns true if automatic collection is enabled.\n"
"collect() -- Do a full collection right now.\n"
"get_count() -- Return the current collection counts.\n"
"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
"set_debug() -- Set debugging flags.\n"
"get_debug() -- Get debugging flags.\n"
"set_threshold() -- Set the collection thresholds.\n"
"get_threshold() -- Return the current the collection thresholds.\n"
"get_objects() -- Return a list of all objects tracked by the collector.\n"
"is_tracked() -- Returns true if a given object is tracked.\n"
"get_referrers() -- Return the list of objects that refer to an object.\n"
"get_referents() -- Return the list of objects that an object refers to.\n"
"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001663
/* Method table for the gc module.  The *_METHODDEF entries are generated
   by Argument Clinic; the remaining entries use hand-written
   PyArg_ParseTuple wrappers (they take *args). */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold", gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers", gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents", gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL, NULL} /* Sentinel */
};
1686
/* Module definition.  m_size == -1: the module keeps global state (in
   _PyRuntime.gc) and does not support multiple instances. */
static struct PyModuleDef gcmodule = {
    PyModuleDef_HEAD_INIT,
    "gc",              /* m_name */
    gc__doc__,         /* m_doc */
    -1,                /* m_size */
    GcMethods,         /* m_methods */
    NULL,              /* m_reload */
    NULL,              /* m_traverse */
    NULL,              /* m_clear */
    NULL               /* m_free */
};
1698
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001699PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001700PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001701{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001702 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001703
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001704 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001705
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001706 if (m == NULL)
1707 return NULL;
Tim Peters11558872003-04-06 23:30:52 +00001708
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001709 if (_PyRuntime.gc.garbage == NULL) {
1710 _PyRuntime.gc.garbage = PyList_New(0);
1711 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001712 return NULL;
1713 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001714 Py_INCREF(_PyRuntime.gc.garbage);
1715 if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001716 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001717
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001718 if (_PyRuntime.gc.callbacks == NULL) {
1719 _PyRuntime.gc.callbacks = PyList_New(0);
1720 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001721 return NULL;
1722 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001723 Py_INCREF(_PyRuntime.gc.callbacks);
1724 if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001725 return NULL;
1726
Martin v. Löwis1a214512008-06-11 05:26:20 +00001727#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001728 ADD_INT(DEBUG_STATS);
1729 ADD_INT(DEBUG_COLLECTABLE);
1730 ADD_INT(DEBUG_UNCOLLECTABLE);
1731 ADD_INT(DEBUG_SAVEALL);
1732 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001733#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001734 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001735}
1736
/* API to invoke gc.collect() from C */
Py_ssize_t
PyGC_Collect(void)
{
    Py_ssize_t n;

    if (_PyRuntime.gc.collecting)
        n = 0; /* already collecting, don't do anything */
    else {
        PyObject *exc, *value, *tb;
        _PyRuntime.gc.collecting = 1;
        /* Save and restore any pending exception: finalizers and gc
           callbacks run during collection could clobber the caller's
           error indicator. */
        PyErr_Fetch(&exc, &value, &tb);
        n = collect_with_callback(NUM_GENERATIONS - 1);
        PyErr_Restore(exc, value, tb);
        _PyRuntime.gc.collecting = 0;
    }

    return n;
}
1756
Antoine Pitroufef34e32013-05-19 01:11:58 +02001757Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001758_PyGC_CollectIfEnabled(void)
1759{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001760 if (!_PyRuntime.gc.enabled)
Łukasz Langafef7e942016-09-09 21:47:46 -07001761 return 0;
1762
1763 return PyGC_Collect();
1764}
1765
/* Run a full collection without raising: used during interpreter
   shutdown, where the normal error machinery may be unusable. */
Py_ssize_t
_PyGC_CollectNoFail(void)
{
    Py_ssize_t n;

    assert(!PyErr_Occurred());
    /* Ideally, this function is only called on interpreter shutdown,
       and therefore not recursively. Unfortunately, when there are daemon
       threads, a daemon thread can start a cyclic garbage collection
       during interpreter shutdown (and then never finish it).
       See http://bugs.python.org/issue8713#msg195178 for an example.
       */
    if (_PyRuntime.gc.collecting)
        n = 0;
    else {
        _PyRuntime.gc.collecting = 1;
        /* Calls collect() directly (not collect_with_callback()), so gc
           callbacks are bypassed; the final argument presumably selects
           the "nofail" behavior — confirm against collect()'s definition. */
        n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
        _PyRuntime.gc.collecting = 0;
    }
    return n;
}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001787
/* At interpreter shutdown, emit a ResourceWarning if uncollectable
   objects remain in gc.garbage.  With DEBUG_SAVEALL set, gc.garbage
   intentionally holds every collected object, so nothing is reported. */
void
_PyGC_DumpShutdownStats(void)
{
    if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
        && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
        const char *message;
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
            message = "gc: %zd uncollectable objects at " \
                "shutdown";
        else
            message = "gc: %zd uncollectable objects at " \
                "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
        /* PyErr_WarnFormat does too many things and we are at shutdown,
           the warnings module's dependencies (e.g. linecache) may be gone
           already. */
        if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
                                     "gc", NULL, message,
                                     PyList_GET_SIZE(_PyRuntime.gc.garbage)))
            PyErr_WriteUnraisable(NULL);
        if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
            PyObject *repr = NULL, *bytes = NULL;
            /* Encode the repr with the filesystem encoding so it can be
               written to stderr as raw bytes this late in shutdown. */
            repr = PyObject_Repr(_PyRuntime.gc.garbage);
            if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
                PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
            else {
                PySys_WriteStderr(
                    "      %s\n",
                    PyBytes_AS_STRING(bytes)
                    );
            }
            Py_XDECREF(repr);
            Py_XDECREF(bytes);
        }
    }
}
1823
/* Finalize the GC module's state.  Only the callbacks list is released
   here; gc.garbage is handled by the shutdown code (see
   _PyGC_DumpShutdownStats above). */
void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}
1829
/* for debugging */
void
_PyGC_Dump(PyGC_Head *g)
{
    /* Dump the PyObject that lives immediately after the GC header. */
    _PyObject_Dump(FROM_GC(g));
}
1836
/* extension modules might be compiled with GC support so these
   functions must always be available */

/* Undo the macro versions so the symbols below are exported as real
   functions with a stable ABI for extension modules. */
#undef PyObject_GC_Track
#undef PyObject_GC_UnTrack
#undef PyObject_GC_Del
#undef _PyObject_GC_Malloc

void
PyObject_GC_Track(void *op)
{
    /* Start tracking `op` (delegates to the internal macro). */
    _PyObject_GC_TRACK(op);
}
1850
/* Stop tracking `op`.  Safe to call on an already-untracked object. */
void
PyObject_GC_UnTrack(void *op)
{
    /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
     * call PyObject_GC_UnTrack twice on an object.
     */
    if (_PyObject_GC_IS_TRACKED(op)) {
        _PyObject_GC_UNTRACK(op);
    }
}
1861
/* Allocate a GC-supporting object of `basicsize` bytes, preceded by an
   (initially unlinked) PyGC_Head.  When `use_calloc` is non-zero the
   memory is zero-filled.  Returns the object pointer (NOT the header),
   or NULL with MemoryError set.  May trigger an automatic collection. */
static PyObject *
_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
{
    PyObject *op;
    PyGC_Head *g;
    size_t size;
    /* Guard the addition below against overflow. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return PyErr_NoMemory();
    size = sizeof(PyGC_Head) + basicsize;
    if (use_calloc)
        g = (PyGC_Head *)PyObject_Calloc(1, size);
    else
        g = (PyGC_Head *)PyObject_Malloc(size);
    if (g == NULL)
        return PyErr_NoMemory();
    assert(((uintptr_t)g & 3) == 0); // g must be aligned 4bytes boundary
    g->_gc_next = 0;
    g->_gc_prev = 0;
    _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
    /* Automatic collection triggers when generation 0 exceeds its
       threshold -- but not while disabled, already collecting, with a
       zero threshold, or with a pending exception. */
    if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
        _PyRuntime.gc.enabled &&
        _PyRuntime.gc.generations[0].threshold &&
        !_PyRuntime.gc.collecting &&
        !PyErr_Occurred()) {
        _PyRuntime.gc.collecting = 1;
        collect_generations();
        _PyRuntime.gc.collecting = 0;
    }
    op = FROM_GC(g);
    return op;
}
1893
/* Public malloc-style entry point: uninitialized (non-zeroed) memory. */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1899
/* Public calloc-style entry point: zero-filled memory. */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1905
1906PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001907_PyObject_GC_New(PyTypeObject *tp)
1908{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001909 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1910 if (op != NULL)
1911 op = PyObject_INIT(op, tp);
1912 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001913}
1914
1915PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001916_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001917{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001918 size_t size;
1919 PyVarObject *op;
1920
1921 if (nitems < 0) {
1922 PyErr_BadInternalCall();
1923 return NULL;
1924 }
1925 size = _PyObject_VAR_SIZE(tp, nitems);
1926 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001927 if (op != NULL)
1928 op = PyObject_INIT_VAR(op, tp, nitems);
1929 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001930}
1931
/* Resize a variable-size GC object to `nitems` items.  The object must
   be untracked: realloc may move the memory, which would corrupt the
   collector's linked-list pointers stored in the header. */
PyVarObject *
_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
{
    const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
    PyGC_Head *g = AS_GC(op);
    assert(!_PyObject_GC_IS_TRACKED(op));
    /* Same overflow guard as _PyObject_GC_Alloc. */
    if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
        return (PyVarObject *)PyErr_NoMemory();
    g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
    if (g == NULL)
        return (PyVarObject *)PyErr_NoMemory();
    op = (PyVarObject *) FROM_GC(g);
    Py_SIZE(op) = nitems;
    return op;
}
1947
/* Free a GC object allocated by _PyObject_GC_Malloc/Calloc.  Unlinks it
   from the collector's lists first so the collector never sees freed
   memory, and credits generation 0's allocation counter. */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (_PyObject_GC_IS_TRACKED(op)) {
        gc_list_remove(g);
    }
    if (_PyRuntime.gc.generations[0].count > 0) {
        _PyRuntime.gc.generations[0].count--;
    }
    PyObject_FREE(g);
}