blob: 48b470006c4a6e3f3781b213eb988eba586b8e73 [file] [log] [blame]
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001/*
Tim Peters88396172002-06-30 17:56:40 +00002
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00003 Reference Cycle Garbage Collection
4 ==================================
5
Neil Schemenauerb2c2c9e2000-10-04 16:34:09 +00006 Neil Schemenauer <nas@arctrix.com>
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00007
8 Based on a post on the python-dev list. Ideas from Guido van Rossum,
9 Eric Tiedemann, and various others.
10
Neil Schemenauer43411b52001-08-30 00:05:51 +000011 http://www.arctrix.com/nas/python/gc/
Neil Schemenauera7024e92008-07-15 19:24:01 +000012
13 The following mailing list threads provide a historical perspective on
14 the design of this module. Note that a fair amount of refinement has
15 occurred since those discussions.
16
17 http://mail.python.org/pipermail/python-dev/2000-March/002385.html
18 http://mail.python.org/pipermail/python-dev/2000-March/002434.html
19 http://mail.python.org/pipermail/python-dev/2000-March/002497.html
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000020
21 For a highlevel view of the collection process, read the collect
22 function.
23
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000024*/
25
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000026#include "Python.h"
Victor Stinner27e2d1f2018-11-01 00:52:28 +010027#include "pycore_context.h"
Victor Stinner621cebe2018-11-12 16:53:38 +010028#include "pycore_pymem.h"
29#include "pycore_pystate.h"
Antoine Pitrouf95a1b32010-05-09 15:52:27 +000030#include "frameobject.h" /* for PyFrame_ClearFreeList */
Łukasz Langaa785c872016-09-09 17:37:37 -070031#include "pydtrace.h"
Victor Stinner7181dec2015-03-27 17:47:53 +010032#include "pytime.h" /* for _PyTime_GetMonotonicClock() */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +000033
Serhiy Storchaka93260282017-02-04 11:19:59 +020034/*[clinic input]
35module gc
36[clinic start generated code]*/
37/*[clinic end generated code: output=da39a3ee5e6b4b0d input=b5c9690ecc842d79]*/
38
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090039#define GC_DEBUG (0) /* Enable more asserts */
40
41#define GC_NEXT _PyGCHead_NEXT
42#define GC_PREV _PyGCHead_PREV
43
// update_refs() sets this bit for all objects in the current generation.
// subtract_refs() and move_unreachable() use it to tell whether a visited
// object belongs to the generation being collected.
//
// move_unreachable() removes this flag from reachable objects.
// Only unreachable objects have this flag.
//
// No object in the interpreter has this flag after GC ends.
52#define PREV_MASK_COLLECTING _PyGC_PREV_MASK_COLLECTING
53
// The lowest bit of _gc_next is used for the UNREACHABLE flag.
//
// This flag marks an object as being in the "unreachable" list built by
// move_unreachable().
//
// Although the flag is only meaningful during move_unreachable(), that
// function does not clear it, to skip an unnecessary extra pass over the
// list; move_legacy_finalizers() removes it instead.
// In between, the unreachable list is not a normal list and most
// gc_list_* functions cannot be used on it.
63#define NEXT_MASK_UNREACHABLE (1)
64
Victor Stinner626bff82018-10-25 17:31:10 +020065/* Get an object's GC head */
66#define AS_GC(o) ((PyGC_Head *)(o)-1)
67
68/* Get the object given the GC head */
69#define FROM_GC(g) ((PyObject *)(((PyGC_Head *)g)+1))
70
INADA Naoki5ac9e6e2018-07-10 17:19:53 +090071static inline int
72gc_is_collecting(PyGC_Head *g)
73{
74 return (g->_gc_prev & PREV_MASK_COLLECTING) != 0;
75}
76
77static inline void
78gc_clear_collecting(PyGC_Head *g)
79{
80 g->_gc_prev &= ~PREV_MASK_COLLECTING;
81}
82
83static inline Py_ssize_t
84gc_get_refs(PyGC_Head *g)
85{
86 return (Py_ssize_t)(g->_gc_prev >> _PyGC_PREV_SHIFT);
87}
88
89static inline void
90gc_set_refs(PyGC_Head *g, Py_ssize_t refs)
91{
92 g->_gc_prev = (g->_gc_prev & ~_PyGC_PREV_MASK)
93 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
94}
95
96static inline void
97gc_reset_refs(PyGC_Head *g, Py_ssize_t refs)
98{
99 g->_gc_prev = (g->_gc_prev & _PyGC_PREV_MASK_FINALIZED)
100 | PREV_MASK_COLLECTING
101 | ((uintptr_t)(refs) << _PyGC_PREV_SHIFT);
102}
103
/* Decrement the gc_refs counter stored in the upper bits of _gc_prev.
 * The counter must be positive (asserted): callers only reach this for
 * objects marked COLLECTING, whose counter update_refs() seeded from the
 * true refcount via gc_reset_refs().
 */
static inline void
gc_decref(PyGC_Head *g)
{
    _PyObject_ASSERT_WITH_MSG(FROM_GC(g),
                              gc_get_refs(g) > 0,
                              "refcount is too small");
    /* Subtracting one unit at _PyGC_PREV_SHIFT cannot touch the flag
     * bits below the shift while the counter stays non-negative. */
    g->_gc_prev -= 1 << _PyGC_PREV_SHIFT;
}
112
Tim Peters6fc13d92002-07-02 18:12:35 +0000113/* Python string to use if unhandled exception occurs */
Tim Petersbf384c22003-04-06 00:11:39 +0000114static PyObject *gc_str = NULL;
Tim Peters6fc13d92002-07-02 18:12:35 +0000115
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000116/* set for debugging information */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000117#define DEBUG_STATS (1<<0) /* print collection statistics */
118#define DEBUG_COLLECTABLE (1<<1) /* print collectable objects */
119#define DEBUG_UNCOLLECTABLE (1<<2) /* print uncollectable objects */
120#define DEBUG_SAVEALL (1<<5) /* save all garbage in gc.garbage */
121#define DEBUG_LEAK DEBUG_COLLECTABLE | \
122 DEBUG_UNCOLLECTABLE | \
123 DEBUG_SAVEALL
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000124
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600125#define GEN_HEAD(n) (&_PyRuntime.gc.generations[n].head)
Antoine Pitroud4156c12012-10-30 22:43:19 +0100126
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600127void
128_PyGC_Initialize(struct _gc_runtime_state *state)
129{
130 state->enabled = 1; /* automatic collection enabled? */
131
132#define _GEN_HEAD(n) (&state->generations[n].head)
133 struct gc_generation generations[NUM_GENERATIONS] = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900134 /* PyGC_Head, threshold, count */
135 {{(uintptr_t)_GEN_HEAD(0), (uintptr_t)_GEN_HEAD(0)}, 700, 0},
136 {{(uintptr_t)_GEN_HEAD(1), (uintptr_t)_GEN_HEAD(1)}, 10, 0},
137 {{(uintptr_t)_GEN_HEAD(2), (uintptr_t)_GEN_HEAD(2)}, 10, 0},
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600138 };
139 for (int i = 0; i < NUM_GENERATIONS; i++) {
140 state->generations[i] = generations[i];
141 };
142 state->generation0 = GEN_HEAD(0);
brainfvckc75edab2017-10-16 12:49:41 -0700143 struct gc_generation permanent_generation = {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900144 {(uintptr_t)&state->permanent_generation.head,
145 (uintptr_t)&state->permanent_generation.head}, 0, 0
brainfvckc75edab2017-10-16 12:49:41 -0700146 };
147 state->permanent_generation = permanent_generation;
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600148}
Antoine Pitroud4156c12012-10-30 22:43:19 +0100149
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900150/*
151_gc_prev values
152---------------
Neil Schemenauer43411b52001-08-30 00:05:51 +0000153
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900154Between collections, _gc_prev is used for doubly linked list.
Tim Peters6fc13d92002-07-02 18:12:35 +0000155
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900156Lowest two bits of _gc_prev are used for flags.
157PREV_MASK_COLLECTING is used only while collecting and cleared before GC ends
158or _PyObject_GC_UNTRACK() is called.
Tim Peters6fc13d92002-07-02 18:12:35 +0000159
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900160During a collection, _gc_prev is temporary used for gc_refs, and the gc list
161is singly linked until _gc_prev is restored.
Tim Peters6fc13d92002-07-02 18:12:35 +0000162
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900163gc_refs
Tim Peters6fc13d92002-07-02 18:12:35 +0000164 At the start of a collection, update_refs() copies the true refcount
165 to gc_refs, for each object in the generation being collected.
166 subtract_refs() then adjusts gc_refs so that it equals the number of
167 times an object is referenced directly from outside the generation
168 being collected.
Tim Peters6fc13d92002-07-02 18:12:35 +0000169
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900170PREV_MASK_COLLECTING
171 Objects in generation being collected are marked PREV_MASK_COLLECTING in
172 update_refs().
173
174
175_gc_next values
176---------------
177
178_gc_next takes these values:
179
1800
181 The object is not tracked
182
183!= 0
184 Pointer to the next object in the GC list.
185 Additionally, lowest bit is used temporary for
186 NEXT_MASK_UNREACHABLE flag described below.
187
188NEXT_MASK_UNREACHABLE
Tim Peters6fc13d92002-07-02 18:12:35 +0000189 move_unreachable() then moves objects not reachable (whether directly or
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900190 indirectly) from outside the generation into an "unreachable" set and
191 set this flag.
Tim Peters6fc13d92002-07-02 18:12:35 +0000192
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900193 Objects that are found to be reachable have gc_refs set to 1.
194 When this flag is set for the reachable object, the object must be in
195 "unreachable" set.
196 The flag is unset and the object is moved back to "reachable" set.
197
198 move_legacy_finalizers() will remove this flag from "unreachable" set.
Tim Peters6fc13d92002-07-02 18:12:35 +0000199*/
Neil Schemenauera2b11ec2002-05-21 15:53:24 +0000200
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000201/*** list functions ***/
202
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900203static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000204gc_list_init(PyGC_Head *list)
205{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900206 // List header must not have flags.
207 // We can assign pointer by simple cast.
208 list->_gc_prev = (uintptr_t)list;
209 list->_gc_next = (uintptr_t)list;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000210}
211
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900212static inline int
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000213gc_list_is_empty(PyGC_Head *list)
214{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900215 return (list->_gc_next == (uintptr_t)list);
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000216}
217
Tim Peterse2d59182004-11-01 01:39:08 +0000218/* Append `node` to `list`. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900219static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000220gc_list_append(PyGC_Head *node, PyGC_Head *list)
221{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900222 PyGC_Head *last = (PyGC_Head *)list->_gc_prev;
223
224 // last <-> node
225 _PyGCHead_SET_PREV(node, last);
226 _PyGCHead_SET_NEXT(last, node);
227
228 // node <-> list
229 _PyGCHead_SET_NEXT(node, list);
230 list->_gc_prev = (uintptr_t)node;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000231}
232
Tim Peterse2d59182004-11-01 01:39:08 +0000233/* Remove `node` from the gc list it's currently in. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900234static inline void
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000235gc_list_remove(PyGC_Head *node)
236{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900237 PyGC_Head *prev = GC_PREV(node);
238 PyGC_Head *next = GC_NEXT(node);
239
240 _PyGCHead_SET_NEXT(prev, next);
241 _PyGCHead_SET_PREV(next, prev);
242
243 node->_gc_next = 0; /* object is not currently tracked */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000244}
245
Tim Peterse2d59182004-11-01 01:39:08 +0000246/* Move `node` from the gc list it's currently in (which is not explicitly
247 * named here) to the end of `list`. This is semantically the same as
248 * gc_list_remove(node) followed by gc_list_append(node, list).
249 */
250static void
251gc_list_move(PyGC_Head *node, PyGC_Head *list)
252{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000253 /* Unlink from current list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900254 PyGC_Head *from_prev = GC_PREV(node);
255 PyGC_Head *from_next = GC_NEXT(node);
256 _PyGCHead_SET_NEXT(from_prev, from_next);
257 _PyGCHead_SET_PREV(from_next, from_prev);
258
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000259 /* Relink at end of new list. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900260 // list must not have flags. So we can skip macros.
261 PyGC_Head *to_prev = (PyGC_Head*)list->_gc_prev;
262 _PyGCHead_SET_PREV(node, to_prev);
263 _PyGCHead_SET_NEXT(to_prev, node);
264 list->_gc_prev = (uintptr_t)node;
265 _PyGCHead_SET_NEXT(node, list);
Tim Peterse2d59182004-11-01 01:39:08 +0000266}
267
/* Append list `from` onto list `to`; `from` becomes an empty list. */
static void
gc_list_merge(PyGC_Head *from, PyGC_Head *to)
{
    assert(from != to);
    if (!gc_list_is_empty(from)) {
        /* Splice the whole span [from_head, from_tail] between `to`'s
         * current tail and the `to` header. */
        PyGC_Head *to_tail = GC_PREV(to);
        PyGC_Head *from_head = GC_NEXT(from);
        PyGC_Head *from_tail = GC_PREV(from);
        assert(from_head != from);
        assert(from_tail != from);

        _PyGCHead_SET_NEXT(to_tail, from_head);
        _PyGCHead_SET_PREV(from_head, to_tail);

        _PyGCHead_SET_NEXT(from_tail, to);
        _PyGCHead_SET_PREV(to, from_tail);
    }
    /* Reset `from` to an empty, self-linked header. */
    gc_list_init(from);
}
288
Neal Norwitz7b216c52006-03-04 20:01:53 +0000289static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000290gc_list_size(PyGC_Head *list)
291{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000292 PyGC_Head *gc;
293 Py_ssize_t n = 0;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900294 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000295 n++;
296 }
297 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000298}
299
Tim Peters259272b2003-04-06 19:41:39 +0000300/* Append objects in a GC list to a Python list.
301 * Return 0 if all OK, < 0 if error (out of memory for list).
302 */
303static int
304append_objects(PyObject *py_list, PyGC_Head *gc_list)
305{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000306 PyGC_Head *gc;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900307 for (gc = GC_NEXT(gc_list); gc != gc_list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000308 PyObject *op = FROM_GC(gc);
309 if (op != py_list) {
310 if (PyList_Append(py_list, op)) {
311 return -1; /* exception */
312 }
313 }
314 }
315 return 0;
Tim Peters259272b2003-04-06 19:41:39 +0000316}
317
#if GC_DEBUG
// validate_list checks list consistency: `head` must be a well-formed
// doubly linked list in which every node's PREV_MASK_COLLECTING bit
// equals expected_mask.  It thereby also serves as documentation of
// when the flag is expected to be set or clear.
static void
validate_list(PyGC_Head *head, uintptr_t expected_mask)
{
    PyGC_Head *prev = head;
    PyGC_Head *gc = GC_NEXT(head);
    while (gc != head) {
        assert(GC_NEXT(gc) != NULL);
        assert(GC_PREV(gc) == prev);
        assert((gc->_gc_prev & PREV_MASK_COLLECTING) == expected_mask);
        prev = gc;
        gc = GC_NEXT(gc);
    }
    assert(prev == GC_PREV(head));
}
#else
#define validate_list(x,y) do{}while(0)
#endif
338
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000339/*** end of list stuff ***/
340
341
/* Set all gc_refs = ob_refcnt.  After this, gc_refs is > 0 and
 * PREV_MASK_COLLECTING bit is set for all objects in containers.
 */
static void
update_refs(PyGC_Head *containers)
{
    PyGC_Head *gc = GC_NEXT(containers);
    for (; gc != containers; gc = GC_NEXT(gc)) {
        /* Seed gc_refs from the true refcount and set the COLLECTING
         * flag (see gc_reset_refs). */
        gc_reset_refs(gc, Py_REFCNT(FROM_GC(gc)));
        /* Python's cyclic gc should never see an incoming refcount
         * of 0:  if something decref'ed to 0, it should have been
         * deallocated immediately at that time.
         * Possible cause (if the assert triggers):  a tp_dealloc
         * routine left a gc-aware object tracked during its teardown
         * phase, and did something-- or allowed something to happen --
         * that called back into Python.  gc can trigger then, and may
         * see the still-tracked dying object.  Before this assert
         * was added, such mistakes went on to allow gc to try to
         * delete the object again.  In a debug build, that caused
         * a mysterious segfault, when _Py_ForgetReference tried
         * to remove the object from the doubly-linked list of all
         * objects a second time.  In a release build, an actual
         * double deallocation occurred, which leads to corruption
         * of the allocator's internal bookkeeping pointers.  That's
         * so serious that maybe this should be a release-build
         * check instead of an assert?
         */
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
}
372
Tim Peters19b74c72002-07-01 03:52:19 +0000373/* A traversal callback for subtract_refs. */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000374static int
375visit_decref(PyObject *op, void *data)
376{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000377 assert(op != NULL);
378 if (PyObject_IS_GC(op)) {
379 PyGC_Head *gc = AS_GC(op);
380 /* We're only interested in gc_refs for objects in the
381 * generation being collected, which can be recognized
382 * because only they have positive gc_refs.
383 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900384 if (gc_is_collecting(gc)) {
385 gc_decref(gc);
386 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000387 }
388 return 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000389}
390
Tim Peters19b74c72002-07-01 03:52:19 +0000391/* Subtract internal references from gc_refs. After this, gc_refs is >= 0
392 * for all objects in containers, and is GC_REACHABLE for all tracked gc
393 * objects not in containers. The ones with gc_refs > 0 are directly
394 * reachable from outside containers, and so can't be collected.
395 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000396static void
397subtract_refs(PyGC_Head *containers)
398{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000399 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900400 PyGC_Head *gc = GC_NEXT(containers);
401 for (; gc != containers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000402 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
403 (void) traverse(FROM_GC(gc),
404 (visitproc)visit_decref,
405 NULL);
406 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000407}
408
/* A traversal callback for move_unreachable: `op` is referenced by an
 * object already proven reachable, so make sure move_unreachable treats
 * it as reachable too. */
static int
visit_reachable(PyObject *op, PyGC_Head *reachable)
{
    if (!PyObject_IS_GC(op)) {
        return 0;
    }

    PyGC_Head *gc = AS_GC(op);
    const Py_ssize_t gc_refs = gc_get_refs(gc);

    // Ignore untracked objects (_gc_next == 0) and objects in another
    // generation (no COLLECTING flag).
    if (gc->_gc_next == 0 || !gc_is_collecting(gc)) {
        return 0;
    }

    if (gc->_gc_next & NEXT_MASK_UNREACHABLE) {
        /* This had gc_refs = 0 when move_unreachable got
         * to it, but turns out it's reachable after all.
         * Move it back to move_unreachable's 'young' list,
         * and move_unreachable will eventually get to it
         * again.
         */
        // Manually unlink gc from the unreachable list: that list is
        // only singly linked via flagged _gc_next pointers here, so the
        // regular gc_list_* helpers cannot be used on it.
        PyGC_Head *prev = GC_PREV(gc);
        PyGC_Head *next = (PyGC_Head*)(gc->_gc_next & ~NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(prev),
                         prev->_gc_next & NEXT_MASK_UNREACHABLE);
        _PyObject_ASSERT(FROM_GC(next),
                         next->_gc_next & NEXT_MASK_UNREACHABLE);
        prev->_gc_next = gc->_gc_next;  // copy NEXT_MASK_UNREACHABLE
        _PyGCHead_SET_PREV(next, prev);

        gc_list_append(gc, reachable);
        gc_set_refs(gc, 1);
    }
    else if (gc_refs == 0) {
        /* This is in move_unreachable's 'young' list, but
         * the traversal hasn't yet gotten to it.  All
         * we need to do is tell move_unreachable that it's
         * reachable.
         */
        gc_set_refs(gc, 1);
    }
    /* Else there's nothing to do.
     * If gc_refs > 0, it must be in move_unreachable's 'young'
     * list, and move_unreachable will eventually get to it.
     */
    else {
        _PyObject_ASSERT_WITH_MSG(op, gc_refs > 0, "refcount is too small");
    }
    return 0;
}
462
/* Move the unreachable objects from young to unreachable.  After this,
 * all objects in young don't have the PREV_MASK_COLLECTING flag and all
 * objects in unreachable have it.
 * All objects in young after this are directly or indirectly reachable
 * from outside the original young; and all objects in unreachable are
 * not.
 *
 * This function restores the _gc_prev pointer.  young and unreachable
 * are doubly linked lists after this function finishes.
 * But _gc_next in the unreachable list has the NEXT_MASK_UNREACHABLE
 * flag set, so we cannot use gc_list_* functions on unreachable until
 * move_legacy_finalizers() removes the flag.
 */
static void
move_unreachable(PyGC_Head *young, PyGC_Head *unreachable)
{
    // previous elem in the young list, used to restore gc_prev.
    PyGC_Head *prev = young;
    PyGC_Head *gc = GC_NEXT(young);

    /* Invariants:  all objects "to the left" of us in young are reachable
     * (directly or indirectly) from outside the young list as it was at entry.
     *
     * All other objects from the original young "to the left" of us are in
     * unreachable now, and have NEXT_MASK_UNREACHABLE.  All objects to the
     * left of us in 'young' now have been scanned, and no objects here
     * or to the right have been scanned yet.
     */

    while (gc != young) {
        if (gc_get_refs(gc)) {
            /* gc is definitely reachable from outside the
             * original 'young'.  Mark it as such, and traverse
             * its pointers to find any other objects that may
             * be directly reachable from it.  Note that the
             * call to tp_traverse may append objects to young,
             * so we have to wait until it returns to determine
             * the next object to visit.
             */
            PyObject *op = FROM_GC(gc);
            traverseproc traverse = Py_TYPE(op)->tp_traverse;
            _PyObject_ASSERT_WITH_MSG(op, gc_get_refs(gc) > 0,
                                      "refcount is too small");
            // NOTE: visit_reachable may change gc->_gc_next when
            // young->_gc_prev == gc.  Don't do gc = GC_NEXT(gc) before!
            (void) traverse(op,
                    (visitproc)visit_reachable,
                    (void *)young);
            // relink gc_prev to prev element.
            _PyGCHead_SET_PREV(gc, prev);
            // gc is not in COLLECTING state after here.
            gc_clear_collecting(gc);
            prev = gc;
        }
        else {
            /* This *may* be unreachable.  To make progress,
             * assume it is.  gc isn't directly reachable from
             * any object we've already traversed, but may be
             * reachable from an object we haven't gotten to yet.
             * visit_reachable will eventually move gc back into
             * young if that's so, and we'll see it again.
             */
            // Move gc to unreachable.
            // No need for gc->next->prev = prev because the young list
            // is only singly linked at this point.
            prev->_gc_next = gc->_gc_next;

            // We can't use gc_list_append() here because we need to set
            // the NEXT_MASK_UNREACHABLE flag on the links.
            PyGC_Head *last = GC_PREV(unreachable);
            // NOTE: Since all objects in the unreachable set have the
            // NEXT_MASK_UNREACHABLE flag, we set it unconditionally.
            // But this may set the flag on the `unreachable` list header
            // too; move_legacy_finalizers() takes care of clearing it.
            last->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)gc);
            _PyGCHead_SET_PREV(gc, last);
            gc->_gc_next = (NEXT_MASK_UNREACHABLE | (uintptr_t)unreachable);
            unreachable->_gc_prev = (uintptr_t)gc;
        }
        gc = (PyGC_Head*)prev->_gc_next;
    }
    // young->_gc_prev must be the last element remaining in the list.
    young->_gc_prev = (uintptr_t)prev;
}
545
546static void
547untrack_tuples(PyGC_Head *head)
548{
549 PyGC_Head *next, *gc = GC_NEXT(head);
550 while (gc != head) {
551 PyObject *op = FROM_GC(gc);
552 next = GC_NEXT(gc);
553 if (PyTuple_CheckExact(op)) {
554 _PyTuple_MaybeUntrack(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000555 }
556 gc = next;
557 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000558}
559
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200560/* Try to untrack all currently tracked dictionaries */
561static void
562untrack_dicts(PyGC_Head *head)
563{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900564 PyGC_Head *next, *gc = GC_NEXT(head);
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200565 while (gc != head) {
566 PyObject *op = FROM_GC(gc);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900567 next = GC_NEXT(gc);
568 if (PyDict_CheckExact(op)) {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200569 _PyDict_MaybeUntrack(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900570 }
Antoine Pitroue1ad3da2012-05-28 22:22:34 +0200571 gc = next;
572 }
573}
574
Antoine Pitrou796564c2013-07-30 19:59:21 +0200575/* Return true if object has a pre-PEP 442 finalization method. */
Neil Schemenauera765c122001-11-01 17:35:23 +0000576static int
Antoine Pitrou796564c2013-07-30 19:59:21 +0200577has_legacy_finalizer(PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000578{
Antoine Pitrou796564c2013-07-30 19:59:21 +0200579 return op->ob_type->tp_del != NULL;
Neil Schemenauera765c122001-11-01 17:35:23 +0000580}
581
/* Move the objects in unreachable with tp_del slots into `finalizers`.
 *
 * This function also removes the NEXT_MASK_UNREACHABLE flag from
 * _gc_next throughout `unreachable` (header and nodes), restoring it
 * to a normal doubly linked list.
 */
static void
move_legacy_finalizers(PyGC_Head *unreachable, PyGC_Head *finalizers)
{
    PyGC_Head *gc, *next;
    /* move_unreachable() may have set the flag on the header too;
     * clear it first. */
    unreachable->_gc_next &= ~NEXT_MASK_UNREACHABLE;

    /* March over unreachable.  Move objects with finalizers into
     * `finalizers`.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyObject *op = FROM_GC(gc);

        _PyObject_ASSERT(op, gc->_gc_next & NEXT_MASK_UNREACHABLE);
        gc->_gc_next &= ~NEXT_MASK_UNREACHABLE;
        /* Compute `next` only after the flag is stripped. */
        next = (PyGC_Head*)gc->_gc_next;

        if (has_legacy_finalizer(op)) {
            gc_clear_collecting(gc);
            gc_list_move(gc, finalizers);
        }
    }
}
609
Antoine Pitrou796564c2013-07-30 19:59:21 +0200610/* A traversal callback for move_legacy_finalizer_reachable. */
Tim Peters19b74c72002-07-01 03:52:19 +0000611static int
612visit_move(PyObject *op, PyGC_Head *tolist)
613{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000614 if (PyObject_IS_GC(op)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900615 PyGC_Head *gc = AS_GC(op);
616 if (gc_is_collecting(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000617 gc_list_move(gc, tolist);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900618 gc_clear_collecting(gc);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000619 }
620 }
621 return 0;
Tim Peters19b74c72002-07-01 03:52:19 +0000622}
623
624/* Move objects that are reachable from finalizers, from the unreachable set
Tim Petersf6b80452003-04-07 19:21:15 +0000625 * into finalizers set.
Tim Peters19b74c72002-07-01 03:52:19 +0000626 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000627static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200628move_legacy_finalizer_reachable(PyGC_Head *finalizers)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000629{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000630 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900631 PyGC_Head *gc = GC_NEXT(finalizers);
632 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000633 /* Note that the finalizers list may grow during this. */
634 traverse = Py_TYPE(FROM_GC(gc))->tp_traverse;
635 (void) traverse(FROM_GC(gc),
636 (visitproc)visit_move,
637 (void *)finalizers);
638 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000639}
640
Tim Petersead8b7a2004-10-30 23:09:22 +0000641/* Clear all weakrefs to unreachable objects, and if such a weakref has a
642 * callback, invoke it if necessary. Note that it's possible for such
643 * weakrefs to be outside the unreachable set -- indeed, those are precisely
644 * the weakrefs whose callbacks must be invoked. See gc_weakref.txt for
645 * overview & some details. Some weakrefs with callbacks may be reclaimed
646 * directly by this routine; the number reclaimed is the return value. Other
647 * weakrefs with callbacks may be moved into the `old` generation. Objects
648 * moved into `old` have gc_refs set to GC_REACHABLE; the objects remaining in
649 * unreachable are left at GC_TENTATIVELY_UNREACHABLE. When this returns,
650 * no object in `unreachable` is weakly referenced anymore.
Tim Peters403a2032003-11-20 21:21:46 +0000651 */
static int
handle_weakrefs(PyGC_Head *unreachable, PyGC_Head *old)
{
    PyGC_Head *gc;
    PyObject *op;               /* generally FROM_GC(gc) */
    PyWeakReference *wr;        /* generally a cast of op */
    PyGC_Head wrcb_to_call;     /* weakrefs with callbacks to call */
    PyGC_Head *next;
    int num_freed = 0;          /* # of callback weakrefs that died after their callback ran */

    gc_list_init(&wrcb_to_call);

    /* Pass 1 of 2.
     * Clear all weakrefs to the objects in unreachable.  If such a weakref
     * also has a callback, move it into `wrcb_to_call` if the callback
     * needs to be invoked.  Note that we cannot invoke any callbacks until
     * all weakrefs to unreachable objects are cleared, lest the callback
     * resurrect an unreachable object via a still-active weakref.  We
     * make another pass over wrcb_to_call, invoking callbacks, after this
     * pass completes.
     */
    for (gc = GC_NEXT(unreachable); gc != unreachable; gc = next) {
        PyWeakReference **wrlist;

        op = FROM_GC(gc);
        /* Capture the successor up front; `gc` itself stays in place,
         * but grabbing `next` now keeps the loop structure uniform. */
        next = GC_NEXT(gc);

        if (! PyType_SUPPORTS_WEAKREFS(Py_TYPE(op)))
            continue;

        /* It supports weakrefs.  Does it have any? */
        wrlist = (PyWeakReference **)
                        PyObject_GET_WEAKREFS_LISTPTR(op);

        /* `op` may have some weakrefs.  March over the list, clear
         * all the weakrefs, and move the weakrefs with callbacks
         * that must be called into wrcb_to_call.
         */
        for (wr = *wrlist; wr != NULL; wr = *wrlist) {
            PyGC_Head *wrasgc;          /* AS_GC(wr) */

            /* _PyWeakref_ClearRef clears the weakref but leaves
             * the callback pointer intact.  Obscure:  it also
             * changes *wrlist.
             */
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == op);
            _PyWeakref_ClearRef(wr);
            _PyObject_ASSERT((PyObject *)wr, wr->wr_object == Py_None);
            if (wr->wr_callback == NULL) {
                /* no callback */
                continue;
            }

            /* Headache time.  `op` is going away, and is weakly referenced by
             * `wr`, which has a callback.  Should the callback be invoked?  If wr
             * is also trash, no:
             *
             * 1. There's no need to call it.  The object and the weakref are
             *    both going away, so it's legitimate to pretend the weakref is
             *    going away first.  The user has to ensure a weakref outlives its
             *    referent if they want a guarantee that the wr callback will get
             *    invoked.
             *
             * 2. It may be catastrophic to call it.  If the callback is also in
             *    cyclic trash (CT), then although the CT is unreachable from
             *    outside the current generation, CT may be reachable from the
             *    callback.  Then the callback could resurrect insane objects.
             *
             * Since the callback is never needed and may be unsafe in this case,
             * wr is simply left in the unreachable set.  Note that because we
             * already called _PyWeakref_ClearRef(wr), its callback will never
             * trigger.
             *
             * OTOH, if wr isn't part of CT, we should invoke the callback:  the
             * weakref outlived the trash.  Note that since wr isn't CT in this
             * case, its callback can't be CT either -- wr acted as an external
             * root to this generation, and therefore its callback did too.  So
             * nothing in CT is reachable from the callback either, so it's hard
             * to imagine how calling it later could create a problem for us.  wr
             * is moved to wrcb_to_call in this case.
             */
            if (gc_is_collecting(AS_GC(wr))) {
                /* `wr` is itself trash (case above): skip its callback. */
                continue;
            }

            /* Create a new reference so that wr can't go away
             * before we can process it again.
             */
            Py_INCREF(wr);

            /* Move wr to wrcb_to_call, for the next pass. */
            wrasgc = AS_GC(wr);
            assert(wrasgc != next); /* wrasgc is reachable, but
                                       next isn't, so they can't
                                       be the same */
            gc_list_move(wrasgc, &wrcb_to_call);
        }
    }

    /* Pass 2 of 2.
     * Invoke the callbacks we decided to honor.  It's safe to invoke them
     * because they can't reference unreachable objects.
     */
    while (! gc_list_is_empty(&wrcb_to_call)) {
        PyObject *temp;
        PyObject *callback;

        /* Pop the first weakref off the pending-callback list. */
        gc = (PyGC_Head*)wrcb_to_call._gc_next;
        op = FROM_GC(gc);
        _PyObject_ASSERT(op, PyWeakref_Check(op));
        wr = (PyWeakReference *)op;
        callback = wr->wr_callback;
        _PyObject_ASSERT(op, callback != NULL);

        /* copy-paste of weakrefobject.c's handle_callback() */
        temp = PyObject_CallFunctionObjArgs(callback, wr, NULL);
        if (temp == NULL)
            PyErr_WriteUnraisable(callback);
        else
            Py_DECREF(temp);

        /* Give up the reference we created in the first pass.  When
         * op's refcount hits 0 (which it may or may not do right now),
         * op's tp_dealloc will decref op->wr_callback too.  Note
         * that the refcount probably will hit 0 now, and because this
         * weakref was reachable to begin with, gc didn't already
         * add it to its count of freed objects.  Example:  a reachable
         * weak value dict maps some key to this reachable weakref.
         * The callback removes this key->weakref mapping from the
         * dict, leaving no other references to the weakref (excepting
         * ours).
         */
        Py_DECREF(op);
        if (wrcb_to_call._gc_next == (uintptr_t)gc) {
            /* object is still alive -- move it */
            gc_list_move(gc, old);
        }
        else {
            /* The DECREF above deallocated the weakref: count it. */
            ++num_freed;
        }
    }

    return num_freed;
}
794
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000795static void
Serhiy Storchakaef1585e2015-12-25 20:01:53 +0200796debug_cycle(const char *msg, PyObject *op)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000797{
Victor Stinner499dfcf2011-03-21 13:26:24 +0100798 PySys_FormatStderr("gc: %s <%s %p>\n",
799 msg, Py_TYPE(op)->tp_name, op);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000800}
801
Antoine Pitrou796564c2013-07-30 19:59:21 +0200802/* Handle uncollectable garbage (cycles with tp_del slots, and stuff reachable
Tim Petersbf384c22003-04-06 00:11:39 +0000803 * only from such cycles).
Tim Petersf6b80452003-04-07 19:21:15 +0000804 * If DEBUG_SAVEALL, all objects in finalizers are appended to the module
805 * garbage list (a Python list), else only the objects in finalizers with
806 * __del__ methods are appended to garbage. All objects in finalizers are
807 * merged into the old list regardless.
Tim Petersbf384c22003-04-06 00:11:39 +0000808 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300809static void
Antoine Pitrou796564c2013-07-30 19:59:21 +0200810handle_legacy_finalizers(PyGC_Head *finalizers, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000811{
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900812 PyGC_Head *gc = GC_NEXT(finalizers);
Tim Petersf6b80452003-04-07 19:21:15 +0000813
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300814 assert(!PyErr_Occurred());
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600815 if (_PyRuntime.gc.garbage == NULL) {
816 _PyRuntime.gc.garbage = PyList_New(0);
817 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000818 Py_FatalError("gc couldn't create gc.garbage list");
819 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900820 for (; gc != finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000821 PyObject *op = FROM_GC(gc);
Tim Petersf6b80452003-04-07 19:21:15 +0000822
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600823 if ((_PyRuntime.gc.debug & DEBUG_SAVEALL) || has_legacy_finalizer(op)) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300824 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
825 PyErr_Clear();
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +0300826 break;
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300827 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000828 }
829 }
Tim Petersf6b80452003-04-07 19:21:15 +0000830
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000831 gc_list_merge(finalizers, old);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000832}
833
Tim Peters5fbc7b12014-05-08 17:42:19 -0500834/* Run first-time finalizers (if any) on all the objects in collectable.
835 * Note that this may remove some (or even all) of the objects from the
836 * list, due to refcounts falling to 0.
837 */
Antoine Pitrou796564c2013-07-30 19:59:21 +0200838static void
Tim Peters5fbc7b12014-05-08 17:42:19 -0500839finalize_garbage(PyGC_Head *collectable)
Antoine Pitrou796564c2013-07-30 19:59:21 +0200840{
841 destructor finalize;
Tim Peters5fbc7b12014-05-08 17:42:19 -0500842 PyGC_Head seen;
Antoine Pitrou796564c2013-07-30 19:59:21 +0200843
Tim Peters5fbc7b12014-05-08 17:42:19 -0500844 /* While we're going through the loop, `finalize(op)` may cause op, or
845 * other objects, to be reclaimed via refcounts falling to zero. So
846 * there's little we can rely on about the structure of the input
847 * `collectable` list across iterations. For safety, we always take the
848 * first object in that list and move it to a temporary `seen` list.
849 * If objects vanish from the `collectable` and `seen` lists we don't
850 * care.
851 */
852 gc_list_init(&seen);
853
854 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900855 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200856 PyObject *op = FROM_GC(gc);
Tim Peters5fbc7b12014-05-08 17:42:19 -0500857 gc_list_move(gc, &seen);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200858 if (!_PyGCHead_FINALIZED(gc) &&
Tim Peters5fbc7b12014-05-08 17:42:19 -0500859 PyType_HasFeature(Py_TYPE(op), Py_TPFLAGS_HAVE_FINALIZE) &&
860 (finalize = Py_TYPE(op)->tp_finalize) != NULL) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900861 _PyGCHead_SET_FINALIZED(gc);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200862 Py_INCREF(op);
863 finalize(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300864 assert(!PyErr_Occurred());
Antoine Pitrou796564c2013-07-30 19:59:21 +0200865 Py_DECREF(op);
866 }
867 }
Tim Peters5fbc7b12014-05-08 17:42:19 -0500868 gc_list_merge(&seen, collectable);
Antoine Pitrou796564c2013-07-30 19:59:21 +0200869}
870
/* Walk the collectable list and check that they are really unreachable
   from the outside (some objects could have been resurrected by a
   finalizer).  Returns 0 if every object is still unreachable, -1 if at
   least one object was resurrected (has external references left after
   subtracting intra-list references).  Side effects: clears the
   COLLECTING flag on every object and restores the gc_prev links that
   the refcount bookkeeping temporarily reuses. */
static int
check_garbage(PyGC_Head *collectable)
{
    int ret = 0;
    PyGC_Head *gc;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        // Use gc_refs and break gc_prev again.
        gc_set_refs(gc, Py_REFCNT(FROM_GC(gc)));
        _PyObject_ASSERT(FROM_GC(gc), gc_get_refs(gc) != 0);
    }
    /* Remove the references objects in this list hold on each other;
     * whatever is left over must come from outside the list. */
    subtract_refs(collectable);
    PyGC_Head *prev = collectable;
    for (gc = GC_NEXT(collectable); gc != collectable; gc = GC_NEXT(gc)) {
        _PyObject_ASSERT_WITH_MSG(FROM_GC(gc),
                                  gc_get_refs(gc) >= 0,
                                  "refcount is too small");
        if (gc_get_refs(gc) != 0) {
            /* External reference found: the object was resurrected. */
            ret = -1;
        }
        // Restore gc_prev here.
        _PyGCHead_SET_PREV(gc, prev);
        gc_clear_collecting(gc);
        prev = gc;
    }
    return ret;
}
900
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000901/* Break reference cycles by clearing the containers involved. This is
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000902 * tricky business as the lists can be changing and we don't know which
Tim Peters19b74c72002-07-01 03:52:19 +0000903 * objects may be freed. It is possible I screwed something up here.
904 */
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000905static void
Jeremy Hyltonce136e92003-04-04 19:59:06 +0000906delete_garbage(PyGC_Head *collectable, PyGC_Head *old)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000907{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000908 inquiry clear;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000909
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300910 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000911 while (!gc_list_is_empty(collectable)) {
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900912 PyGC_Head *gc = GC_NEXT(collectable);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000913 PyObject *op = FROM_GC(gc);
Tim Peters88396172002-06-30 17:56:40 +0000914
Victor Stinnera4b2bc72018-10-26 18:00:13 +0200915 _PyObject_ASSERT_WITH_MSG(op, Py_REFCNT(op) > 0,
916 "refcount is too small");
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900917
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600918 if (_PyRuntime.gc.debug & DEBUG_SAVEALL) {
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300919 assert(_PyRuntime.gc.garbage != NULL);
920 if (PyList_Append(_PyRuntime.gc.garbage, op) < 0) {
921 PyErr_Clear();
922 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000923 }
924 else {
925 if ((clear = Py_TYPE(op)->tp_clear) != NULL) {
926 Py_INCREF(op);
Serhiy Storchakac4653c92018-05-29 18:50:10 +0300927 (void) clear(op);
928 if (PyErr_Occurred()) {
929 PySys_WriteStderr("Exception ignored in tp_clear of "
930 "%.50s\n", Py_TYPE(op)->tp_name);
931 PyErr_WriteUnraisable(NULL);
932 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000933 Py_DECREF(op);
934 }
935 }
INADA Naoki5ac9e6e2018-07-10 17:19:53 +0900936 if (GC_NEXT(collectable) == gc) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000937 /* object is still alive, move it, it may die later */
938 gc_list_move(gc, old);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000939 }
940 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000941}
942
Christian Heimesa156e092008-02-16 07:38:31 +0000943/* Clear all free lists
944 * All free lists are cleared during the collection of the highest generation.
945 * Allocated items in the free list may keep a pymalloc arena occupied.
946 * Clearing the free lists may give back memory to the OS earlier.
947 */
948static void
949clear_freelists(void)
950{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000951 (void)PyMethod_ClearFreeList();
952 (void)PyFrame_ClearFreeList();
953 (void)PyCFunction_ClearFreeList();
954 (void)PyTuple_ClearFreeList();
955 (void)PyUnicode_ClearFreeList();
956 (void)PyFloat_ClearFreeList();
Antoine Pitrou9a812cb2011-11-15 00:00:12 +0100957 (void)PyList_ClearFreeList();
958 (void)PyDict_ClearFreeList();
Antoine Pitrou093ce9c2011-12-16 11:24:27 +0100959 (void)PySet_ClearFreeList();
Yury Selivanoveb636452016-09-08 22:01:51 -0700960 (void)PyAsyncGen_ClearFreeLists();
Yury Selivanovf23746a2018-01-22 19:11:18 -0500961 (void)PyContext_ClearFreeList();
Christian Heimesa156e092008-02-16 07:38:31 +0000962}
963
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000964/* This is the main function. Read this to understand how the
965 * collection process works. */
Neal Norwitz7b216c52006-03-04 20:01:53 +0000966static Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +0200967collect(int generation, Py_ssize_t *n_collected, Py_ssize_t *n_uncollectable,
968 int nofail)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000969{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000970 int i;
971 Py_ssize_t m = 0; /* # objects collected */
972 Py_ssize_t n = 0; /* # unreachable objects that couldn't be collected */
973 PyGC_Head *young; /* the generation we are examining */
974 PyGC_Head *old; /* next older generation */
975 PyGC_Head unreachable; /* non-problematic unreachable trash */
976 PyGC_Head finalizers; /* objects with, & reachable from, __del__ */
977 PyGC_Head *gc;
Victor Stinner7181dec2015-03-27 17:47:53 +0100978 _PyTime_t t1 = 0; /* initialize to prevent a compiler warning */
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200979
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600980 struct gc_generation_stats *stats = &_PyRuntime.gc.generation_stats[generation];
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +0000981
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600982 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000983 PySys_WriteStderr("gc: collecting generation %d...\n",
984 generation);
985 PySys_WriteStderr("gc: objects in each generation:");
986 for (i = 0; i < NUM_GENERATIONS; i++)
Antoine Pitrouded3c1b2014-05-24 19:24:40 +0200987 PySys_FormatStderr(" %zd",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000988 gc_list_size(GEN_HEAD(i)));
brainfvckc75edab2017-10-16 12:49:41 -0700989 PySys_WriteStderr("\ngc: objects in permanent generation: %zd",
990 gc_list_size(&_PyRuntime.gc.permanent_generation.head));
Victor Stinner7181dec2015-03-27 17:47:53 +0100991 t1 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +0200992
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000993 PySys_WriteStderr("\n");
994 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +0000995
Łukasz Langaa785c872016-09-09 17:37:37 -0700996 if (PyDTrace_GC_START_ENABLED())
997 PyDTrace_GC_START(generation);
998
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000999 /* update collection and allocation counters */
1000 if (generation+1 < NUM_GENERATIONS)
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001001 _PyRuntime.gc.generations[generation+1].count += 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001002 for (i = 0; i <= generation; i++)
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001003 _PyRuntime.gc.generations[i].count = 0;
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001004
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001005 /* merge younger generations with one we are currently collecting */
1006 for (i = 0; i < generation; i++) {
1007 gc_list_merge(GEN_HEAD(i), GEN_HEAD(generation));
1008 }
Neil Schemenauer2880ae52002-05-04 05:35:20 +00001009
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001010 /* handy references */
1011 young = GEN_HEAD(generation);
1012 if (generation < NUM_GENERATIONS-1)
1013 old = GEN_HEAD(generation+1);
1014 else
1015 old = young;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001016
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001017 validate_list(young, 0);
1018 validate_list(old, 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001019 /* Using ob_refcnt and gc_refs, calculate which objects in the
1020 * container set are reachable from outside the set (i.e., have a
1021 * refcount greater than 0 when all the references within the
1022 * set are taken into account).
1023 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001024 update_refs(young); // gc_prev is used for gc_refs
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001025 subtract_refs(young);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001026
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001027 /* Leave everything reachable from outside young in young, and move
1028 * everything else (in young) to unreachable.
1029 * NOTE: This used to move the reachable objects into a reachable
1030 * set instead. But most things usually turn out to be reachable,
1031 * so it's more efficient to move the unreachable things.
1032 */
1033 gc_list_init(&unreachable);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001034 move_unreachable(young, &unreachable); // gc_prev is pointer again
1035 validate_list(young, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001036
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001037 untrack_tuples(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001038 /* Move reachable objects to next generation. */
1039 if (young != old) {
1040 if (generation == NUM_GENERATIONS - 2) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001041 _PyRuntime.gc.long_lived_pending += gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001042 }
1043 gc_list_merge(young, old);
1044 }
1045 else {
Antoine Pitroue1ad3da2012-05-28 22:22:34 +02001046 /* We only untrack dicts in full collections, to avoid quadratic
1047 dict build-up. See issue #14775. */
1048 untrack_dicts(young);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001049 _PyRuntime.gc.long_lived_pending = 0;
1050 _PyRuntime.gc.long_lived_total = gc_list_size(young);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001051 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001052
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001053 /* All objects in unreachable are trash, but objects reachable from
Antoine Pitrou796564c2013-07-30 19:59:21 +02001054 * legacy finalizers (e.g. tp_del) can't safely be deleted.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001055 */
1056 gc_list_init(&finalizers);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001057 // NEXT_MASK_UNREACHABLE is cleared here.
1058 // After move_legacy_finalizers(), unreachable is normal list.
Antoine Pitrou796564c2013-07-30 19:59:21 +02001059 move_legacy_finalizers(&unreachable, &finalizers);
1060 /* finalizers contains the unreachable objects with a legacy finalizer;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001061 * unreachable objects reachable *from* those are also uncollectable,
1062 * and we move those into the finalizers list too.
1063 */
Antoine Pitrou796564c2013-07-30 19:59:21 +02001064 move_legacy_finalizer_reachable(&finalizers);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001065
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001066 validate_list(&finalizers, 0);
1067 validate_list(&unreachable, PREV_MASK_COLLECTING);
1068
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001069 /* Collect statistics on collectable objects found and print
1070 * debugging information.
1071 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001072 for (gc = GC_NEXT(&unreachable); gc != &unreachable; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001073 m++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001074 if (_PyRuntime.gc.debug & DEBUG_COLLECTABLE) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001075 debug_cycle("collectable", FROM_GC(gc));
1076 }
1077 }
Tim Petersead8b7a2004-10-30 23:09:22 +00001078
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001079 /* Clear weakrefs and invoke callbacks as necessary. */
1080 m += handle_weakrefs(&unreachable, old);
Tim Petersead8b7a2004-10-30 23:09:22 +00001081
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001082 validate_list(old, 0);
1083 validate_list(&unreachable, PREV_MASK_COLLECTING);
1084
Antoine Pitrou796564c2013-07-30 19:59:21 +02001085 /* Call tp_finalize on objects which have one. */
Tim Peters5fbc7b12014-05-08 17:42:19 -05001086 finalize_garbage(&unreachable);
Antoine Pitrou796564c2013-07-30 19:59:21 +02001087
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001088 if (check_garbage(&unreachable)) { // clear PREV_MASK_COLLECTING here
Antoine Pitrou796564c2013-07-30 19:59:21 +02001089 gc_list_merge(&unreachable, old);
1090 }
1091 else {
1092 /* Call tp_clear on objects in the unreachable set. This will cause
1093 * the reference cycles to be broken. It may also cause some objects
1094 * in finalizers to be freed.
1095 */
1096 delete_garbage(&unreachable, old);
1097 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001098
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001099 /* Collect statistics on uncollectable objects found and print
1100 * debugging information. */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001101 for (gc = GC_NEXT(&finalizers); gc != &finalizers; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001102 n++;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001103 if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001104 debug_cycle("uncollectable", FROM_GC(gc));
1105 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001106 if (_PyRuntime.gc.debug & DEBUG_STATS) {
Victor Stinner7181dec2015-03-27 17:47:53 +01001107 _PyTime_t t2 = _PyTime_GetMonotonicClock();
Antoine Pitrou40f6b122014-05-24 19:21:53 +02001108
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001109 if (m == 0 && n == 0)
1110 PySys_WriteStderr("gc: done");
1111 else
Antoine Pitrouded3c1b2014-05-24 19:24:40 +02001112 PySys_FormatStderr(
1113 "gc: done, %zd unreachable, %zd uncollectable",
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001114 n+m, n);
Victor Stinner7181dec2015-03-27 17:47:53 +01001115 PySys_WriteStderr(", %.4fs elapsed\n",
1116 _PyTime_AsSecondsDouble(t2 - t1));
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001117 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001118
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001119 /* Append instances in the uncollectable set to a Python
1120 * reachable list of garbage. The programmer has to deal with
1121 * this if they insist on creating this type of structure.
1122 */
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001123 handle_legacy_finalizers(&finalizers, old);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001124 validate_list(old, 0);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001125
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001126 /* Clear free list only during the collection of the highest
1127 * generation */
1128 if (generation == NUM_GENERATIONS-1) {
1129 clear_freelists();
1130 }
Christian Heimesa156e092008-02-16 07:38:31 +00001131
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001132 if (PyErr_Occurred()) {
Antoine Pitroufef34e32013-05-19 01:11:58 +02001133 if (nofail) {
1134 PyErr_Clear();
1135 }
1136 else {
1137 if (gc_str == NULL)
1138 gc_str = PyUnicode_FromString("garbage collection");
1139 PyErr_WriteUnraisable(gc_str);
1140 Py_FatalError("unexpected exception during garbage collection");
1141 }
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001142 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001143
Antoine Pitroud4156c12012-10-30 22:43:19 +01001144 /* Update stats */
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001145 if (n_collected)
1146 *n_collected = m;
1147 if (n_uncollectable)
1148 *n_uncollectable = n;
Antoine Pitroud4156c12012-10-30 22:43:19 +01001149 stats->collections++;
1150 stats->collected += m;
1151 stats->uncollectable += n;
Łukasz Langaa785c872016-09-09 17:37:37 -07001152
1153 if (PyDTrace_GC_DONE_ENABLED())
1154 PyDTrace_GC_DONE(n+m);
1155
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001156 assert(!PyErr_Occurred());
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001157 return n+m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001158}
1159
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001160/* Invoke progress callbacks to notify clients that garbage collection
1161 * is starting or stopping
1162 */
1163static void
1164invoke_gc_callback(const char *phase, int generation,
1165 Py_ssize_t collected, Py_ssize_t uncollectable)
1166{
1167 Py_ssize_t i;
1168 PyObject *info = NULL;
1169
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001170 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001171 /* we may get called very early */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001172 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001173 return;
1174 /* The local variable cannot be rebound, check it for sanity */
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001175 assert(PyList_CheckExact(_PyRuntime.gc.callbacks));
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001176 if (PyList_GET_SIZE(_PyRuntime.gc.callbacks) != 0) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001177 info = Py_BuildValue("{sisnsn}",
1178 "generation", generation,
1179 "collected", collected,
1180 "uncollectable", uncollectable);
1181 if (info == NULL) {
1182 PyErr_WriteUnraisable(NULL);
1183 return;
1184 }
1185 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001186 for (i=0; i<PyList_GET_SIZE(_PyRuntime.gc.callbacks); i++) {
1187 PyObject *r, *cb = PyList_GET_ITEM(_PyRuntime.gc.callbacks, i);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001188 Py_INCREF(cb); /* make sure cb doesn't go away */
1189 r = PyObject_CallFunction(cb, "sO", phase, info);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001190 if (r == NULL) {
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001191 PyErr_WriteUnraisable(cb);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001192 }
1193 else {
1194 Py_DECREF(r);
1195 }
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001196 Py_DECREF(cb);
1197 }
1198 Py_XDECREF(info);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001199 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001200}
1201
1202/* Perform garbage collection of a generation and invoke
1203 * progress callbacks.
1204 */
1205static Py_ssize_t
1206collect_with_callback(int generation)
1207{
1208 Py_ssize_t result, collected, uncollectable;
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001209 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001210 invoke_gc_callback("start", generation, 0, 0);
Antoine Pitroufef34e32013-05-19 01:11:58 +02001211 result = collect(generation, &collected, &uncollectable, 0);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001212 invoke_gc_callback("stop", generation, collected, uncollectable);
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001213 assert(!PyErr_Occurred());
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001214 return result;
1215}
1216
Neal Norwitz7b216c52006-03-04 20:01:53 +00001217static Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001218collect_generations(void)
1219{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001220 int i;
1221 Py_ssize_t n = 0;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001222
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001223 /* Find the oldest generation (highest numbered) where the count
1224 * exceeds the threshold. Objects in the that generation and
1225 * generations younger than it will be collected. */
1226 for (i = NUM_GENERATIONS-1; i >= 0; i--) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001227 if (_PyRuntime.gc.generations[i].count > _PyRuntime.gc.generations[i].threshold) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001228 /* Avoid quadratic performance degradation in number
1229 of tracked objects. See comments at the beginning
1230 of this file, and issue #4074.
1231 */
1232 if (i == NUM_GENERATIONS - 1
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001233 && _PyRuntime.gc.long_lived_pending < _PyRuntime.gc.long_lived_total / 4)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001234 continue;
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001235 n = collect_with_callback(i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001236 break;
1237 }
1238 }
1239 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001240}
1241
Serhiy Storchaka93260282017-02-04 11:19:59 +02001242#include "clinic/gcmodule.c.h"
1243
1244/*[clinic input]
1245gc.enable
1246
1247Enable automatic garbage collection.
1248[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001249
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001250static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001251gc_enable_impl(PyObject *module)
1252/*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001253{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001254 _PyRuntime.gc.enabled = 1;
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001255 Py_RETURN_NONE;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001256}
1257
Serhiy Storchaka93260282017-02-04 11:19:59 +02001258/*[clinic input]
1259gc.disable
1260
1261Disable automatic garbage collection.
1262[clinic start generated code]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001263
static PyObject *
gc_disable_impl(PyObject *module)
/*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
{
    /* Clear the runtime-wide flag that gates automatic collection. */
    _PyRuntime.gc.enabled = 0;
    Py_RETURN_NONE;
}
1271
Serhiy Storchaka93260282017-02-04 11:19:59 +02001272/*[clinic input]
1273gc.isenabled -> bool
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001274
Serhiy Storchaka93260282017-02-04 11:19:59 +02001275Returns true if automatic garbage collection is enabled.
1276[clinic start generated code]*/
1277
1278static int
1279gc_isenabled_impl(PyObject *module)
1280/*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001281{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001282 return _PyRuntime.gc.enabled;
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001283}
1284
Serhiy Storchaka93260282017-02-04 11:19:59 +02001285/*[clinic input]
1286gc.collect -> Py_ssize_t
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001287
Serhiy Storchaka93260282017-02-04 11:19:59 +02001288 generation: int(c_default="NUM_GENERATIONS - 1") = 2
1289
1290Run the garbage collector.
1291
1292With no arguments, run a full collection. The optional argument
1293may be an integer specifying which generation to collect. A ValueError
1294is raised if the generation number is invalid.
1295
1296The number of unreachable objects is returned.
1297[clinic start generated code]*/
1298
1299static Py_ssize_t
1300gc_collect_impl(PyObject *module, int generation)
1301/*[clinic end generated code: output=b697e633043233c7 input=40720128b682d879]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001302{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001303 Py_ssize_t n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001304
Serhiy Storchaka93260282017-02-04 11:19:59 +02001305 if (generation < 0 || generation >= NUM_GENERATIONS) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001306 PyErr_SetString(PyExc_ValueError, "invalid generation");
Serhiy Storchaka93260282017-02-04 11:19:59 +02001307 return -1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001308 }
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001309
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001310 if (_PyRuntime.gc.collecting)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001311 n = 0; /* already collecting, don't do anything */
1312 else {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001313 _PyRuntime.gc.collecting = 1;
Serhiy Storchaka93260282017-02-04 11:19:59 +02001314 n = collect_with_callback(generation);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001315 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001316 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001317
Serhiy Storchaka93260282017-02-04 11:19:59 +02001318 return n;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001319}
1320
Serhiy Storchaka93260282017-02-04 11:19:59 +02001321/*[clinic input]
1322gc.set_debug
1323
1324 flags: int
1325 An integer that can have the following bits turned on:
1326 DEBUG_STATS - Print statistics during collection.
1327 DEBUG_COLLECTABLE - Print collectable objects found.
1328 DEBUG_UNCOLLECTABLE - Print unreachable but uncollectable objects
1329 found.
1330 DEBUG_SAVEALL - Save objects to gc.garbage rather than freeing them.
1331 DEBUG_LEAK - Debug leaking programs (everything but STATS).
1332 /
1333
1334Set the garbage collection debugging flags.
1335
1336Debugging information is written to sys.stderr.
1337[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001338
1339static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001340gc_set_debug_impl(PyObject *module, int flags)
1341/*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001342{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001343 _PyRuntime.gc.debug = flags;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001344
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001345 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001346}
1347
Serhiy Storchaka93260282017-02-04 11:19:59 +02001348/*[clinic input]
1349gc.get_debug -> int
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001350
Serhiy Storchaka93260282017-02-04 11:19:59 +02001351Get the garbage collection debugging flags.
1352[clinic start generated code]*/
1353
1354static int
1355gc_get_debug_impl(PyObject *module)
1356/*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001357{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001358 return _PyRuntime.gc.debug;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001359}
1360
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001361PyDoc_STRVAR(gc_set_thresh__doc__,
Neal Norwitz2a47c0f2002-01-29 00:53:41 +00001362"set_threshold(threshold0, [threshold1, threshold2]) -> None\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001363"\n"
1364"Sets the collection thresholds. Setting threshold0 to zero disables\n"
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001365"collection.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001366
1367static PyObject *
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001368gc_set_thresh(PyObject *self, PyObject *args)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001369{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001370 int i;
1371 if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001372 &_PyRuntime.gc.generations[0].threshold,
1373 &_PyRuntime.gc.generations[1].threshold,
1374 &_PyRuntime.gc.generations[2].threshold))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001375 return NULL;
1376 for (i = 2; i < NUM_GENERATIONS; i++) {
1377 /* generations higher than 2 get the same threshold */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001378 _PyRuntime.gc.generations[i].threshold = _PyRuntime.gc.generations[2].threshold;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001379 }
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001380
Serhiy Storchaka228b12e2017-01-23 09:47:21 +02001381 Py_RETURN_NONE;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001382}
1383
Serhiy Storchaka93260282017-02-04 11:19:59 +02001384/*[clinic input]
1385gc.get_threshold
1386
1387Return the current collection thresholds.
1388[clinic start generated code]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001389
1390static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001391gc_get_threshold_impl(PyObject *module)
1392/*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001393{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001394 return Py_BuildValue("(iii)",
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001395 _PyRuntime.gc.generations[0].threshold,
1396 _PyRuntime.gc.generations[1].threshold,
1397 _PyRuntime.gc.generations[2].threshold);
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001398}
1399
Serhiy Storchaka93260282017-02-04 11:19:59 +02001400/*[clinic input]
1401gc.get_count
1402
1403Return a three-tuple of the current collection counts.
1404[clinic start generated code]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001405
1406static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001407gc_get_count_impl(PyObject *module)
1408/*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001409{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001410 return Py_BuildValue("(iii)",
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001411 _PyRuntime.gc.generations[0].count,
1412 _PyRuntime.gc.generations[1].count,
1413 _PyRuntime.gc.generations[2].count);
Barry Warsawd3c38ff2006-03-07 09:46:03 +00001414}
1415
Neil Schemenauer48c70342001-08-09 15:38:31 +00001416static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001417referrersvisit(PyObject* obj, PyObject *objs)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001418{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001419 Py_ssize_t i;
1420 for (i = 0; i < PyTuple_GET_SIZE(objs); i++)
1421 if (PyTuple_GET_ITEM(objs, i) == obj)
1422 return 1;
1423 return 0;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001424}
1425
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001426static int
Martin v. Löwis560da622001-11-24 09:24:51 +00001427gc_referrers_for(PyObject *objs, PyGC_Head *list, PyObject *resultlist)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001428{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001429 PyGC_Head *gc;
1430 PyObject *obj;
1431 traverseproc traverse;
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001432 for (gc = GC_NEXT(list); gc != list; gc = GC_NEXT(gc)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001433 obj = FROM_GC(gc);
1434 traverse = Py_TYPE(obj)->tp_traverse;
1435 if (obj == objs || obj == resultlist)
1436 continue;
1437 if (traverse(obj, (visitproc)referrersvisit, objs)) {
1438 if (PyList_Append(resultlist, obj) < 0)
1439 return 0; /* error */
1440 }
1441 }
1442 return 1; /* no error */
Neil Schemenauer48c70342001-08-09 15:38:31 +00001443}
1444
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001445PyDoc_STRVAR(gc_get_referrers__doc__,
Martin v. Löwis560da622001-11-24 09:24:51 +00001446"get_referrers(*objs) -> list\n\
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001447Return the list of objects that directly refer to any of objs.");
Neil Schemenauer48c70342001-08-09 15:38:31 +00001448
Neil Schemenauer17e7be62001-08-10 14:46:47 +00001449static PyObject *
Martin v. Löwis560da622001-11-24 09:24:51 +00001450gc_get_referrers(PyObject *self, PyObject *args)
Neil Schemenauer48c70342001-08-09 15:38:31 +00001451{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001452 int i;
1453 PyObject *result = PyList_New(0);
1454 if (!result) return NULL;
Thomas Wouters49fd7fa2006-04-21 10:40:58 +00001455
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001456 for (i = 0; i < NUM_GENERATIONS; i++) {
1457 if (!(gc_referrers_for(args, GEN_HEAD(i), result))) {
1458 Py_DECREF(result);
1459 return NULL;
1460 }
1461 }
1462 return result;
Neil Schemenauer48c70342001-08-09 15:38:31 +00001463}
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001464
Tim Peters0f81ab62003-04-08 16:39:48 +00001465/* Append obj to list; return true if error (out of memory), false if OK. */
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001466static int
Tim Peters730f5532003-04-08 17:17:17 +00001467referentsvisit(PyObject *obj, PyObject *list)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001468{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001469 return PyList_Append(list, obj) < 0;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001470}
1471
Tim Peters730f5532003-04-08 17:17:17 +00001472PyDoc_STRVAR(gc_get_referents__doc__,
1473"get_referents(*objs) -> list\n\
Jeremy Hylton059b0942003-04-03 16:29:13 +00001474Return the list of objects that are directly referred to by objs.");
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001475
1476static PyObject *
Tim Peters730f5532003-04-08 17:17:17 +00001477gc_get_referents(PyObject *self, PyObject *args)
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001478{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001479 Py_ssize_t i;
1480 PyObject *result = PyList_New(0);
Tim Peters0f81ab62003-04-08 16:39:48 +00001481
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001482 if (result == NULL)
1483 return NULL;
Tim Peters0f81ab62003-04-08 16:39:48 +00001484
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001485 for (i = 0; i < PyTuple_GET_SIZE(args); i++) {
1486 traverseproc traverse;
1487 PyObject *obj = PyTuple_GET_ITEM(args, i);
Tim Peters0f81ab62003-04-08 16:39:48 +00001488
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001489 if (! PyObject_IS_GC(obj))
1490 continue;
1491 traverse = Py_TYPE(obj)->tp_traverse;
1492 if (! traverse)
1493 continue;
1494 if (traverse(obj, (visitproc)referentsvisit, result)) {
1495 Py_DECREF(result);
1496 return NULL;
1497 }
1498 }
1499 return result;
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001500}
1501
Serhiy Storchaka93260282017-02-04 11:19:59 +02001502/*[clinic input]
1503gc.get_objects
1504
1505Return a list of objects tracked by the collector (excluding the list returned).
1506[clinic start generated code]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001507
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001508static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001509gc_get_objects_impl(PyObject *module)
1510/*[clinic end generated code: output=fcb95d2e23e1f750 input=9439fe8170bf35d8]*/
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001511{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001512 int i;
1513 PyObject* result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001514
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001515 result = PyList_New(0);
1516 if (result == NULL)
1517 return NULL;
1518 for (i = 0; i < NUM_GENERATIONS; i++) {
1519 if (append_objects(result, GEN_HEAD(i))) {
1520 Py_DECREF(result);
1521 return NULL;
1522 }
1523 }
1524 return result;
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001525}
1526
Serhiy Storchaka93260282017-02-04 11:19:59 +02001527/*[clinic input]
1528gc.get_stats
1529
1530Return a list of dictionaries containing per-generation statistics.
1531[clinic start generated code]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001532
1533static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001534gc_get_stats_impl(PyObject *module)
1535/*[clinic end generated code: output=a8ab1d8a5d26f3ab input=1ef4ed9d17b1a470]*/
Antoine Pitroud4156c12012-10-30 22:43:19 +01001536{
1537 int i;
1538 PyObject *result;
1539 struct gc_generation_stats stats[NUM_GENERATIONS], *st;
1540
1541 /* To get consistent values despite allocations while constructing
1542 the result list, we use a snapshot of the running stats. */
1543 for (i = 0; i < NUM_GENERATIONS; i++) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001544 stats[i] = _PyRuntime.gc.generation_stats[i];
Antoine Pitroud4156c12012-10-30 22:43:19 +01001545 }
1546
1547 result = PyList_New(0);
1548 if (result == NULL)
1549 return NULL;
1550
1551 for (i = 0; i < NUM_GENERATIONS; i++) {
1552 PyObject *dict;
1553 st = &stats[i];
1554 dict = Py_BuildValue("{snsnsn}",
1555 "collections", st->collections,
1556 "collected", st->collected,
1557 "uncollectable", st->uncollectable
1558 );
1559 if (dict == NULL)
1560 goto error;
1561 if (PyList_Append(result, dict)) {
1562 Py_DECREF(dict);
1563 goto error;
1564 }
1565 Py_DECREF(dict);
1566 }
1567 return result;
1568
1569error:
1570 Py_XDECREF(result);
1571 return NULL;
1572}
1573
1574
Serhiy Storchaka93260282017-02-04 11:19:59 +02001575/*[clinic input]
1576gc.is_tracked
1577
1578 obj: object
1579 /
1580
1581Returns true if the object is tracked by the garbage collector.
1582
1583Simple atomic objects will return false.
1584[clinic start generated code]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001585
1586static PyObject *
Serhiy Storchaka93260282017-02-04 11:19:59 +02001587gc_is_tracked(PyObject *module, PyObject *obj)
1588/*[clinic end generated code: output=14f0103423b28e31 input=d83057f170ea2723]*/
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001589{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001590 PyObject *result;
1591
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001592 if (PyObject_IS_GC(obj) && _PyObject_GC_IS_TRACKED(obj))
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001593 result = Py_True;
1594 else
1595 result = Py_False;
1596 Py_INCREF(result);
1597 return result;
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001598}
1599
brainfvckc75edab2017-10-16 12:49:41 -07001600/*[clinic input]
1601gc.freeze
1602
1603Freeze all current tracked objects and ignore them for future collections.
1604
1605This can be used before a POSIX fork() call to make the gc copy-on-write friendly.
1606Note: collection before a POSIX fork() call may free pages for future allocation
1607which can cause copy-on-write.
1608[clinic start generated code]*/
1609
1610static PyObject *
1611gc_freeze_impl(PyObject *module)
1612/*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
1613{
1614 for (int i = 0; i < NUM_GENERATIONS; ++i) {
1615 gc_list_merge(GEN_HEAD(i), &_PyRuntime.gc.permanent_generation.head);
1616 _PyRuntime.gc.generations[i].count = 0;
1617 }
1618 Py_RETURN_NONE;
1619}
1620
1621/*[clinic input]
1622gc.unfreeze
1623
1624Unfreeze all objects in the permanent generation.
1625
1626Put all objects in the permanent generation back into oldest generation.
1627[clinic start generated code]*/
1628
1629static PyObject *
1630gc_unfreeze_impl(PyObject *module)
1631/*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
1632{
1633 gc_list_merge(&_PyRuntime.gc.permanent_generation.head, GEN_HEAD(NUM_GENERATIONS-1));
1634 Py_RETURN_NONE;
1635}
1636
1637/*[clinic input]
Victor Stinner05d68a82018-01-18 11:15:25 +01001638gc.get_freeze_count -> Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001639
1640Return the number of objects in the permanent generation.
1641[clinic start generated code]*/
1642
Victor Stinner05d68a82018-01-18 11:15:25 +01001643static Py_ssize_t
brainfvckc75edab2017-10-16 12:49:41 -07001644gc_get_freeze_count_impl(PyObject *module)
Victor Stinner05d68a82018-01-18 11:15:25 +01001645/*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
brainfvckc75edab2017-10-16 12:49:41 -07001646{
1647 return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
1648}
1649
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001650
Martin v. Löwis14f8b4c2002-06-13 20:33:02 +00001651PyDoc_STRVAR(gc__doc__,
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001652"This module provides access to the garbage collector for reference cycles.\n"
1653"\n"
Vladimir Marangozovf9d20c32000-08-06 22:45:31 +00001654"enable() -- Enable automatic garbage collection.\n"
1655"disable() -- Disable automatic garbage collection.\n"
1656"isenabled() -- Returns true if automatic collection is enabled.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001657"collect() -- Do a full collection right now.\n"
Thomas Wouters89f507f2006-12-13 04:49:30 +00001658"get_count() -- Return the current collection counts.\n"
R David Murray0e814632013-12-26 15:11:28 -05001659"get_stats() -- Return list of dictionaries containing per-generation stats.\n"
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001660"set_debug() -- Set debugging flags.\n"
1661"get_debug() -- Get debugging flags.\n"
1662"set_threshold() -- Set the collection thresholds.\n"
1663"get_threshold() -- Return the current the collection thresholds.\n"
Neil Schemenauerc7c8d8e2001-08-09 15:58:59 +00001664"get_objects() -- Return a list of all objects tracked by the collector.\n"
Antoine Pitrou3a652b12009-03-23 18:52:06 +00001665"is_tracked() -- Returns true if a given object is tracked.\n"
Jeremy Hylton5bd378b2003-04-03 16:28:38 +00001666"get_referrers() -- Return the list of objects that refer to an object.\n"
brainfvckc75edab2017-10-16 12:49:41 -07001667"get_referents() -- Return the list of objects that an object refers to.\n"
1668"freeze() -- Freeze all tracked objects and ignore them for future collections.\n"
1669"unfreeze() -- Unfreeze all objects in the permanent generation.\n"
1670"get_freeze_count() -- Return the number of objects in the permanent generation.\n");
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001671
/* Method table for the gc module.  The GC_*_METHODDEF entries come from
   Argument Clinic (clinic/gcmodule.c.h); the remaining entries are
   hand-written METH_VARARGS functions defined above. */
static PyMethodDef GcMethods[] = {
    GC_ENABLE_METHODDEF
    GC_DISABLE_METHODDEF
    GC_ISENABLED_METHODDEF
    GC_SET_DEBUG_METHODDEF
    GC_GET_DEBUG_METHODDEF
    GC_GET_COUNT_METHODDEF
    {"set_threshold",  gc_set_thresh, METH_VARARGS, gc_set_thresh__doc__},
    GC_GET_THRESHOLD_METHODDEF
    GC_COLLECT_METHODDEF
    GC_GET_OBJECTS_METHODDEF
    GC_GET_STATS_METHODDEF
    GC_IS_TRACKED_METHODDEF
    {"get_referrers",  gc_get_referrers, METH_VARARGS,
        gc_get_referrers__doc__},
    {"get_referents",  gc_get_referents, METH_VARARGS,
        gc_get_referents__doc__},
    GC_FREEZE_METHODDEF
    GC_UNFREEZE_METHODDEF
    GC_GET_FREEZE_COUNT_METHODDEF
    {NULL,      NULL}           /* Sentinel */
};
1694
Martin v. Löwis1a214512008-06-11 05:26:20 +00001695static struct PyModuleDef gcmodule = {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001696 PyModuleDef_HEAD_INIT,
Antoine Pitrou696e0352010-08-08 22:18:46 +00001697 "gc", /* m_name */
1698 gc__doc__, /* m_doc */
1699 -1, /* m_size */
1700 GcMethods, /* m_methods */
1701 NULL, /* m_reload */
1702 NULL, /* m_traverse */
1703 NULL, /* m_clear */
1704 NULL /* m_free */
Martin v. Löwis1a214512008-06-11 05:26:20 +00001705};
1706
Jason Tishler6bc06ec2003-09-04 11:59:50 +00001707PyMODINIT_FUNC
Martin v. Löwis1a214512008-06-11 05:26:20 +00001708PyInit_gc(void)
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001709{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001710 PyObject *m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001711
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001712 m = PyModule_Create(&gcmodule);
Martin v. Löwis1a214512008-06-11 05:26:20 +00001713
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001714 if (m == NULL)
1715 return NULL;
Tim Peters11558872003-04-06 23:30:52 +00001716
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001717 if (_PyRuntime.gc.garbage == NULL) {
1718 _PyRuntime.gc.garbage = PyList_New(0);
1719 if (_PyRuntime.gc.garbage == NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001720 return NULL;
1721 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001722 Py_INCREF(_PyRuntime.gc.garbage);
1723 if (PyModule_AddObject(m, "garbage", _PyRuntime.gc.garbage) < 0)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001724 return NULL;
Thomas Wouters477c8d52006-05-27 19:21:47 +00001725
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001726 if (_PyRuntime.gc.callbacks == NULL) {
1727 _PyRuntime.gc.callbacks = PyList_New(0);
1728 if (_PyRuntime.gc.callbacks == NULL)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001729 return NULL;
1730 }
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001731 Py_INCREF(_PyRuntime.gc.callbacks);
1732 if (PyModule_AddObject(m, "callbacks", _PyRuntime.gc.callbacks) < 0)
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001733 return NULL;
1734
Martin v. Löwis1a214512008-06-11 05:26:20 +00001735#define ADD_INT(NAME) if (PyModule_AddIntConstant(m, #NAME, NAME) < 0) return NULL
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001736 ADD_INT(DEBUG_STATS);
1737 ADD_INT(DEBUG_COLLECTABLE);
1738 ADD_INT(DEBUG_UNCOLLECTABLE);
1739 ADD_INT(DEBUG_SAVEALL);
1740 ADD_INT(DEBUG_LEAK);
Tim Peters11558872003-04-06 23:30:52 +00001741#undef ADD_INT
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001742 return m;
Jeremy Hyltonc5007aa2000-06-30 05:02:53 +00001743}
1744
Guido van Rossume13ddc92003-04-17 17:29:22 +00001745/* API to invoke gc.collect() from C */
Neal Norwitz7b216c52006-03-04 20:01:53 +00001746Py_ssize_t
Guido van Rossume13ddc92003-04-17 17:29:22 +00001747PyGC_Collect(void)
1748{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001749 Py_ssize_t n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001750
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001751 if (_PyRuntime.gc.collecting)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001752 n = 0; /* already collecting, don't do anything */
1753 else {
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001754 PyObject *exc, *value, *tb;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001755 _PyRuntime.gc.collecting = 1;
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001756 PyErr_Fetch(&exc, &value, &tb);
Kristján Valur Jónsson69c63522012-04-15 11:41:32 +00001757 n = collect_with_callback(NUM_GENERATIONS - 1);
Serhiy Storchaka301e3cc2018-05-24 15:19:29 +03001758 PyErr_Restore(exc, value, tb);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001759 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001760 }
Guido van Rossume13ddc92003-04-17 17:29:22 +00001761
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001762 return n;
Guido van Rossume13ddc92003-04-17 17:29:22 +00001763}
1764
Antoine Pitroufef34e32013-05-19 01:11:58 +02001765Py_ssize_t
Łukasz Langafef7e942016-09-09 21:47:46 -07001766_PyGC_CollectIfEnabled(void)
1767{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001768 if (!_PyRuntime.gc.enabled)
Łukasz Langafef7e942016-09-09 21:47:46 -07001769 return 0;
1770
1771 return PyGC_Collect();
1772}
1773
1774Py_ssize_t
Antoine Pitroufef34e32013-05-19 01:11:58 +02001775_PyGC_CollectNoFail(void)
1776{
1777 Py_ssize_t n;
1778
Serhiy Storchakac4653c92018-05-29 18:50:10 +03001779 assert(!PyErr_Occurred());
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001780 /* Ideally, this function is only called on interpreter shutdown,
1781 and therefore not recursively. Unfortunately, when there are daemon
1782 threads, a daemon thread can start a cyclic garbage collection
1783 during interpreter shutdown (and then never finish it).
1784 See http://bugs.python.org/issue8713#msg195178 for an example.
1785 */
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001786 if (_PyRuntime.gc.collecting)
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001787 n = 0;
1788 else {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001789 _PyRuntime.gc.collecting = 1;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001790 n = collect(NUM_GENERATIONS - 1, NULL, NULL, 1);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001791 _PyRuntime.gc.collecting = 0;
Antoine Pitrouc69c9bc2013-08-15 20:15:15 +02001792 }
Antoine Pitroufef34e32013-05-19 01:11:58 +02001793 return n;
1794}
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001795
Antoine Pitrou696e0352010-08-08 22:18:46 +00001796void
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001797_PyGC_DumpShutdownStats(void)
Antoine Pitrou696e0352010-08-08 22:18:46 +00001798{
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001799 if (!(_PyRuntime.gc.debug & DEBUG_SAVEALL)
1800 && _PyRuntime.gc.garbage != NULL && PyList_GET_SIZE(_PyRuntime.gc.garbage) > 0) {
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02001801 const char *message;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001802 if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE)
Antoine Pitroub5d82042010-11-05 00:05:25 +00001803 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001804 "shutdown";
1805 else
Antoine Pitroub5d82042010-11-05 00:05:25 +00001806 message = "gc: %zd uncollectable objects at " \
Georg Brandl08be72d2010-10-24 15:11:22 +00001807 "shutdown; use gc.set_debug(gc.DEBUG_UNCOLLECTABLE) to list them";
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001808 /* PyErr_WarnFormat does too many things and we are at shutdown,
1809 the warnings module's dependencies (e.g. linecache) may be gone
1810 already. */
1811 if (PyErr_WarnExplicitFormat(PyExc_ResourceWarning, "gc", 0,
1812 "gc", NULL, message,
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001813 PyList_GET_SIZE(_PyRuntime.gc.garbage)))
Georg Brandl08be72d2010-10-24 15:11:22 +00001814 PyErr_WriteUnraisable(NULL);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001815 if (_PyRuntime.gc.debug & DEBUG_UNCOLLECTABLE) {
Antoine Pitrou696e0352010-08-08 22:18:46 +00001816 PyObject *repr = NULL, *bytes = NULL;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001817 repr = PyObject_Repr(_PyRuntime.gc.garbage);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001818 if (!repr || !(bytes = PyUnicode_EncodeFSDefault(repr)))
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001819 PyErr_WriteUnraisable(_PyRuntime.gc.garbage);
Antoine Pitrou696e0352010-08-08 22:18:46 +00001820 else {
1821 PySys_WriteStderr(
Antoine Pitrou070cb3c2013-05-08 13:23:25 +02001822 " %s\n",
Antoine Pitrou696e0352010-08-08 22:18:46 +00001823 PyBytes_AS_STRING(bytes)
1824 );
1825 }
1826 Py_XDECREF(repr);
1827 Py_XDECREF(bytes);
1828 }
Antoine Pitrou696e0352010-08-08 22:18:46 +00001829 }
Antoine Pitrou5f454a02013-05-06 21:15:57 +02001830}
1831
/* Finalize the collector's module-level state: drop the reference to the
   gc.callbacks list (Py_CLEAR NULLs the slot before decref'ing). */
void
_PyGC_Fini(void)
{
    Py_CLEAR(_PyRuntime.gc.callbacks);
}
1837
Neil Schemenauer43411b52001-08-30 00:05:51 +00001838/* for debugging */
Guido van Rossume13ddc92003-04-17 17:29:22 +00001839void
1840_PyGC_Dump(PyGC_Head *g)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001841{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001842 _PyObject_Dump(FROM_GC(g));
Neil Schemenauer43411b52001-08-30 00:05:51 +00001843}
1844
Neil Schemenauer43411b52001-08-30 00:05:51 +00001845/* extension modules might be compiled with GC support so these
1846 functions must always be available */
1847
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001848#undef PyObject_GC_Track
1849#undef PyObject_GC_UnTrack
1850#undef PyObject_GC_Del
1851#undef _PyObject_GC_Malloc
1852
Neil Schemenauer43411b52001-08-30 00:05:51 +00001853void
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001854PyObject_GC_Track(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001855{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001856 _PyObject_GC_TRACK(op);
Neil Schemenauer43411b52001-08-30 00:05:51 +00001857}
1858
Neil Schemenauerfec4eb12002-04-12 02:41:03 +00001859void
1860PyObject_GC_UnTrack(void *op)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001861{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001862 /* Obscure: the Py_TRASHCAN mechanism requires that we be able to
1863 * call PyObject_GC_UnTrack twice on an object.
1864 */
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001865 if (_PyObject_GC_IS_TRACKED(op)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001866 _PyObject_GC_UNTRACK(op);
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001867 }
Neil Schemenauer43411b52001-08-30 00:05:51 +00001868}
1869
Victor Stinnerdb067af2014-05-02 22:31:14 +02001870static PyObject *
1871_PyObject_GC_Alloc(int use_calloc, size_t basicsize)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001872{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001873 PyObject *op;
1874 PyGC_Head *g;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001875 size_t size;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001876 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head))
1877 return PyErr_NoMemory();
Victor Stinnerdb067af2014-05-02 22:31:14 +02001878 size = sizeof(PyGC_Head) + basicsize;
1879 if (use_calloc)
1880 g = (PyGC_Head *)PyObject_Calloc(1, size);
1881 else
1882 g = (PyGC_Head *)PyObject_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001883 if (g == NULL)
1884 return PyErr_NoMemory();
INADA Naoki5ac9e6e2018-07-10 17:19:53 +09001885 assert(((uintptr_t)g & 3) == 0); // g must be aligned 4bytes boundary
1886 g->_gc_next = 0;
1887 g->_gc_prev = 0;
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001888 _PyRuntime.gc.generations[0].count++; /* number of allocated GC objects */
1889 if (_PyRuntime.gc.generations[0].count > _PyRuntime.gc.generations[0].threshold &&
1890 _PyRuntime.gc.enabled &&
1891 _PyRuntime.gc.generations[0].threshold &&
1892 !_PyRuntime.gc.collecting &&
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001893 !PyErr_Occurred()) {
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001894 _PyRuntime.gc.collecting = 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001895 collect_generations();
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001896 _PyRuntime.gc.collecting = 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001897 }
1898 op = FROM_GC(g);
1899 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001900}
1901
/* Allocate an uninitialized (non-zeroed) GC-managed object. */
PyObject *
_PyObject_GC_Malloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(0, basicsize);
}
1907
/* Allocate a zero-filled GC-managed object. */
PyObject *
_PyObject_GC_Calloc(size_t basicsize)
{
    return _PyObject_GC_Alloc(1, basicsize);
}
1913
1914PyObject *
Neil Schemenauer43411b52001-08-30 00:05:51 +00001915_PyObject_GC_New(PyTypeObject *tp)
1916{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001917 PyObject *op = _PyObject_GC_Malloc(_PyObject_SIZE(tp));
1918 if (op != NULL)
1919 op = PyObject_INIT(op, tp);
1920 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001921}
1922
1923PyVarObject *
Martin v. Löwis18e16552006-02-15 17:27:45 +00001924_PyObject_GC_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001925{
Victor Stinner5d1866c2013-07-08 22:17:52 +02001926 size_t size;
1927 PyVarObject *op;
1928
1929 if (nitems < 0) {
1930 PyErr_BadInternalCall();
1931 return NULL;
1932 }
1933 size = _PyObject_VAR_SIZE(tp, nitems);
1934 op = (PyVarObject *) _PyObject_GC_Malloc(size);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001935 if (op != NULL)
1936 op = PyObject_INIT_VAR(op, tp, nitems);
1937 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001938}
1939
1940PyVarObject *
Martin v. Löwis41290682006-02-16 14:56:14 +00001941_PyObject_GC_Resize(PyVarObject *op, Py_ssize_t nitems)
Neil Schemenauer43411b52001-08-30 00:05:51 +00001942{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001943 const size_t basicsize = _PyObject_VAR_SIZE(Py_TYPE(op), nitems);
Victor Stinnera4b2bc72018-10-26 18:00:13 +02001944 _PyObject_ASSERT((PyObject *)op, !_PyObject_GC_IS_TRACKED(op));
1945 if (basicsize > PY_SSIZE_T_MAX - sizeof(PyGC_Head)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001946 return (PyVarObject *)PyErr_NoMemory();
Victor Stinnera4b2bc72018-10-26 18:00:13 +02001947 }
1948
1949 PyGC_Head *g = AS_GC(op);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001950 g = (PyGC_Head *)PyObject_REALLOC(g, sizeof(PyGC_Head) + basicsize);
1951 if (g == NULL)
1952 return (PyVarObject *)PyErr_NoMemory();
1953 op = (PyVarObject *) FROM_GC(g);
1954 Py_SIZE(op) = nitems;
1955 return op;
Neil Schemenauer43411b52001-08-30 00:05:51 +00001956}
1957
/* Free a GC-managed object: unlink it from its generation list if it is
   still tracked, decrement the generation-0 allocation counter, and
   release the underlying memory (header included). */
void
PyObject_GC_Del(void *op)
{
    PyGC_Head *g = AS_GC(op);
    if (_PyObject_GC_IS_TRACKED(op)) {
        gc_list_remove(g);
    }
    /* The counter can already be zero, e.g. after a collection reset it. */
    if (_PyRuntime.gc.generations[0].count > 0) {
        _PyRuntime.gc.generations[0].count--;
    }
    PyObject_FREE(g);
}