#include "Python.h"

#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif
/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 * 0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
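
/* Worked example (illustrative only; it mirrors the arithmetic the malloc
 * path below already performs, and adds no new machinery): a request of
 * nbytes == 20 is grouped as
 *
 *     idx  = (uint)(20 - 1) >> ALIGNMENT_SHIFT;    == 19 >> 3 == 2
 *     size = INDEX2SIZE(idx);                      == 3 << 3  == 24
 *
 * so it is served from a 24-byte block of size class 2 (the 17-24 row of
 * the table above); the 4 trailing bytes are padding lost to 8-byte
 * alignment.
 */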

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to use preallocated memory pools. You can tune this value
 * according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory". It's addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration             */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock       */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock               */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock               */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)      (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD   ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))
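
/* Illustrative note (an observation, not extra machinery): with
 * POOL_SIZE == 4K, POOL_SIZE_MASK is 0xfff, so POOL_ADDR just clears the
 * low 12 bits of P.  Every address handed out from a pool therefore maps
 * back to its pool header; e.g. for a (hypothetical) pool based at 0x1000:
 *
 *     POOL_ADDR(0x1008) == POOL_ADDR(0x1ff8) == (poolp)0x1000
 *
 * This is why pools must be carved from pool-aligned addresses within an
 * arena.
 */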

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
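
/* Example arithmetic (illustrative; exact values depend on the platform's
 * struct layout): on a 32-bit build where sizeof(struct pool_header) == 32,
 * POOL_OVERHEAD is ROUNDUP(32) == 32, so
 *
 *     NUMBLOCKS(0)  == (4096 - 32) / 8   == 508 blocks of size 8
 *     NUMBLOCKS(31) == (4096 - 32) / 256 == 15  blocks of size 256
 *
 * i.e. each 4K pool loses one rounded-up header's worth of space to
 * bookkeeping.
 */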

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i.  So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything anymore, and its nextpool and prevpool
    members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member.  The prevpool member has no meaning in
    this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the
    pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with the
fact that pymalloc strives, at all levels (arena, pool, and block), never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
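
/* A concrete walk-through of the trick above (illustrative only):  PTA(i)
 * points 2*sizeof(block *) bytes *before* usedpools[2*i], so when that fake
 * pointer is viewed as a poolp, its nextpool and prevpool "members" land
 * exactly on usedpools[2*i] and usedpools[2*i + 1].  Since both slots are
 * initialized to PTA(i), a fresh header satisfies, for pool = usedpools[i+i],
 *
 *     pool->nextpool == pool && pool->prevpool == pool
 *
 * which is the empty-circular-list condition that PyObject_Malloc below
 * tests with `pool != pool->nextpool`.
 */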

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/
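
/* Sketch of the list invariants described above, phrased as a check one
 * could run over the `arenas` vector (illustrative pseudocode only; the
 * allocator spot-checks these with asserts rather than a full scan):
 *
 *     for each arena_object ao in arenas[0 .. maxarenas):
 *         if ao.address == 0:       ao is on unused_arena_objects
 *         else if ao.nfreepools:    ao is on usable_arenas, where
 *                                   nfreepools is non-decreasing front-to-back
 *         else:                     ao is on neither list
 */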

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

#ifdef PYMALLOC_DEBUG
/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;
#endif

/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;        /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;        /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;      /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    arenaobj->address = (uptr)malloc(ARENA_SIZE);
    if (arenaobj->address == 0) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }

    ++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
#endif
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky:  Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure:  A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details:  given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result:  AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite the fact that
P lies in slice AO.address : AO.address + ARENA_SIZE).

Note:  The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                                    \
    ((POOL)->arenaindex < maxarenas &&                                  \
     (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
     arenas[(POOL)->arenaindex].address != 0)
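
/* Typical call pattern (this mirrors exactly how the free and realloc paths
 * below use the macro; shown here only to make the POOL precondition
 * concrete):
 *
 *     poolp pool = POOL_ADDR(p);
 *     if (Py_ADDRESS_IN_RANGE(p, pool)) {
 *         ... p was allocated by pymalloc; pool is its pool header ...
 *     }
 *     else {
 *         ... p belongs to the system allocator, e.g. pass it to free() ...
 *     }
 */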

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind.  By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
                          (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif
/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1))
        running_on_valgrind = RUNNING_ON_VALGRIND;
    if (UNLIKELY(running_on_valgrind))
        goto redirect;
#endif

    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most Python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for nbytes < 0 is not required.
     */
    if (nbytes > PY_SSIZE_T_MAX)
        return NULL;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool + pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available:  use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena = usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list.  However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated:  remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0:  it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools.  This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management.  We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1.  First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena = ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena = ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena. */
                free((void *)ao->address);
                ao->address = 0;        /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas.  Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted.  The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed.  In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4.  Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3:  We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao && ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

#ifdef WITH_VALGRIND
redirect:
#endif
    /* We didn't allocate this address. */
    free(p);
}
1157
Tim Peters84c1b972002-04-04 04:44:32 +00001158/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1159 * then as the Python docs promise, we do not treat this like free(p), and
1160 * return a non-NULL result.
1161 */
Neil Schemenauera35c6882001-02-27 04:45:05 +00001162
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001163#undef PyObject_Realloc
Neil Schemenauera35c6882001-02-27 04:45:05 +00001164void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001165PyObject_Realloc(void *p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001166{
Tim Peters84c1b972002-04-04 04:44:32 +00001167 void *bp;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001168 poolp pool;
Martin v. Löwis18e16552006-02-15 17:27:45 +00001169 size_t size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001170
Neil Schemenauera35c6882001-02-27 04:45:05 +00001171 if (p == NULL)
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001172 return PyObject_Malloc(nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001173
Gregory P. Smith0470bab2008-07-22 04:46:32 +00001174 /*
1175 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
1176 * Most python internals blindly use a signed Py_ssize_t to track
1177 * things without checking for overflows or negatives.
1178 * As size_t is unsigned, checking for nbytes < 0 is not required.
1179 */
1180 if (nbytes > PY_SSIZE_T_MAX)
1181 return NULL;
1182
Benjamin Peterson91c12eb2009-12-03 02:52:39 +00001183#ifdef WITH_VALGRIND
1184 /* Treat running_on_valgrind == -1 the same as 0 */
1185 if (UNLIKELY(running_on_valgrind > 0))
1186 goto redirect;
1187#endif
1188
Tim Petersd97a1c02002-03-30 06:09:22 +00001189 pool = POOL_ADDR(p);
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001190 if (Py_ADDRESS_IN_RANGE(p, pool)) {
Neil Schemenauera35c6882001-02-27 04:45:05 +00001191 /* We're in charge of this block */
Tim Peterse70ddf32002-04-05 04:32:29 +00001192 size = INDEX2SIZE(pool->szidx);
Tim Peters4ce71f72002-05-02 20:19:34 +00001193 if (nbytes <= size) {
1194 /* The block is staying the same or shrinking. If
1195 * it's shrinking, there's a tradeoff: it costs
1196 * cycles to copy the block to a smaller size class,
1197 * but it wastes memory not to copy it. The
1198 * compromise here is to copy on shrink only if at
1199 * least 25% of size can be shaved off.
1200 */
1201 if (4 * nbytes > 3 * size) {
1202 /* It's the same,
1203 * or shrinking and new/old > 3/4.
1204 */
1205 return p;
1206 }
1207 size = nbytes;
1208 }
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001209 bp = PyObject_Malloc(nbytes);
Tim Peters84c1b972002-04-04 04:44:32 +00001210 if (bp != NULL) {
1211 memcpy(bp, p, size);
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001212 PyObject_Free(p);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001213 }
Tim Peters84c1b972002-04-04 04:44:32 +00001214 return bp;
1215 }
Benjamin Peterson91c12eb2009-12-03 02:52:39 +00001216#ifdef WITH_VALGRIND
1217 redirect:
1218#endif
Tim Petersecc6e6a2005-07-10 22:30:55 +00001219 /* We're not managing this block. If nbytes <=
1220 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1221 * block. However, if we do, we need to copy the valid data from
1222 * the C-managed block to one of our blocks, and there's no portable
1223 * way to know how much of the memory space starting at p is valid.
1224 * As bug 1185883 pointed out the hard way, it's possible that the
1225 * C-managed block is "at the end" of allocated VM space, so that
1226 * a memory fault can occur if we try to copy nbytes bytes starting
1227 * at p. Instead we punt: let C continue to manage this block.
1228 */
1229 if (nbytes)
1230 return realloc(p, nbytes);
1231 /* C doesn't define the result of realloc(p, 0) (it may or may not
1232 * return NULL then), but Python's docs promise that nbytes==0 never
1233 * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
1234 * to begin with. Even then, we can't be sure that realloc() won't
1235 * return NULL.
1236 */
1237 bp = realloc(p, 1);
1238 return bp ? bp : p;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001239}
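
/* Illustrative sketch (compiled out; not part of the allocator): how a
 * caller observes the two promises documented above.  A shrink that shaves
 * less than 25% off a pymalloc-served block returns the original pointer
 * unmoved (the "4 * nbytes > 3 * size" test), and nbytes==0 still yields a
 * usable non-NULL pointer.  The sizes here are chosen for illustration.
 */
#if 0   /* example only */
static void
example_realloc_contract(void)
{
    char *q = (char *)PyObject_Malloc(64);      /* size class 64 */
    char *r;
    assert(q != NULL);

    r = (char *)PyObject_Realloc(q, 56);        /* shaves < 25%: no copy */
    assert(r == q);

    r = (char *)PyObject_Realloc(r, 0);         /* promised non-NULL */
    assert(r != NULL);
    PyObject_Free(r);
}
#endif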

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory
 * directly; it wraps a real allocator, adding extra debugging info to the
 * memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

/* We tag each block with an API ID so that API violations (e.g. memory
 * allocated via the PyMem_* API but released via the PyObject_* API) can
 * be detected. */
#define _PYMALLOC_MEM_ID 'm'   /* the PyMem_Malloc() API */
#define _PYMALLOC_OBJ_ID 'o'   /* the PyObject_Malloc() API */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}
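
/* Hypothetical debugging session (illustrative, not from the source):
 * because every increment funnels through bumpserialno(), a conditional
 * breakpoint pinpoints the allocation whose serial number a later dump
 * reported, e.g. in gdb:
 *
 *     (gdb) break bumpserialno if serialno == 4721
 *     (gdb) run
 *
 * where 4721 stands in for the serial number printed by
 * _PyObject_DebugDumpAddress on the previous run.
 */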

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uchar *q = (const uchar *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uchar *q = (uchar *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uchar)(n & 0xff);
        n >>= 8;
    }
}
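
/* Illustrative sketch (compiled out): write_size_t() and read_size_t()
 * are exact inverses, and the layout is big-endian regardless of the
 * host's native byte order -- the MSB always lands at the lowest address.
 */
#if 0   /* example only */
static void
example_size_t_roundtrip(void)
{
    uchar buf[SST];
    size_t n = (size_t)0xABCD;

    write_size_t(buf, n);
    assert(read_size_t(buf) == n);
    assert(buf[SST - 1] == 0xCD);   /* LSB at the highest address */
    assert(buf[SST - 2] == 0xAB);
}
#endif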

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian
    (easier to read in a memory dump).
p[S]
    The API id character ('m' or 'o'), recording which API the block was
    allocated under.
p[S+1: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and under-reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch references to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and over-reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
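
/* Worked example of the layout above, as produced by
 * _PyObject_DebugMallocApi() below, assuming a 64-bit build (S == 8) and
 * a request of n == 3 bytes through the object API:
 *
 *     p[0:8]    00 00 00 00 00 00 00 03   requested size, big-endian
 *     p[8]      'o'                       API id
 *     p[9:16]   FB FB FB FB FB FB FB      leading pad (FORBIDDENBYTE)
 *     p[16:19]  CB CB CB                  the 3 usable bytes (CLEANBYTE)
 *     p[19:27]  FB x 8                    trailing pad
 *     p[27:35]  serial number             big-endian size_t
 *
 * for a total of 3 + 4*8 == 35 bytes, with &p[16] handed to the caller.
 */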

/* debug replacements for the PyMem_* memory API */
void *
_PyMem_DebugMalloc(size_t nbytes)
{
    return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);
}
void *
_PyMem_DebugRealloc(void *p, size_t nbytes)
{
    return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);
}
void
_PyMem_DebugFree(void *p)
{
    _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);
}

/* debug replacements for the PyObject_* memory API */
void *
_PyObject_DebugMalloc(size_t nbytes)
{
    return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);
}
void *
_PyObject_DebugRealloc(void *p, size_t nbytes)
{
    return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);
}
void
_PyObject_DebugFree(void *p)
{
    _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);
}
void
_PyObject_DebugCheckAddress(const void *p)
{
    _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);
}


/* generic debug memory api, with an "id" to identify the API in use */
void *
_PyObject_DebugMallocApi(char id, size_t nbytes)
{
    uchar *p;       /* base address of malloc'ed block */
    uchar *tail;    /* p + 2*SST + nbytes == pointer to tail pad bytes */
    size_t total;   /* nbytes + 4*SST */

    bumpserialno();
    total = nbytes + 4*SST;
    if (total < nbytes)
        /* overflow:  can't represent total as a size_t */
        return NULL;

    p = (uchar *)PyObject_Malloc(total);
    if (p == NULL)
        return NULL;

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uchar)id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    if (nbytes > 0)
        memset(p + 2*SST, CLEANBYTE, nbytes);

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = p + 2*SST + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    return p + 2*SST;
}

/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs with the api ID are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
void
_PyObject_DebugFreeApi(char api, void *p)
{
    uchar *q = (uchar *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    if (p == NULL)
        return;
    _PyObject_DebugCheckAddressApi(api, p);
    nbytes = read_size_t(q);
    nbytes += 4*SST;
    if (nbytes > 0)
        memset(q, DEADBYTE, nbytes);
    PyObject_Free(q);
}

void *
_PyObject_DebugReallocApi(char api, void *p, size_t nbytes)
{
    uchar *q = (uchar *)p;
    uchar *tail;
    size_t total;   /* nbytes + 4*SST */
    size_t original_nbytes;
    int i;

    if (p == NULL)
        return _PyObject_DebugMallocApi(api, nbytes);

    _PyObject_DebugCheckAddressApi(api, p);
    bumpserialno();
    original_nbytes = read_size_t(q - 2*SST);
    total = nbytes + 4*SST;
    if (total < nbytes)
        /* overflow:  can't represent total as a size_t */
        return NULL;

    if (nbytes < original_nbytes) {
        /* shrinking:  mark old extra memory dead */
        memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);
    }

    /* Resize and add decorations.  We may get a new pointer here, in which
     * case we didn't get the chance to mark the old memory with DEADBYTE,
     * but we live with that.
     */
    q = (uchar *)PyObject_Realloc(q - 2*SST, total);
    if (q == NULL)
        return NULL;

    write_size_t(q, nbytes);
    assert(q[SST] == (uchar)api);
    for (i = 1; i < SST; ++i)
        assert(q[SST + i] == FORBIDDENBYTE);
    q += 2*SST;
    tail = q + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(q + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
void
_PyObject_DebugCheckAddressApi(char api, const void *p)
{
    const uchar *q = (const uchar *)p;
    char msgbuf[64];
    char *msg;
    size_t nbytes;
    const uchar *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msg, sizeof(msgbuf),
                 "bad ID: Allocated using API '%c', "
                 "verified using API '%c'", id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
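
/* Illustrative sketch (compiled out): the kind of heap corruption this
 * check is designed to catch.  A single byte written past the end of a
 * debug block lands on a FORBIDDENBYTE, so the next free of that block
 * aborts via Py_FatalError with "bad trailing pad byte".
 */
#if 0   /* example only */
static void
example_catch_overwrite(void)
{
    char *s = (char *)_PyObject_DebugMalloc(8);
    s[8] = 'X';                 /* off-by-one overwrite of the tail pad */
    _PyObject_DebugFree(s);     /* fatal:  "bad trailing pad byte" */
}
#endif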

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    const uchar *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uchar byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "       may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uchar byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
}

static size_t
printone(const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, stderr);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', stderr);
    fputc('=', stderr);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        uint digit = (uint)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, stderr);

    return origvalue;
}
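
/* For illustration (values hypothetical), a call such as
 *
 *     printone("# bytes in allocated blocks", 18280192);
 *
 * produces a line of the form
 *
 *     # bytes in allocated blocks        =         18,280,192
 *
 * with the label padded to 35 columns and the value comma-grouped and
 * right-aligned in the fixed-width field built in buf.
 */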

/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint poolsinarena;
        uint j;
        uptr base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uptr)NULL)
            continue;
        narenas += 1;

        poolsinarena = arenas[i].ntotalpools;
        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uptr)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uptr)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uptr) arenas[i].pool_address);
        for (j = 0;
             base < (uptr) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
                assert(pool_is_in_list(p, arenas[i].freepools));
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', stderr);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          stderr);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(stderr, "%5u %6u "
                        "%11" PY_FORMAT_SIZE_T "u "
                        "%15" PY_FORMAT_SIZE_T "u "
                        "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', stderr);
    (void)printone("# times object malloc called", serialno);

    (void)printone("# arenas allocated total", ntimes_arena_allocated);
    (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone("# arenas highwater mark", narenas_highwater);
    (void)printone("# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(buf, narenas * ARENA_SIZE);

    fputc('\n', stderr);

    total = printone("# bytes in allocated blocks", allocated_bytes);
    total += printone("# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(buf, (size_t)numfreepools * POOL_SIZE);

    total += printone("# bytes lost to pool headers", pool_header_bytes);
    total += printone("# bytes lost to quantization", quantization);
    total += printone("# bytes lost to arena alignment", arena_alignment);
    (void)printone("Total", total);
}

#endif /* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it since the definition is
 * after the reference.
 */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
    return pool->arenaindex < maxarenas &&
           (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
           arenas[pool->arenaindex].address != 0;
}
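
/* Note on the comparison above (illustrative): because the subtraction is
 * done in the unsigned uptr type, the single test
 * "(uptr)P - address < ARENA_SIZE" covers both "P below the arena base"
 * (the difference wraps around to a huge value) and "P at or beyond the
 * arena end".  A compiled-out sketch of the same idiom:
 */
#if 0   /* example only */
static int
example_in_range(uptr p, uptr base)
{
    /* true iff base <= p < base + ARENA_SIZE, in one unsigned compare */
    return p - base < (uptr)ARENA_SIZE;
}
#endif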
#endif