#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerates over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 * 0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

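#if 0
/* Illustrative sketch only (not part of the allocator, compiled out): how a
 * request size maps to a size class index and back, using the same shift
 * arithmetic as the table above. The helper name is hypothetical.
 */
static unsigned int
sketch_size_class(size_t nbytes)        /* assumes 1 <= nbytes <= 256 */
{
    return (unsigned int)(nbytes - 1) >> ALIGNMENT_SHIFT;
}
/* E.g. sketch_size_class(1)   == 0  and INDEX2SIZE(0)  ==   8,
 *      sketch_size_class(57)  == 7  and INDEX2SIZE(7)  ==  64,
 *      sketch_size_class(256) == 31 and INDEX2SIZE(31) == 256.
 */
#endif
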
/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to be served from preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) usually just
 * reserves an address range of <Big> bytes; physical pages are committed
 * only as they are subsequently referenced. So malloc'ing big blocks and not
 * using them does not mean "wasting memory"; it wastes addressable range,
 * not memory.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration             */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock       */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock               */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock               */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool ""              */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)      (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD   ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))

262
263/*
264 * This malloc lock
265 */
Jeremy Hyltond1fedb62002-07-18 18:49:52 +0000266SIMPLELOCK_DECL(_malloc_lock)
Tim Petersb2336522001-03-11 18:36:13 +0000267#define LOCK() SIMPLELOCK_LOCK(_malloc_lock)
268#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)
269#define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)
270#define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)
Neil Schemenauera35c6882001-02-27 04:45:05 +0000271
272/*
Tim Peters1e16db62002-03-31 01:05:22 +0000273 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
274
275This is involved. For an index i, usedpools[i+i] is the header for a list of
276all partially used pools holding small blocks with "size class idx" i. So
277usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
27816, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
279
Tim Peters338e0102002-04-01 19:23:44 +0000280Pools are carved off the current arena highwater mark (file static arenabase)
281as needed. Once carved off, a pool is in one of three states forever after:
Tim Peters1e16db62002-03-31 01:05:22 +0000282
Tim Peters338e0102002-04-01 19:23:44 +0000283used == partially used, neither empty nor full
284 At least one block in the pool is currently allocated, and at least one
285 block in the pool is not currently allocated (note this implies a pool
286 has room for at least two blocks).
287 This is a pool's initial state, as a pool is created only when malloc
288 needs space.
289 The pool holds blocks of a fixed size, and is in the circular list headed
290 at usedpools[i] (see above). It's linked to the other used pools of the
291 same size class via the pool_header's nextpool and prevpool members.
292 If all but one block is currently allocated, a malloc can cause a
293 transition to the full state. If all but one block is not currently
294 allocated, a free can cause a transition to the empty state.
Tim Peters1e16db62002-03-31 01:05:22 +0000295
Tim Peters338e0102002-04-01 19:23:44 +0000296full == all the pool's blocks are currently allocated
297 On transition to full, a pool is unlinked from its usedpools[] list.
298 It's not linked to from anything then anymore, and its nextpool and
299 prevpool members are meaningless until it transitions back to used.
300 A free of a block in a full pool puts the pool back in the used state.
301 Then it's linked in at the front of the appropriate usedpools[] list, so
302 that the next allocation for its size class will reuse the freed block.
303
304empty == all the pool's blocks are currently available for allocation
305 On transition to empty, a pool is unlinked from its usedpools[] list,
306 and linked to the front of the (file static) singly-linked freepools list,
307 via its nextpool member. The prevpool member has no meaning in this case.
308 Empty pools have no inherent size class: the next time a malloc finds
309 an empty list in usedpools[], it takes the first pool off of freepools.
310 If the size class needed happens to be the same as the size class the pool
Tim Peterse70ddf32002-04-05 04:32:29 +0000311 last had, some pool initialization can be skipped.
Tim Peters338e0102002-04-01 19:23:44 +0000312
313
314Block Management
315
316Blocks within pools are again carved out as needed. pool->freeblock points to
317the start of a singly-linked list of free blocks within the pool. When a
318block is freed, it's inserted at the front of its pool's freeblock list. Note
319that the available blocks in a pool are *not* linked all together when a pool
Tim Peterse70ddf32002-04-05 04:32:29 +0000320is initialized. Instead only "the first two" (lowest addresses) blocks are
321set up, returning the first such block, and setting pool->freeblock to a
322one-block list holding the second such block. This is consistent with that
323pymalloc strives at all levels (arena, pool, and block) never to touch a piece
324of memory until it's actually needed.
325
326So long as a pool is in the used state, we're certain there *is* a block
Tim Peters52aefc82002-04-11 06:36:45 +0000327available for allocating, and pool->freeblock is not NULL. If pool->freeblock
328points to the end of the free list before we've carved the entire pool into
329blocks, that means we simply haven't yet gotten to one of the higher-address
330blocks. The offset from the pool_header to the start of "the next" virgin
331block is stored in the pool_header nextoffset member, and the largest value
332of nextoffset that makes sense is stored in the maxnextoffset member when a
333pool is initialized. All the blocks in a pool have been passed out at least
334once when and only when nextoffset > maxnextoffset.
Tim Peters338e0102002-04-01 19:23:44 +0000335
Tim Peters1e16db62002-03-31 01:05:22 +0000336
337Major obscurity: While the usedpools vector is declared to have poolp
338entries, it doesn't really. It really contains two pointers per (conceptual)
339poolp entry, the nextpool and prevpool members of a pool_header. The
340excruciating initialization code below fools C so that
341
342 usedpool[i+i]
343
344"acts like" a genuine poolp, but only so long as you only reference its
345nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
346compensating for that a pool_header's nextpool and prevpool members
347immediately follow a pool_header's first two members:
348
349 union { block *_padding;
350 uint count; } ref;
351 block *freeblock;
352
353each of which consume sizeof(block *) bytes. So what usedpools[i+i] really
354contains is a fudged-up pointer p such that *if* C believes it's a poolp
355pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
356circular list is empty).
357
358It's unclear why the usedpools setup is so convoluted. It could be to
359minimize the amount of cache required to hold this heavily-referenced table
360(which only *needs* the two interpool pointer members of a pool_header). OTOH,
361referencing code has to remember to "double the index" and doing so isn't
362free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
363on that C doesn't insert any padding anywhere in a pool_header at or before
364the prevpool member.
365**************************************************************************** */
366
Neil Schemenauera35c6882001-02-27 04:45:05 +0000367#define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
368#define PT(x) PTA(x), PTA(x)
369
370static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
371 PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
372#if NB_SMALL_SIZE_CLASSES > 8
373 , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
374#if NB_SMALL_SIZE_CLASSES > 16
375 , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
376#if NB_SMALL_SIZE_CLASSES > 24
377 , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
378#if NB_SMALL_SIZE_CLASSES > 32
379 , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
380#if NB_SMALL_SIZE_CLASSES > 40
381 , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
382#if NB_SMALL_SIZE_CLASSES > 48
383 , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
384#if NB_SMALL_SIZE_CLASSES > 56
385 , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
386#endif /* NB_SMALL_SIZE_CLASSES > 56 */
387#endif /* NB_SMALL_SIZE_CLASSES > 48 */
388#endif /* NB_SMALL_SIZE_CLASSES > 40 */
389#endif /* NB_SMALL_SIZE_CLASSES > 32 */
390#endif /* NB_SMALL_SIZE_CLASSES > 24 */
391#endif /* NB_SMALL_SIZE_CLASSES > 16 */
392#endif /* NB_SMALL_SIZE_CLASSES > 8 */
393};
394
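#if 0
/* Illustrative sketch only (compiled out): the invariant the PTA trick
 * establishes. Each fake header's nextpool and prevpool fields land exactly
 * on the two table slots for its size class and point back at the fake
 * header itself, so every list starts out empty.
 */
static void
sketch_usedpools_invariant(void)
{
    uint i;
    for (i = 0; i < NB_SMALL_SIZE_CLASSES; ++i) {
        poolp fake = usedpools[i + i];
        assert(fake->nextpool == fake && fake->prevpool == fake);
    }
}
#endif
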
/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*==========================================================================*/
/* Arena management. */

/* arenas is a vector of arena base addresses, in order of allocation time.
 * arenas currently contains narenas entries, and has space allocated
 * for at most maxarenas entries.
 *
 * CAUTION: See the long comment block about thread safety in new_arena():
 * the code currently relies in deep ways on the fact that this vector only
 * grows, and only grows by appending at the end. For now we never return an
 * arena to the OS.
 */
static uptr *volatile arenas = NULL;    /* the pointer itself is volatile */
static volatile uint narenas = 0;
static uint maxarenas = 0;

/* Number of pools still available to be allocated in the current arena. */
static uint nfreepools = 0;

/* Free space start address in current arena. This is pool-aligned. */
static block *arenabase = NULL;

/* Allocate a new arena and return its base address. If we run out of
 * memory, return NULL.
 */
static block *
new_arena(void)
{
    uint excess;        /* number of bytes above pool alignment */
    block *bp = (block *)malloc(ARENA_SIZE);
    if (bp == NULL)
        return NULL;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif

    /* arenabase <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenabase = bp;
    nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * nfreepools == ARENA_SIZE);
    excess = (uint)((Py_uintptr_t)bp & POOL_SIZE_MASK);
    if (excess != 0) {
        --nfreepools;
        arenabase += POOL_SIZE - excess;
    }

    /* Make room for a new entry in the arenas vector. */
    if (arenas == NULL) {
        assert(narenas == 0 && maxarenas == 0);
        arenas = (uptr *)malloc(16 * sizeof(*arenas));
        if (arenas == NULL)
            goto error;
        maxarenas = 16;
    }
    else if (narenas == maxarenas) {
        /* Grow arenas.
         *
         * Exceedingly subtle: Someone may be calling the pymalloc
         * free via PyMem_{DEL, Del, FREE, Free} without holding the
         * GIL. Someone else may simultaneously be calling the
         * pymalloc malloc while holding the GIL via, e.g.,
         * PyObject_New. Now the pymalloc free may index into arenas
         * for an address check, while the pymalloc malloc calls
         * new_arena and we end up here to grow a new arena *and*
         * grow the arenas vector. If the value of arenas picked up
         * by pymalloc free "vanishes" during this resize, anything
         * may happen, and it would be an incredibly rare bug.
         * Therefore the code here takes great pains to make sure
         * that, at every moment, arenas always points to an intact
         * vector of addresses. It doesn't matter whether arenas
         * points to a wholly up-to-date vector when pymalloc free
         * checks it in this case, because the only legal (and that
         * even this is legal is debatable) way to call
         * PyMem_{Del, etc} while not holding the GIL is if the
         * memory being released is not object memory, i.e. if the
         * address check in pymalloc free is supposed to fail.
         * Having an incomplete vector can't make a supposed-to-fail
         * case succeed by mistake (it could only make a
         * supposed-to-succeed case fail by mistake).
         *
         * In addition, without a lock we can't know for sure when
         * an old vector is no longer referenced, so we simply let
         * old vectors leak.
         *
         * And on top of that, since narenas and arenas can't be
         * changed as-a-pair atomically without a lock, we're also
         * careful to declare them volatile and ensure that we change
         * arenas first. This prevents another thread from picking
         * up an narenas value too large for the arenas value it
         * reads (arenas never shrinks).
         *
         * Read the above 50 times before changing anything in this
         * block.
         */
        uptr *p;
        uint newmax = maxarenas << 1;
        if (newmax <= maxarenas)        /* overflow */
            goto error;
        p = (uptr *)malloc(newmax * sizeof(*arenas));
        if (p == NULL)
            goto error;
        memcpy(p, arenas, narenas * sizeof(*arenas));
        arenas = p;     /* old arenas deliberately leaked */
        maxarenas = newmax;
    }

    /* Append the new arena address to arenas. */
    assert(narenas < maxarenas);
    arenas[narenas] = (uptr)bp;
    ++narenas;          /* can't overflow, since narenas < maxarenas before */
    return bp;

error:
    free(bp);
    nfreepools = 0;
    return NULL;
}

/* Return true if and only if P is an address that was allocated by
 * pymalloc. POOL must be the pool address associated with P; POOL's
 * arenaindex is the index into arenas that the address claims to come from.
 *
 * Tricky: Letting B be the arena base address in arenas[arenaindex], P
 * belongs to the arena if and only if
 *      B <= P < B + ARENA_SIZE
 * Subtracting B throughout, this is true iff
 *      0 <= P-B < ARENA_SIZE
 * By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
 *
 * Obscure: A PyMem "free memory" function can call the pymalloc free or
 * realloc before the first arena has been allocated. arenas is still
 * NULL in that case. We're relying on narenas also being 0 then, so the
 * arenaindex < narenas test must be false, saving us from trying to index
 * into a NULL arenas.
 */
#define Py_ADDRESS_IN_RANGE(P, POOL)    \
    ((POOL)->arenaindex < narenas &&    \
     (uptr)(P) - arenas[(POOL)->arenaindex] < (uptr)ARENA_SIZE)

542 * Purify or Valgrind. Uncomment to use.
543 *
Neal Norwitz7eb3c912004-06-06 19:20:22 +0000544#define Py_USING_MEMORY_DEBUGGER
Neal Norwitzb5d77022004-06-06 19:21:34 +0000545 */
Neal Norwitz7eb3c912004-06-06 19:20:22 +0000546
547#ifdef Py_USING_MEMORY_DEBUGGER
548
549/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design
550 * This leads to thousands of spurious warnings when using
551 * Purify or Valgrind. By making a function, we can easily
552 * suppress the uninitialized memory reads in this one function.
553 * So we won't ignore real errors elsewhere.
554 *
555 * Disable the macro and use a function.
556 */
557
558#undef Py_ADDRESS_IN_RANGE
559
560/* Don't make static, to ensure this isn't inlined. */
561int Py_ADDRESS_IN_RANGE(void *P, poolp pool);
562#endif
Tim Peters338e0102002-04-01 19:23:44 +0000563
Neil Schemenauera35c6882001-02-27 04:45:05 +0000564/*==========================================================================*/
565
Tim Peters84c1b972002-04-04 04:44:32 +0000566/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
567 * from all other currently live pointers. This may not be possible.
568 */
Neil Schemenauera35c6882001-02-27 04:45:05 +0000569
570/*
571 * The basic blocks are ordered by decreasing execution frequency,
572 * which minimizes the number of jumps in the most common cases,
573 * improves branching prediction and instruction scheduling (small
574 * block allocations typically result in a couple of instructions).
575 * Unless the optimizer reorders everything, being too smart...
576 */
577
Neil Schemenauerd2560cd2002-04-12 03:10:20 +0000578#undef PyObject_Malloc
Neil Schemenauera35c6882001-02-27 04:45:05 +0000579void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +0000580PyObject_Malloc(size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +0000581{
582 block *bp;
583 poolp pool;
584 poolp next;
585 uint size;
586
Neil Schemenauera35c6882001-02-27 04:45:05 +0000587 /*
Tim Peters84c1b972002-04-04 04:44:32 +0000588 * This implicitly redirects malloc(0).
Neil Schemenauera35c6882001-02-27 04:45:05 +0000589 */
590 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
591 LOCK();
592 /*
593 * Most frequent paths first
594 */
595 size = (uint )(nbytes - 1) >> ALIGNMENT_SHIFT;
596 pool = usedpools[size + size];
597 if (pool != pool->nextpool) {
598 /*
599 * There is a used pool for this size class.
600 * Pick up the head block of its free list.
601 */
602 ++pool->ref.count;
603 bp = pool->freeblock;
Tim Peters52aefc82002-04-11 06:36:45 +0000604 assert(bp != NULL);
Neil Schemenauera35c6882001-02-27 04:45:05 +0000605 if ((pool->freeblock = *(block **)bp) != NULL) {
606 UNLOCK();
607 return (void *)bp;
608 }
609 /*
610 * Reached the end of the free list, try to extend it
611 */
Tim Peterse70ddf32002-04-05 04:32:29 +0000612 if (pool->nextoffset <= pool->maxnextoffset) {
Neil Schemenauera35c6882001-02-27 04:45:05 +0000613 /*
614 * There is room for another block
615 */
Tim Peterse70ddf32002-04-05 04:32:29 +0000616 pool->freeblock = (block *)pool +
617 pool->nextoffset;
618 pool->nextoffset += INDEX2SIZE(size);
Neil Schemenauera35c6882001-02-27 04:45:05 +0000619 *(block **)(pool->freeblock) = NULL;
620 UNLOCK();
621 return (void *)bp;
622 }
623 /*
624 * Pool is full, unlink from used pools
625 */
626 next = pool->nextpool;
627 pool = pool->prevpool;
628 next->prevpool = pool;
629 pool->nextpool = next;
630 UNLOCK();
631 return (void *)bp;
632 }
633 /*
634 * Try to get a cached free pool
635 */
636 pool = freepools;
637 if (pool != NULL) {
638 /*
639 * Unlink from cached pools
640 */
641 freepools = pool->nextpool;
642 init_pool:
643 /*
644 * Frontlink to used pools
645 */
646 next = usedpools[size + size]; /* == prev */
647 pool->nextpool = next;
648 pool->prevpool = next;
649 next->nextpool = pool;
650 next->prevpool = pool;
651 pool->ref.count = 1;
652 if (pool->szidx == size) {
653 /*
654 * Luckily, this pool last contained blocks
655 * of the same size class, so its header
656 * and free list are already initialized.
657 */
658 bp = pool->freeblock;
659 pool->freeblock = *(block **)bp;
660 UNLOCK();
661 return (void *)bp;
662 }
663 /*
Tim Peterse70ddf32002-04-05 04:32:29 +0000664 * Initialize the pool header, set up the free list to
665 * contain just the second block, and return the first
666 * block.
Neil Schemenauera35c6882001-02-27 04:45:05 +0000667 */
668 pool->szidx = size;
Tim Peterse70ddf32002-04-05 04:32:29 +0000669 size = INDEX2SIZE(size);
Neil Schemenauera35c6882001-02-27 04:45:05 +0000670 bp = (block *)pool + POOL_OVERHEAD;
Tim Peterse70ddf32002-04-05 04:32:29 +0000671 pool->nextoffset = POOL_OVERHEAD + (size << 1);
672 pool->maxnextoffset = POOL_SIZE - size;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000673 pool->freeblock = bp + size;
674 *(block **)(pool->freeblock) = NULL;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000675 UNLOCK();
676 return (void *)bp;
677 }
Walter Dörwalde0a1bb62003-06-17 15:48:11 +0000678 /*
679 * Allocate new pool
680 */
Tim Peters3c83df22002-03-30 07:04:41 +0000681 if (nfreepools) {
Neil Schemenauera35c6882001-02-27 04:45:05 +0000682 commit_pool:
Tim Peters3c83df22002-03-30 07:04:41 +0000683 --nfreepools;
684 pool = (poolp)arenabase;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000685 arenabase += POOL_SIZE;
Tim Petersd97a1c02002-03-30 06:09:22 +0000686 pool->arenaindex = narenas - 1;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000687 pool->szidx = DUMMY_SIZE_IDX;
688 goto init_pool;
689 }
Walter Dörwalde0a1bb62003-06-17 15:48:11 +0000690 /*
691 * Allocate new arena
692 */
Neil Schemenauera35c6882001-02-27 04:45:05 +0000693#ifdef WITH_MEMORY_LIMITS
Tim Petersd97a1c02002-03-30 06:09:22 +0000694 if (!(narenas < MAX_ARENAS)) {
Neil Schemenauera35c6882001-02-27 04:45:05 +0000695 UNLOCK();
696 goto redirect;
697 }
698#endif
Tim Petersd97a1c02002-03-30 06:09:22 +0000699 bp = new_arena();
700 if (bp != NULL)
701 goto commit_pool;
702 UNLOCK();
703 goto redirect;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000704 }
705
706 /* The small block allocator ends here. */
707
Tim Petersd97a1c02002-03-30 06:09:22 +0000708redirect:
Neil Schemenauera35c6882001-02-27 04:45:05 +0000709 /*
710 * Redirect the original request to the underlying (libc) allocator.
711 * We jump here on bigger requests, on error in the code above (as a
712 * last chance to serve the request) or when the max memory limit
713 * has been reached.
714 */
Tim Peters64d80c92002-04-18 21:58:56 +0000715 if (nbytes == 0)
716 nbytes = 1;
Tim Peters64d80c92002-04-18 21:58:56 +0000717 return (void *)malloc(nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +0000718}
719
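#if 0
/* Usage sketch (illustrative, compiled out): the three entry points pair up
 * like malloc/realloc/free. Error handling omitted for brevity.
 */
static void
sketch_usage(void)
{
    void *p = PyObject_Malloc(100);     /* small: carved from a pool     */
    p = PyObject_Realloc(p, 1000);      /* large: routed to libc realloc */
    PyObject_Free(p);
    PyObject_Free(NULL);                /* no effect, like free(NULL)    */
}
#endif
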
/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /*
         * Link p to the start of the pool's freeblock list. Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freepools
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            /*
             * freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty: leave it in usedpools */
                UNLOCK();
                return;
            }
            /*
             * Pool is now empty: unlink from usedpools, and
             * link to the front of freepools. This ensures that
             * previously freed pools will be allocated later
             * (since they are no longer referenced, they are
             * perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;
            /* Link to freepools. This is a singly-linked list,
             * and pool->prevpool isn't used there.
             */
            pool->nextpool = freepools;
            freepools = pool;
            UNLOCK();
            return;
        }
        /*
         * Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    uint size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking. If
             * it's shrinking, there's a tradeoff: it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it. The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block. If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block. However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p. Instead we punt: let C continue to manage this block.
     */
    if (nbytes)
        return realloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
     * to begin with. Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = realloc(p, 1);
    return bp ? bp : p;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled: Redirect the entry points to malloc. These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator. This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static ulong serialno = 0;      /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine. The point is
   to supply a single place to set a breakpoint.
*/
static void
bumpserialno(void)
{
    ++serialno;
}


/* Read 4 bytes at p as a big-endian ulong. */
static ulong
read4(const void *p)
{
    const uchar *q = (const uchar *)p;
    return ((ulong)q[0] << 24) |
           ((ulong)q[1] << 16) |
           ((ulong)q[2] <<  8) |
            (ulong)q[3];
}

/* Write the 4 least-significant bytes of n as a big-endian unsigned int,
   MSB at address p, LSB at p+3. */
static void
write4(void *p, ulong n)
{
    uchar *q = (uchar *)p;
    q[0] = (uchar)((n >> 24) & 0xff);
    q[1] = (uchar)((n >> 16) & 0xff);
    q[2] = (uchar)((n >>  8) & 0xff);
    q[3] = (uchar)( n        & 0xff);
}

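/* E.g. write4(q, 0x01020304UL) stores the bytes 01 02 03 04 at q[0]..q[3]
 * in that order, and read4(q) then returns 0x01020304UL on any host,
 * regardless of the machine's native endianness.
 */
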
#ifdef Py_DEBUG
/* Is target in the list? The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular. Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* The debug malloc asks for 16 extra bytes and fills them with useful stuff,
   here calling the underlying malloc's result p:

p[0:4]
    Number of bytes originally asked for. 4-byte unsigned integer,
    big-endian (easier to read in a memory dump).
p[4:8]
    Copies of FORBIDDENBYTE. Used to catch underwrites and reads.
p[8:8+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[8] is returned. Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[8+n:8+n+4]
    Copies of FORBIDDENBYTE. Used to catch overwrites and reads.
p[8+n+4:8+n+8]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    4-byte unsigned integer, big-endian.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/

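/* Example: a debug malloc of nbytes == 4 asks the wrapped allocator for
 * 20 bytes and hands back p+8, laid out as
 *     [size: 00 00 00 04][FB FB FB FB][CB CB CB CB][FB FB FB FB][serial]
 * where FB == FORBIDDENBYTE and CB == CLEANBYTE.
 */
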
992void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +0000993_PyObject_DebugMalloc(size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +0000994{
995 uchar *p; /* base address of malloc'ed block */
Tim Peters62c06ba2002-03-23 22:28:18 +0000996 uchar *tail; /* p + 8 + nbytes == pointer to tail pad bytes */
Tim Petersddea2082002-03-23 10:03:50 +0000997 size_t total; /* nbytes + 16 */
998
Tim Peterse0850172002-03-24 00:34:21 +0000999 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00001000 total = nbytes + 16;
1001 if (total < nbytes || (total >> 31) > 1) {
1002 /* overflow, or we can't represent it in 4 bytes */
1003 /* Obscure: can't do (total >> 32) != 0 instead, because
1004 C doesn't define what happens for a right-shift of 32
1005 when size_t is a 32-bit type. At least C guarantees
1006 size_t is an unsigned type. */
1007 return NULL;
1008 }
1009
Tim Peters8a8cdfd2002-04-12 20:49:36 +00001010 p = (uchar *)PyObject_Malloc(total);
Tim Petersddea2082002-03-23 10:03:50 +00001011 if (p == NULL)
1012 return NULL;
1013
1014 write4(p, nbytes);
Tim Petersf6fb5012002-04-12 07:38:53 +00001015 p[4] = p[5] = p[6] = p[7] = FORBIDDENBYTE;
Tim Petersddea2082002-03-23 10:03:50 +00001016
1017 if (nbytes > 0)
Tim Petersf6fb5012002-04-12 07:38:53 +00001018 memset(p+8, CLEANBYTE, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001019
Tim Peters62c06ba2002-03-23 22:28:18 +00001020 tail = p + 8 + nbytes;
Tim Petersf6fb5012002-04-12 07:38:53 +00001021 tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
Tim Peters62c06ba2002-03-23 22:28:18 +00001022 write4(tail + 4, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00001023
1024 return p+8;
1025}
1026
Tim Peters62c06ba2002-03-23 22:28:18 +00001027/* The debug free first checks the 8 bytes on each end for sanity (in
Tim Petersf6fb5012002-04-12 07:38:53 +00001028 particular, that the FORBIDDENBYTEs are still intact).
1029 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001030 Then calls the underlying free.
1031*/
1032void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001033_PyObject_DebugFree(void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001034{
Tim Peters62c06ba2002-03-23 22:28:18 +00001035 uchar *q = (uchar *)p;
Tim Petersddea2082002-03-23 10:03:50 +00001036 size_t nbytes;
1037
Tim Petersddea2082002-03-23 10:03:50 +00001038 if (p == NULL)
1039 return;
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001040 _PyObject_DebugCheckAddress(p);
Tim Petersddea2082002-03-23 10:03:50 +00001041 nbytes = read4(q-8);
1042 if (nbytes > 0)
Tim Petersf6fb5012002-04-12 07:38:53 +00001043 memset(q, DEADBYTE, nbytes);
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001044 PyObject_Free(q-8);
Tim Petersddea2082002-03-23 10:03:50 +00001045}
1046
1047void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001048_PyObject_DebugRealloc(void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001049{
1050 uchar *q = (uchar *)p;
Tim Peters85cc1c42002-04-12 08:52:50 +00001051 uchar *tail;
1052 size_t total; /* nbytes + 16 */
Tim Petersddea2082002-03-23 10:03:50 +00001053 size_t original_nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001054
Tim Petersddea2082002-03-23 10:03:50 +00001055 if (p == NULL)
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001056 return _PyObject_DebugMalloc(nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001057
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001058 _PyObject_DebugCheckAddress(p);
Tim Peters85cc1c42002-04-12 08:52:50 +00001059 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00001060 original_nbytes = read4(q-8);
Tim Peters85cc1c42002-04-12 08:52:50 +00001061 total = nbytes + 16;
1062 if (total < nbytes || (total >> 31) > 1) {
1063 /* overflow, or we can't represent it in 4 bytes */
1064 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001065 }
1066
1067 if (nbytes < original_nbytes) {
Tim Peters85cc1c42002-04-12 08:52:50 +00001068 /* shrinking: mark old extra memory dead */
1069 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001070 }
1071
Tim Peters85cc1c42002-04-12 08:52:50 +00001072 /* Resize and add decorations. */
1073 q = (uchar *)PyObject_Realloc(q-8, total);
1074 if (q == NULL)
1075 return NULL;
1076
1077 write4(q, nbytes);
1078 assert(q[4] == FORBIDDENBYTE &&
1079 q[5] == FORBIDDENBYTE &&
1080 q[6] == FORBIDDENBYTE &&
1081 q[7] == FORBIDDENBYTE);
1082 q += 8;
1083 tail = q + nbytes;
1084 tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
1085 write4(tail + 4, serialno);
1086
1087 if (nbytes > original_nbytes) {
1088 /* growing: mark new extra memory clean */
1089 memset(q + original_nbytes, CLEANBYTE,
1090 nbytes - original_nbytes);
Tim Peters52aefc82002-04-11 06:36:45 +00001091 }
Tim Peters85cc1c42002-04-12 08:52:50 +00001092
1093 return q;
Tim Petersddea2082002-03-23 10:03:50 +00001094}
1095
Tim Peters7ccfadf2002-04-01 06:04:21 +00001096/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001097 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001098 * and call Py_FatalError to kill the program.
1099 */
1100 void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001101_PyObject_DebugCheckAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001102{
1103 const uchar *q = (const uchar *)p;
Tim Petersd1139e02002-03-28 07:32:11 +00001104 char *msg;
Tim Peters449b5a82002-04-28 06:14:45 +00001105 ulong nbytes;
1106 const uchar *tail;
Tim Petersd1139e02002-03-28 07:32:11 +00001107 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001108
Tim Petersd1139e02002-03-28 07:32:11 +00001109 if (p == NULL) {
Tim Petersddea2082002-03-23 10:03:50 +00001110 msg = "didn't expect a NULL pointer";
Tim Petersd1139e02002-03-28 07:32:11 +00001111 goto error;
1112 }
Tim Petersddea2082002-03-23 10:03:50 +00001113
Tim Peters449b5a82002-04-28 06:14:45 +00001114 /* Check the stuff at the start of p first: if there's underwrite
1115 * corruption, the number-of-bytes field may be nuts, and checking
1116 * the tail could lead to a segfault then.
1117 */
Tim Petersd1139e02002-03-28 07:32:11 +00001118 for (i = 4; i >= 1; --i) {
Tim Petersf6fb5012002-04-12 07:38:53 +00001119 if (*(q-i) != FORBIDDENBYTE) {
Tim Petersd1139e02002-03-28 07:32:11 +00001120 msg = "bad leading pad byte";
1121 goto error;
1122 }
1123 }
Tim Petersddea2082002-03-23 10:03:50 +00001124
Tim Peters449b5a82002-04-28 06:14:45 +00001125 nbytes = read4(q-8);
1126 tail = q + nbytes;
1127 for (i = 0; i < 4; ++i) {
1128 if (tail[i] != FORBIDDENBYTE) {
1129 msg = "bad trailing pad byte";
1130 goto error;
Tim Petersddea2082002-03-23 10:03:50 +00001131 }
1132 }
1133
Tim Petersd1139e02002-03-28 07:32:11 +00001134 return;
1135
1136error:
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001137 _PyObject_DebugDumpAddress(p);
Tim Petersd1139e02002-03-28 07:32:11 +00001138 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00001139}

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
        const uchar *q = (const uchar *)p;
        const uchar *tail;
        ulong nbytes, serial;
        int i;

        fprintf(stderr, "Debug memory block at address p=%p:\n", p);
        if (p == NULL)
                return;

        nbytes = read4(q-8);
        fprintf(stderr, "    %lu bytes originally requested\n", nbytes);

        /* In case this is nuts, check the leading pad bytes first. */
        fputs("    The 4 pad bytes at p-4 are ", stderr);
        if (*(q-4) == FORBIDDENBYTE &&
            *(q-3) == FORBIDDENBYTE &&
            *(q-2) == FORBIDDENBYTE &&
            *(q-1) == FORBIDDENBYTE) {
                fputs("FORBIDDENBYTE, as expected.\n", stderr);
        }
        else {
                fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                        FORBIDDENBYTE);
                for (i = 4; i >= 1; --i) {
                        const uchar byte = *(q-i);
                        fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
                        if (byte != FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }

                fputs("    Because memory is corrupted at the start, the "
                      "count of bytes requested\n"
                      "    may be bogus, and checking the trailing pad "
                      "bytes may segfault.\n", stderr);
        }

        tail = q + nbytes;
        fprintf(stderr, "    The 4 pad bytes at tail=%p are ", tail);
        if (tail[0] == FORBIDDENBYTE &&
            tail[1] == FORBIDDENBYTE &&
            tail[2] == FORBIDDENBYTE &&
            tail[3] == FORBIDDENBYTE) {
                fputs("FORBIDDENBYTE, as expected.\n", stderr);
        }
        else {
                fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                        FORBIDDENBYTE);
                for (i = 0; i < 4; ++i) {
                        const uchar byte = tail[i];
                        fprintf(stderr, "        at tail+%d: 0x%02x",
                                i, byte);
                        if (byte != FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }
        }

        serial = read4(tail+4);
        fprintf(stderr, "    The block was made by call #%lu to "
                        "debug malloc/realloc.\n", serial);

        if (nbytes > 0) {
                int i = 0;
                fputs("    Data at p:", stderr);
                /* print up to 8 bytes at the start */
                while (q < tail && i < 8) {
                        fprintf(stderr, " %02x", *q);
                        ++i;
                        ++q;
                }
                /* and up to 8 at the end */
                if (q < tail) {
                        if (tail - q > 8) {
                                fputs(" ...", stderr);
                                q = tail - 8;
                        }
                        while (q < tail) {
                                fprintf(stderr, " %02x", *q);
                                ++q;
                        }
                }
                fputc('\n', stderr);
        }
}
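
/* A minimal usage sketch (illustrative only, not compiled): in a
 * PYMALLOC_DEBUG build, a one-byte overrun like the hypothetical one
 * below is caught by the pad-byte check on the next debug operation,
 * which dumps the block via _PyObject_DebugDumpAddress() and then
 * calls Py_FatalError("bad trailing pad byte").
 */
#if 0
        {
                char *s = (char *)PyObject_MALLOC(10);
                s[10] = 'x';      /* stomps the first trailing pad byte */
                PyObject_FREE(s); /* debug free spots the bad pad byte */
        }
#endif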

static ulong
printone(const char* msg, ulong value)
{
        int i, k;
        char buf[100];
        ulong origvalue = value;

        fputs(msg, stderr);
        for (i = (int)strlen(msg); i < 35; ++i)
                fputc(' ', stderr);
        fputc('=', stderr);

        /* Write the value with commas. */
        i = 22;
        buf[i--] = '\0';
        buf[i--] = '\n';
        k = 3;
        do {
                ulong nextvalue = value / 10UL;
                uint digit = value - nextvalue * 10UL;
                value = nextvalue;
                buf[i--] = (char)(digit + '0');
                --k;
                if (k == 0 && value && i >= 0) {
                        k = 3;
                        buf[i--] = ',';
                }
        } while (value && i >= 0);

        while (i >= 0)
                buf[i--] = ' ';
        fputs(buf, stderr);

        return origvalue;
}
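
/* For example (per the code above: the label is space-padded to 35
 * columns, then '=', then the value right-justified with commas in a
 * 21-character field ending in '\n'):
 *
 *      printone("# bytes in allocated blocks", 1234567);
 *
 * writes a line shaped roughly like
 *
 *      # bytes in allocated blocks        =             1,234,567
 */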

/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
        uint i;
        const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
        /* # of pools, allocated blocks, and free blocks per class index */
        ulong numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        ulong numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        ulong numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        /* total # of allocated bytes in used and full pools */
        ulong allocated_bytes = 0;
        /* total # of available bytes in used pools */
        ulong available_bytes = 0;
        /* # of free pools + pools not yet carved out of current arena */
        uint numfreepools = 0;
        /* # of bytes for arena alignment padding */
        ulong arena_alignment = 0;
        /* # of bytes in used and full pools used for pool_headers */
        ulong pool_header_bytes = 0;
        /* # of bytes in used and full pools wasted due to quantization,
         * i.e. the necessarily leftover space at the ends of used and
         * full pools.
         */
        ulong quantization = 0;
        /* running total -- should equal narenas * ARENA_SIZE */
        ulong total;
        char buf[128];

        fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
                SMALL_REQUEST_THRESHOLD, numclasses);

        for (i = 0; i < numclasses; ++i)
                numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

        /* Because full pools aren't linked to from anything, it's easiest
         * to march over all the arenas.  If we're lucky, most of the memory
         * will be living in full pools -- would be a shame to miss them.
         */
        for (i = 0; i < narenas; ++i) {
                uint poolsinarena;
                uint j;
                uptr base = arenas[i];

                /* round up to pool alignment */
                poolsinarena = ARENA_SIZE / POOL_SIZE;
                if (base & (uptr)POOL_SIZE_MASK) {
                        --poolsinarena;
                        arena_alignment += POOL_SIZE;
                        base &= ~(uptr)POOL_SIZE_MASK;
                        base += POOL_SIZE;
                }

                if (i == narenas - 1) {
                        /* current arena may have raw memory at the end */
                        numfreepools += nfreepools;
                        poolsinarena -= nfreepools;
                }

                /* visit every pool in the arena */
                for (j = 0; j < poolsinarena; ++j, base += POOL_SIZE) {
                        poolp p = (poolp)base;
                        const uint sz = p->szidx;
                        uint freeblocks;

                        if (p->ref.count == 0) {
                                /* currently unused */
                                ++numfreepools;
                                assert(pool_is_in_list(p, freepools));
                                continue;
                        }
                        ++numpools[sz];
                        numblocks[sz] += p->ref.count;
                        freeblocks = NUMBLOCKS(sz) - p->ref.count;
                        numfreeblocks[sz] += freeblocks;
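                        /* A pool with both allocated and free blocks
                         * should be linked into the usedpools table,
                         * which is indexed by twice the size class --
                         * hence the sz + sz below.
                         */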
#ifdef Py_DEBUG
                        if (freeblocks > 0)
                                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
                }
        }

        fputc('\n', stderr);
        fputs("class   size   num pools   blocks in use  avail blocks\n"
              "-----   ----   ---------   -------------  ------------\n",
              stderr);

        for (i = 0; i < numclasses; ++i) {
                ulong p = numpools[i];
                ulong b = numblocks[i];
                ulong f = numfreeblocks[i];
                uint size = INDEX2SIZE(i);
                if (p == 0) {
                        assert(b == 0 && f == 0);
                        continue;
                }
                fprintf(stderr, "%5u %6u %11lu %15lu %13lu\n",
                        i, size, p, b, f);
                allocated_bytes += b * size;
                available_bytes += f * size;
                pool_header_bytes += p * POOL_OVERHEAD;
                quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
        }
        fputc('\n', stderr);
        (void)printone("# times object malloc called", serialno);

        PyOS_snprintf(buf, sizeof(buf),
                      "%u arenas * %d bytes/arena", narenas, ARENA_SIZE);
        (void)printone(buf, (ulong)narenas * ARENA_SIZE);

        fputc('\n', stderr);

        total = printone("# bytes in allocated blocks", allocated_bytes);
        total += printone("# bytes in available blocks", available_bytes);

        PyOS_snprintf(buf, sizeof(buf),
                      "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
        total += printone(buf, (ulong)numfreepools * POOL_SIZE);

        total += printone("# bytes lost to pool headers", pool_header_bytes);
        total += printone("# bytes lost to quantization", quantization);
        total += printone("# bytes lost to arena alignment", arena_alignment);
        (void)printone("Total", total);
}
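
/* A hedged usage note: in a PYMALLOC_DEBUG build this can be called
 * directly, e.g. from gdb, to snapshot pymalloc's state at any point:
 *
 *      (gdb) call _PyObject_DebugMallocStats()
 *
 * If the accounting above is consistent, the final "Total" line should
 * equal narenas * ARENA_SIZE, i.e. match the "arenas * bytes/arena"
 * line printed above it.
 */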

#endif	/* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it
   since the definition is after the reference. */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
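        /* The arenaindex bound check guards against indexing arenas[]
         * with a garbage value read from a non-pymalloc header; the
         * unsigned subtraction then covers both ends of the arena at
         * once, since an address below the arena base wraps around to
         * a huge uptr and fails the < ARENA_SIZE test.
         */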
        return ((pool->arenaindex) < narenas &&
                (uptr)(P) - arenas[pool->arenaindex] < (uptr)ARENA_SIZE);
}
#endif