#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
        ______________________________________________________________    |
       [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
        __________________________________________________________________
       [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
        _______________________________________________________________________
       [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
        __________________________________   __________________________________
       [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate over time. To avoid
 * this, we partition each free list into pools and dynamically share the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
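
/* Illustrative worked example (comment added for clarity, not part of the
 * original source):  a request for 20 bytes falls in the 17-24 row of the
 * table above, so its size class index is (20 - 1) >> ALIGNMENT_SHIFT == 2
 * and it is served from a 24-byte block, since
 * INDEX2SIZE(2) == (2 + 1) << 3 == 24.
 */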

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory"; it wastes addressable range, not
 * memory.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per-size-class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
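
/* Illustrative example (comment added for clarity, not in the original
 * source):  assuming 4-byte pointers and uints, sizeof(struct pool_header)
 * is 32, so POOL_OVERHEAD is 32 and NUMBLOCKS(0) == (4096 - 32) / 8 == 508
 * eight-byte blocks per pool.
 */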

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off the current arena highwater mark (file static arenabase)
as needed.  Once carved off, a pool is in one of three states forever after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's then no longer linked to from anything, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of the (file static) singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on C not inserting any padding anywhere in a pool_header at or before
the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp)((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
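
/* Illustrative reading of the table above (comment added for clarity, not in
 * the original source):  usedpools[0] and usedpools[1] both hold PTA(0), an
 * address 2*sizeof(block *) bytes *before* &usedpools[0].  Interpreted as a
 * poolp, its nextpool and prevpool "members" land exactly on usedpools[0]
 * and usedpools[1], so an empty list header points at itself, as described
 * in the long comment above.
 */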

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*==========================================================================*/
/* Arena management. */

/* arenas is a vector of arena base addresses, in order of allocation time.
 * arenas currently contains narenas entries, and has space allocated
 * for at most maxarenas entries.
 *
 * CAUTION:  See the long comment block about thread safety in new_arena():
 * the code currently relies in deep ways on this vector only growing,
 * and only growing by appending at the end.  For now we never return an
 * arena to the OS.
 */
static uptr *volatile arenas = NULL;    /* the pointer itself is volatile */
static volatile uint narenas = 0;
static uint maxarenas = 0;

/* Number of pools still available to be allocated in the current arena. */
static uint nfreepools = 0;

/* Free space start address in current arena.  This is pool-aligned. */
static block *arenabase = NULL;

/* Allocate a new arena and return its base address.  If we run out of
 * memory, return NULL.
 */
static block *
new_arena(void)
{
    uint excess;    /* number of bytes above pool alignment */
    block *bp = (block *)malloc(ARENA_SIZE);
    if (bp == NULL)
        return NULL;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif

    /* arenabase <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenabase = bp;
    nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * nfreepools == ARENA_SIZE);
    excess = (uint)bp & POOL_SIZE_MASK;
    if (excess != 0) {
        --nfreepools;
        arenabase += POOL_SIZE - excess;
    }

    /* Make room for a new entry in the arenas vector. */
    if (arenas == NULL) {
        assert(narenas == 0 && maxarenas == 0);
        arenas = (uptr *)malloc(16 * sizeof(*arenas));
        if (arenas == NULL)
            goto error;
        maxarenas = 16;
    }
    else if (narenas == maxarenas) {
        /* Grow arenas.
         *
         * Exceedingly subtle:  Someone may be calling the pymalloc
         * free via PyMem_{DEL, Del, FREE, Free} without holding the
         * GIL.  Someone else may simultaneously be calling the
         * pymalloc malloc while holding the GIL via, e.g.,
         * PyObject_New.  Now the pymalloc free may index into arenas
         * for an address check, while the pymalloc malloc calls
         * new_arena and we end up here to grow a new arena *and*
         * grow the arenas vector.  If the value for arenas pymalloc
         * free picks up "vanishes" during this resize, anything may
         * happen, and it would be an incredibly rare bug.  Therefore
         * the code here takes great pains to make sure that, at every
         * moment, arenas always points to an intact vector of
         * addresses.  It doesn't matter whether arenas points to a
         * wholly up-to-date vector when pymalloc free checks it in
         * this case, because the only legal (and that even this is
         * legal is debatable) way to call PyMem_{Del, etc} while not
         * holding the GIL is if the memory being released is not
         * object memory, i.e. if the address check in pymalloc free
         * is supposed to fail.  Having an incomplete vector can't
         * make a supposed-to-fail case succeed by mistake (it could
         * only make a supposed-to-succeed case fail by mistake).
         *
         * In addition, without a lock we can't know for sure when
         * an old vector is no longer referenced, so we simply let
         * old vectors leak.
         *
         * And on top of that, since narenas and arenas can't be
         * changed as-a-pair atomically without a lock, we're also
         * careful to declare them volatile and ensure that we change
         * arenas first.  This prevents another thread from picking
         * up an narenas value too large for the arenas value it
         * reads (arenas never shrinks).
         *
         * Read the above 50 times before changing anything in this
         * block.
         */
        uptr *p;
        uint newmax = maxarenas << 1;
        if (newmax <= maxarenas)    /* overflow */
            goto error;
        p = (uptr *)malloc(newmax * sizeof(*arenas));
        if (p == NULL)
            goto error;
        memcpy(p, arenas, narenas * sizeof(*arenas));
        arenas = p;    /* old arenas deliberately leaked */
        maxarenas = newmax;
    }

    /* Append the new arena address to arenas. */
    assert(narenas < maxarenas);
    arenas[narenas] = (uptr)bp;
    ++narenas;    /* can't overflow, since narenas < maxarenas before */
    return bp;

error:
    free(bp);
    nfreepools = 0;
    return NULL;
}

/* Return true if and only if P is an address that was allocated by
 * pymalloc.  I must be the index into arenas that the address claims
 * to come from.
 *
 * Tricky:  Letting B be the arena base address in arenas[I], P belongs to the
 * arena if and only if
 *      B <= P < B + ARENA_SIZE
 * Subtracting B throughout, this is true iff
 *      0 <= P-B < ARENA_SIZE
 * By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
 *
 * Obscure:  A PyMem "free memory" function can call the pymalloc free or
 * realloc before the first arena has been allocated.  arenas is still
 * NULL in that case.  We're relying on narenas also being 0 in that case,
 * so that (I) < narenas must be false, saving us from trying to index into
 * a NULL arenas.
 */
#define ADDRESS_IN_RANGE(P, I) \
    ((I) < narenas && (uptr)(P) - arenas[I] < (uptr)ARENA_SIZE)
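
/* For example (illustrative, comment not in the original source):  if P lies
 * 16 bytes below its claimed arena base, the unsigned difference
 * (uptr)(P) - arenas[I] wraps around to a value far larger than ARENA_SIZE,
 * so the single unsigned comparison above correctly rejects addresses below
 * the arena as well as above it.
 */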

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /*
                 * There is room for another block
                 */
                pool->freeblock = (block *)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Pool is full, unlink from used pools
             */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Try to get a cached free pool
         */
        pool = freepools;
        if (pool != NULL) {
            /*
             * Unlink from cached pools
             */
            freepools = pool->nextpool;
        init_pool:
            /*
             * Frontlink to used pools
             */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /*
                 * Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Allocate new pool
         */
        if (nfreepools) {
        commit_pool:
            --nfreepools;
            pool = (poolp)arenabase;
            arenabase += POOL_SIZE;
            pool->arenaindex = narenas - 1;
            pool->szidx = DUMMY_SIZE_IDX;
            goto init_pool;
        }
        /*
         * Allocate new arena
         */
#ifdef WITH_MEMORY_LIMITS
        if (!(narenas < MAX_ARENAS)) {
            UNLOCK();
            goto redirect;
        }
#endif
        bp = new_arena();
        if (bp != NULL)
            goto commit_pool;
        UNLOCK();
        goto redirect;
    }

    /* The small block allocator ends here. */

redirect:
    /*
     * Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
#ifdef MALLOC_ZERO_RETURNS_NULL
    if (nbytes == 0)
        nbytes = 1;
#endif
    return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)  /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (ADDRESS_IN_RANGE(p, pool->arenaindex)) {
        /* We allocated this address. */
        LOCK();
        /*
         * Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            /*
             * freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /*
             * Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;
            /* Link to freepools.  This is a singly-linked list,
             * and pool->prevpool isn't used there.
             */
            pool->nextpool = freepools;
            freepools = pool;
            UNLOCK();
            return;
        }
        /*
         * Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    uint size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (ADDRESS_IN_RANGE(p, pool->arenaindex)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff:  it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block. */
    if (nbytes <= SMALL_REQUEST_THRESHOLD) {
        /* Take over this block -- ask for at least one byte so
         * we really do take it over (PyObject_Malloc(0) goes to
         * the system malloc).
         */
        bp = PyObject_Malloc(nbytes ? nbytes : 1);
        if (bp != NULL) {
            memcpy(bp, p, nbytes);
            free(p);
        }
        else if (nbytes == 0) {
            /* Meet the doc's promise that nbytes==0 will
             * never return a NULL pointer when p isn't NULL.
             */
            bp = p;
        }
    }
    else {
        assert(nbytes != 0);
        bp = realloc(p, nbytes);
    }
    return bp;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static ulong serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
   to supply a single place to set a breakpoint.
*/
static void
bumpserialno(void)
{
    ++serialno;
}


/* Read 4 bytes at p as a big-endian ulong. */
static ulong
read4(const void *p)
{
    const uchar *q = (const uchar *)p;
    return ((ulong)q[0] << 24) |
           ((ulong)q[1] << 16) |
           ((ulong)q[2] <<  8) |
            (ulong)q[3];
}

/* Write the 4 least-significant bytes of n as a big-endian unsigned int,
   MSB at address p, LSB at p+3. */
static void
write4(void *p, ulong n)
{
    uchar *q = (uchar *)p;
    q[0] = (uchar)((n >> 24) & 0xff);
    q[1] = (uchar)((n >> 16) & 0xff);
    q[2] = (uchar)((n >>  8) & 0xff);
    q[3] = (uchar)( n        & 0xff);
}

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* The debug malloc asks for 16 extra bytes and fills them with useful stuff,
   here calling the underlying malloc's result p:

p[0:4]
    Number of bytes originally asked for.  4-byte unsigned integer,
    big-endian (easier to read in a memory dump).
p[4:8]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and reads.
p[8:8+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[8] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[8+n:8+n+4]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and reads.
p[8+n+4:8+n+8]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    4-byte unsigned integer, big-endian.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
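
/* Illustrative layout (comment added for clarity, not in the original
 * source):  for a debug malloc of n == 2 bytes the underlying allocator
 * supplies 18 bytes:
 *
 *     p[0:4]    00 00 00 02    big-endian byte count
 *     p[4:8]    FB FB FB FB    leading FORBIDDENBYTEs
 *     p[8:10]   CB CB          the caller's 2 bytes, CLEANBYTE-filled
 *     p[10:14]  FB FB FB FB    trailing FORBIDDENBYTEs
 *     p[14:18]  serial number, big-endian
 *
 * and the caller receives &p[8].
 */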

void *
_PyObject_DebugMalloc(size_t nbytes)
{
    uchar *p;       /* base address of malloc'ed block */
    uchar *tail;    /* p + 8 + nbytes == pointer to tail pad bytes */
    size_t total;   /* nbytes + 16 */

    bumpserialno();
    total = nbytes + 16;
    if (total < nbytes || (total >> 31) > 1) {
        /* overflow, or we can't represent it in 4 bytes */
        /* Obscure:  can't do (total >> 32) != 0 instead, because
           C doesn't define what happens for a right-shift of 32
           when size_t is a 32-bit type.  At least C guarantees
           size_t is an unsigned type. */
        return NULL;
    }

    p = (uchar *)PyObject_Malloc(total);
    if (p == NULL)
        return NULL;

    write4(p, nbytes);
    p[4] = p[5] = p[6] = p[7] = FORBIDDENBYTE;

    if (nbytes > 0)
        memset(p+8, CLEANBYTE, nbytes);

    tail = p + 8 + nbytes;
    tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
    write4(tail + 4, serialno);

    return p+8;
}

/* The debug free first checks the 8 bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
void
_PyObject_DebugFree(void *p)
{
    uchar *q = (uchar *)p;
    size_t nbytes;

    if (p == NULL)
        return;
    _PyObject_DebugCheckAddress(p);
    nbytes = read4(q-8);
    if (nbytes > 0)
        memset(q, DEADBYTE, nbytes);
    PyObject_Free(q-8);
}

void *
_PyObject_DebugRealloc(void *p, size_t nbytes)
{
    uchar *q = (uchar *)p;
    uchar *tail;
    size_t total;   /* nbytes + 16 */
    size_t original_nbytes;

    if (p == NULL)
        return _PyObject_DebugMalloc(nbytes);

    _PyObject_DebugCheckAddress(p);
    bumpserialno();
    original_nbytes = read4(q-8);
    total = nbytes + 16;
    if (total < nbytes || (total >> 31) > 1) {
        /* overflow, or we can't represent it in 4 bytes */
        return NULL;
    }

    if (nbytes < original_nbytes) {
        /* shrinking:  mark old extra memory dead */
        memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
    }

    /* Resize and add decorations. */
    q = (uchar *)PyObject_Realloc(q-8, total);
    if (q == NULL)
        return NULL;

    write4(q, nbytes);
    assert(q[4] == FORBIDDENBYTE &&
           q[5] == FORBIDDENBYTE &&
           q[6] == FORBIDDENBYTE &&
           q[7] == FORBIDDENBYTE);
    q += 8;
    tail = q + nbytes;
    tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
    write4(tail + 4, serialno);

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(q + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 */
void
_PyObject_DebugCheckAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    char *msg;
    ulong nbytes;
    const uchar *tail;
    int i;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = 4; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read4(q-8);
    tail = q + nbytes;
    for (i = 0; i < 4; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    const uchar *tail;
    ulong nbytes, serial;
    int i;

    fprintf(stderr, "Debug memory block at address p=%p:\n", p);
    if (p == NULL)
        return;

    nbytes = read4(q-8);
    fprintf(stderr, "    %lu bytes originally requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fputs("    The 4 pad bytes at p-4 are ", stderr);
    if (*(q-4) == FORBIDDENBYTE &&
        *(q-3) == FORBIDDENBYTE &&
        *(q-2) == FORBIDDENBYTE &&
        *(q-1) == FORBIDDENBYTE) {
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    }
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 4; i >= 1; --i) {
            const uchar byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "    may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The 4 pad bytes at tail=%p are ", tail);
    if (tail[0] == FORBIDDENBYTE &&
        tail[1] == FORBIDDENBYTE &&
        tail[2] == FORBIDDENBYTE &&
        tail[3] == FORBIDDENBYTE) {
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    }
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < 4; ++i) {
            const uchar byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read4(tail+4);
    fprintf(stderr, "    The block was made by call #%lu to "
                    "debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        int i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
}

static ulong
printone(const char* msg, ulong value)
{
    int i, k;
    char buf[100];
    ulong origvalue = value;

    fputs(msg, stderr);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', stderr);
    fputc('=', stderr);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        ulong nextvalue = value / 10UL;
        uint digit = value - nextvalue * 10UL;
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, stderr);

    return origvalue;
}

/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    ulong numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    ulong numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    ulong numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    ulong allocated_bytes = 0;
    /* total # of available bytes in used pools */
    ulong available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    ulong arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    ulong pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    ulong quantization = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    ulong total;
    char buf[128];

    fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < narenas; ++i) {
        uint poolsinarena;
        uint j;
        uptr base = arenas[i];

        /* round up to pool alignment */
        poolsinarena = ARENA_SIZE / POOL_SIZE;
        if (base & (uptr)POOL_SIZE_MASK) {
            --poolsinarena;
            arena_alignment += POOL_SIZE;
            base &= ~(uptr)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        if (i == narenas - 1) {
            /* current arena may have raw memory at the end */
            numfreepools += nfreepools;
            poolsinarena -= nfreepools;
        }

        /* visit every pool in the arena */
        for (j = 0; j < poolsinarena; ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
                ++numfreepools;
                assert(pool_is_in_list(p, freepools));
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }

    fputc('\n', stderr);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          stderr);

    for (i = 0; i < numclasses; ++i) {
        ulong p = numpools[i];
        ulong b = numblocks[i];
        ulong f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(stderr, "%5u %6u %11lu %15lu %13lu\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', stderr);
    (void)printone("# times object malloc called", serialno);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u arenas * %d bytes/arena", narenas, ARENA_SIZE);
    (void)printone(buf, (ulong)narenas * ARENA_SIZE);

    fputc('\n', stderr);

    total = printone("# bytes in allocated blocks", allocated_bytes);
    total += printone("# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(buf, (ulong)numfreepools * POOL_SIZE);

    total += printone("# bytes lost to pool headers", pool_header_bytes);
    total += printone("# bytes lost to quantization", quantization);
    total += printone("# bytes lost to arena alignment", arena_alignment);
    (void)printone("Total", total);
}

#endif  /* PYMALLOC_DEBUG */