#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


      Object-specific allocators
          _____   ______   ______       ________
         [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
  +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
          _______________________________       |                           |
         [   Python's object allocator   ]      |                           |
  +2 | ####### Object memory ####### | <------ Internal buffers ------> |
          ______________________________________________________________    |
         [          Python's raw memory allocator (PyMem_ API)          ]   |
  +1 | <----- Python memory (under PyMem manager's control) ------> |   |
          __________________________________________________________________
         [    Underlying general-purpose allocator (ex: C library malloc)   ]
   0 | <------ Virtual memory allocated for the python process -------> |

     =========================================================================
          _______________________________________________________________
         [                OS-specific Virtual Memory Manager (VMM)        ]
  -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
          __________________________________   __________________________________
         [                                  ] [                                  ]
  -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */
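
/* A worked example of the table above (the arithmetic simply mirrors the
 * ALIGNMENT/ALIGNMENT_SHIFT and INDEX2SIZE definitions below): a request of
 * nbytes == 20 has size class index (20 - 1) >> 3 == 2, so it is served
 * from the 24-byte class, and INDEX2SIZE(2) == (2 + 1) << 3 == 24 recovers
 * the block size.  A request of exactly 256 bytes maps to index 31, the
 * last small class, while 257 bytes fails the small-request test in
 * PyObject_Malloc and is passed straight to the system allocator.
 */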

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory". It's an addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)    /* simple lock declaration             */
#define SIMPLELOCK_INIT(lock)    /* allocate (if needed) and initialize */
#define SIMPLELOCK_FINI(lock)    /* free/destroy an existing lock       */
#define SIMPLELOCK_LOCK(lock)    /* acquire released lock               */
#define SIMPLELOCK_UNLOCK(lock)  /* release acquired lock               */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)      (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD   ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
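
/* A concrete feel for the macros above (figures for a typical 32-bit build,
 * where sizeof(struct pool_header) == 32; 64-bit builds have a larger
 * header): POOL_OVERHEAD == 32, so NUMBLOCKS(0) == (4096 - 32) / 8 == 508
 * eight-byte blocks fit in one pool, while NUMBLOCKS(31) ==
 * (4096 - 32) / 256 == 15 blocks of the largest small size class.
 */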

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off the current arena highwater mark (file static arenabase)
as needed.  Once carved off, a pool is in one of three states forever after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of the (file static) singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level
(arena, pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.
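
As a concrete illustration (the offsets follow from the pool initialization
code in PyObject_Malloc below; the exact value of POOL_OVERHEAD depends on
the build): in a fresh pool serving the 16-byte size class, the first block
handed out lives at offset POOL_OVERHEAD, pool->freeblock points to the block
at offset POOL_OVERHEAD + 16, nextoffset starts at POOL_OVERHEAD + 32, and
maxnextoffset is POOL_SIZE - 16 == 4080.  Each time the free list runs dry,
the block at offset nextoffset is handed out and nextoffset is bumped by 16,
until nextoffset finally exceeds maxnextoffset.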


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
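
/* A sketch of what the PTA trick below produces (illustrative only; the
 * offsets assume the compiler inserts no padding, as the comment above
 * requires): on a build where sizeof(block *) == 4, nextpool lives at
 * offset 8 and prevpool at offset 12 within a pool_header.  PTA(x) points
 * 8 bytes *before* usedpools[2*x], so for the fake "pool" p == PTA(x),
 * p->nextpool and p->prevpool alias usedpools[2*x] and usedpools[2*x + 1],
 * which the initializer below sets to PTA(x) -- i.e. each header starts out
 * as an empty circular list pointing at itself.
 */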

#define PTA(x)  ((poolp)((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*==========================================================================*/
/* Arena management. */

/* arenas is a vector of arena base addresses, in order of allocation time.
 * arenas currently contains narenas entries, and has space allocated
 * for at most maxarenas entries.
 *
 * CAUTION:  See the long comment block about thread safety in new_arena():
 * the code currently relies in deep ways on the fact that this vector only
 * grows, and only grows by appending at the end.  For now we never return
 * an arena to the OS.
 */
static uptr *volatile arenas = NULL;    /* the pointer itself is volatile */
static volatile uint narenas = 0;
static uint maxarenas = 0;

/* Number of pools still available to be allocated in the current arena. */
static uint nfreepools = 0;

/* Free space start address in current arena.  This is pool-aligned. */
static block *arenabase = NULL;

/* Allocate a new arena and return its base address.  If we run out of
 * memory, return NULL.
 */
static block *
new_arena(void)
{
    uint excess;    /* number of bytes above pool alignment */
    block *bp = (block *)malloc(ARENA_SIZE);
    if (bp == NULL)
        return NULL;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif

    /* arenabase <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenabase = bp;
    nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * nfreepools == ARENA_SIZE);
    excess = (uint)((Py_uintptr_t)bp & POOL_SIZE_MASK);
    if (excess != 0) {
        --nfreepools;
        arenabase += POOL_SIZE - excess;
    }

    /* Make room for a new entry in the arenas vector. */
    if (arenas == NULL) {
        assert(narenas == 0 && maxarenas == 0);
        arenas = (uptr *)malloc(16 * sizeof(*arenas));
        if (arenas == NULL)
            goto error;
        maxarenas = 16;
    }
    else if (narenas == maxarenas) {
        /* Grow arenas.
         *
         * Exceedingly subtle:  Someone may be calling the pymalloc
         * free via PyMem_{DEL, Del, FREE, Free} without holding the
         * GIL.  Someone else may simultaneously be calling the
         * pymalloc malloc while holding the GIL via, e.g.,
         * PyObject_New.  Now the pymalloc free may index into arenas
         * for an address check, while the pymalloc malloc calls
         * new_arena and we end up here to grow a new arena *and*
         * grow the arenas vector.  If the value for arenas pymalloc
         * free picks up "vanishes" during this resize, anything may
         * happen, and it would be an incredibly rare bug.  Therefore
         * the code here takes great pains to make sure that, at every
         * moment, arenas always points to an intact vector of
         * addresses.  It doesn't matter whether arenas points to a
         * wholly up-to-date vector when pymalloc free checks it in
         * this case, because the only legal (and that even this is
         * legal is debatable) way to call PyMem_{Del, etc} while not
         * holding the GIL is if the memory being released is not
         * object memory, i.e. if the address check in pymalloc free
         * is supposed to fail.  Having an incomplete vector can't
         * make a supposed-to-fail case succeed by mistake (it could
         * only make a supposed-to-succeed case fail by mistake).
         *
         * In addition, without a lock we can't know for sure when
         * an old vector is no longer referenced, so we simply let
         * old vectors leak.
         *
         * And on top of that, since narenas and arenas can't be
         * changed as-a-pair atomically without a lock, we're also
         * careful to declare them volatile and ensure that we change
         * arenas first.  This prevents another thread from picking
         * up a narenas value too large for the arenas value it
         * reads up (arenas never shrinks).
         *
         * Read the above 50 times before changing anything in this
         * block.
         */
        uptr *p;
        uint newmax = maxarenas << 1;
        if (newmax <= maxarenas)    /* overflow */
            goto error;
        p = (uptr *)malloc(newmax * sizeof(*arenas));
        if (p == NULL)
            goto error;
        memcpy(p, arenas, narenas * sizeof(*arenas));
        arenas = p;     /* old arenas deliberately leaked */
        maxarenas = newmax;
    }

    /* Append the new arena address to arenas. */
    assert(narenas < maxarenas);
    arenas[narenas] = (uptr)bp;
    ++narenas;          /* can't overflow, since narenas < maxarenas before */
    return bp;

error:
    free(bp);
    nfreepools = 0;
    return NULL;
}

/* Return true if and only if P is an address that was allocated by
 * pymalloc.  I must be the index into arenas that the address claims
 * to come from.
 *
 * Tricky:  Letting B be the arena base address in arenas[I], P belongs to the
 * arena if and only if
 *      B <= P < B + ARENA_SIZE
 * Subtracting B throughout, this is true iff
 *      0 <= P-B < ARENA_SIZE
 * By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
 *
 * Obscure:  A PyMem "free memory" function can call the pymalloc free or
 * realloc before the first arena has been allocated.  arenas is still
 * NULL in that case.  We're relying on the fact that narenas is also 0 in
 * that case, so the (I) < narenas test must be false, saving us from trying
 * to index into a NULL arenas.
 */
#define Py_ADDRESS_IN_RANGE(P, POOL)    \
    ((POOL)->arenaindex < narenas &&    \
     (uptr)(P) - arenas[(POOL)->arenaindex] < (uptr)ARENA_SIZE)
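
/* Worked example of the unsigned-compare trick (illustrative numbers only):
 * suppose arenas[I] == 0x40000000 and ARENA_SIZE == 0x40000 (256KB).  For
 * P == 0x40010000, (uptr)(P) - arenas[I] == 0x10000 < 0x40000, so P is
 * accepted.  For P == 0x3fff0000 (below the arena base), the subtraction
 * wraps around to a huge unsigned value, which also fails the `< ARENA_SIZE'
 * test -- that is how the "0 <=" half of the range check is skipped.
 */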

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
 */
#define Py_USING_MEMORY_DEBUGGER

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind.  By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /*
                 * There is room for another block
                 */
                pool->freeblock = (block *)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Pool is full, unlink from used pools
             */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Try to get a cached free pool
         */
        pool = freepools;
        if (pool != NULL) {
            /*
             * Unlink from cached pools
             */
            freepools = pool->nextpool;
        init_pool:
            /*
             * Frontlink to used pools
             */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /*
                 * Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Allocate new pool
         */
        if (nfreepools) {
        commit_pool:
            --nfreepools;
            pool = (poolp)arenabase;
            arenabase += POOL_SIZE;
            pool->arenaindex = narenas - 1;
            pool->szidx = DUMMY_SIZE_IDX;
            goto init_pool;
        }
        /*
         * Allocate new arena
         */
#ifdef WITH_MEMORY_LIMITS
        if (!(narenas < MAX_ARENAS)) {
            UNLOCK();
            goto redirect;
        }
#endif
        bp = new_arena();
        if (bp != NULL)
            goto commit_pool;
        UNLOCK();
        goto redirect;
    }

    /* The small block allocator ends here. */

redirect:
    /*
     * Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /*
         * Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            /*
             * freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /*
             * Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;
            /* Link to freepools.  This is a singly-linked list,
             * and pool->prevpool isn't used there.
             */
            pool->nextpool = freepools;
            freepools = pool;
            UNLOCK();
            return;
        }
        /*
         * Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff:  it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
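            /* Example with illustrative numbers: if the block came
             * from the 64-byte class (size == 64) and nbytes is 49,
             * then 4*49 == 196 > 3*64 == 192, so the block is kept
             * where it is; if nbytes is 48, 4*48 == 192 is not > 192,
             * so the data is copied into a block from the 48-byte
             * class.
             */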
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt:  let C continue to manage this block.
     */
    if (nbytes)
        return realloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = realloc(p, 1);
    return bp ? bp : p;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static ulong serialno = 0;      /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
   to supply a single place to set a breakpoint.
*/
static void
bumpserialno(void)
{
    ++serialno;
}


/* Read 4 bytes at p as a big-endian ulong. */
static ulong
read4(const void *p)
{
    const uchar *q = (const uchar *)p;
    return ((ulong)q[0] << 24) |
           ((ulong)q[1] << 16) |
           ((ulong)q[2] <<  8) |
            (ulong)q[3];
}

/* Write the 4 least-significant bytes of n as a big-endian unsigned int,
   MSB at address p, LSB at p+3. */
static void
write4(void *p, ulong n)
{
    uchar *q = (uchar *)p;
    q[0] = (uchar)((n >> 24) & 0xff);
    q[1] = (uchar)((n >> 16) & 0xff);
    q[2] = (uchar)((n >>  8) & 0xff);
    q[3] = (uchar)( n        & 0xff);
}

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* The debug malloc asks for 16 extra bytes and fills them with useful stuff,
   here calling the underlying malloc's result p:

p[0:4]
    Number of bytes originally asked for.  4-byte unsigned integer,
    big-endian (easier to read in a memory dump).
p[4:8]
    Copies of FORBIDDENBYTE.  Used to catch under- writes and reads.
p[8:8+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[8] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[8+n:8+n+4]
    Copies of FORBIDDENBYTE.  Used to catch over- writes and reads.
p[8+n+4:8+n+8]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    4-byte unsigned integer, big-endian.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
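
/* For instance, a debug request for n == 4 bytes asks the underlying
 * allocator for 20 bytes and lays them out as (byte offsets within p):
 *
 *     p[0:4]    00 00 00 04   requested size, big-endian
 *     p[4:8]    FB FB FB FB   leading pad (FORBIDDENBYTE)
 *     p[8:12]   CB CB CB CB   the user's 4 bytes, initially CLEANBYTE
 *     p[12:16]  FB FB FB FB   trailing pad
 *     p[16:20]  serial number, big-endian
 *
 * (Illustration only; the actual serial-number bytes depend on how many
 * debug allocations have already happened.)
 */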
998
999void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001000_PyObject_DebugMalloc(size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001001{
1002 uchar *p; /* base address of malloc'ed block */
Tim Peters62c06ba2002-03-23 22:28:18 +00001003 uchar *tail; /* p + 8 + nbytes == pointer to tail pad bytes */
Tim Petersddea2082002-03-23 10:03:50 +00001004 size_t total; /* nbytes + 16 */
1005
Tim Peterse0850172002-03-24 00:34:21 +00001006 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00001007 total = nbytes + 16;
Martin v. Löwis18e16552006-02-15 17:27:45 +00001008#if SIZEOF_SIZE_T < 8
1009 /* XXX do this check only on 32-bit machines */
Tim Petersddea2082002-03-23 10:03:50 +00001010 if (total < nbytes || (total >> 31) > 1) {
1011 /* overflow, or we can't represent it in 4 bytes */
1012 /* Obscure: can't do (total >> 32) != 0 instead, because
1013 C doesn't define what happens for a right-shift of 32
1014 when size_t is a 32-bit type. At least C guarantees
1015 size_t is an unsigned type. */
1016 return NULL;
1017 }
Martin v. Löwis18e16552006-02-15 17:27:45 +00001018#endif
Tim Petersddea2082002-03-23 10:03:50 +00001019
Tim Peters8a8cdfd2002-04-12 20:49:36 +00001020 p = (uchar *)PyObject_Malloc(total);
Tim Petersddea2082002-03-23 10:03:50 +00001021 if (p == NULL)
1022 return NULL;
1023
Martin v. Löwis18e16552006-02-15 17:27:45 +00001024 write4(p, (ulong)nbytes);
Tim Petersf6fb5012002-04-12 07:38:53 +00001025 p[4] = p[5] = p[6] = p[7] = FORBIDDENBYTE;
Tim Petersddea2082002-03-23 10:03:50 +00001026
1027 if (nbytes > 0)
Tim Petersf6fb5012002-04-12 07:38:53 +00001028 memset(p+8, CLEANBYTE, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001029
Tim Peters62c06ba2002-03-23 22:28:18 +00001030 tail = p + 8 + nbytes;
Tim Petersf6fb5012002-04-12 07:38:53 +00001031 tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
Tim Peters62c06ba2002-03-23 22:28:18 +00001032 write4(tail + 4, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00001033
1034 return p+8;
1035}
1036
Tim Peters62c06ba2002-03-23 22:28:18 +00001037/* The debug free first checks the 8 bytes on each end for sanity (in
Tim Petersf6fb5012002-04-12 07:38:53 +00001038 particular, that the FORBIDDENBYTEs are still intact).
1039 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001040 Then calls the underlying free.
1041*/
1042void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001043_PyObject_DebugFree(void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001044{
Tim Peters62c06ba2002-03-23 22:28:18 +00001045 uchar *q = (uchar *)p;
Tim Petersddea2082002-03-23 10:03:50 +00001046 size_t nbytes;
1047
Tim Petersddea2082002-03-23 10:03:50 +00001048 if (p == NULL)
1049 return;
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001050 _PyObject_DebugCheckAddress(p);
Tim Petersddea2082002-03-23 10:03:50 +00001051 nbytes = read4(q-8);
1052 if (nbytes > 0)
Tim Petersf6fb5012002-04-12 07:38:53 +00001053 memset(q, DEADBYTE, nbytes);
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001054 PyObject_Free(q-8);
Tim Petersddea2082002-03-23 10:03:50 +00001055}
1056
1057void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001058_PyObject_DebugRealloc(void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001059{
1060 uchar *q = (uchar *)p;
Tim Peters85cc1c42002-04-12 08:52:50 +00001061 uchar *tail;
1062 size_t total; /* nbytes + 16 */
Tim Petersddea2082002-03-23 10:03:50 +00001063 size_t original_nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001064
Tim Petersddea2082002-03-23 10:03:50 +00001065 if (p == NULL)
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001066 return _PyObject_DebugMalloc(nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001067
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001068 _PyObject_DebugCheckAddress(p);
Tim Peters85cc1c42002-04-12 08:52:50 +00001069 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00001070 original_nbytes = read4(q-8);
Tim Peters85cc1c42002-04-12 08:52:50 +00001071 total = nbytes + 16;
1072 if (total < nbytes || (total >> 31) > 1) {
1073 /* overflow, or we can't represent it in 4 bytes */
1074 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001075 }
1076
1077 if (nbytes < original_nbytes) {
Tim Peters85cc1c42002-04-12 08:52:50 +00001078 /* shrinking: mark old extra memory dead */
1079 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001080 }
1081
Tim Peters85cc1c42002-04-12 08:52:50 +00001082 /* Resize and add decorations. */
1083 q = (uchar *)PyObject_Realloc(q-8, total);
1084 if (q == NULL)
1085 return NULL;
1086
Martin v. Löwis18e16552006-02-15 17:27:45 +00001087 write4(q, (ulong)nbytes);
Tim Peters85cc1c42002-04-12 08:52:50 +00001088 assert(q[4] == FORBIDDENBYTE &&
1089 q[5] == FORBIDDENBYTE &&
1090 q[6] == FORBIDDENBYTE &&
1091 q[7] == FORBIDDENBYTE);
1092 q += 8;
1093 tail = q + nbytes;
1094 tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
1095 write4(tail + 4, serialno);
1096
1097 if (nbytes > original_nbytes) {
1098 /* growing: mark new extra memory clean */
1099 memset(q + original_nbytes, CLEANBYTE,
1100 nbytes - original_nbytes);
Tim Peters52aefc82002-04-11 06:36:45 +00001101 }
Tim Peters85cc1c42002-04-12 08:52:50 +00001102
1103 return q;
Tim Petersddea2082002-03-23 10:03:50 +00001104}
1105
Tim Peters7ccfadf2002-04-01 06:04:21 +00001106/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001107 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001108 * and call Py_FatalError to kill the program.
1109 */
1110 void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001111_PyObject_DebugCheckAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001112{
1113 const uchar *q = (const uchar *)p;
Tim Petersd1139e02002-03-28 07:32:11 +00001114 char *msg;
Tim Peters449b5a82002-04-28 06:14:45 +00001115 ulong nbytes;
1116 const uchar *tail;
Tim Petersd1139e02002-03-28 07:32:11 +00001117 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001118
Tim Petersd1139e02002-03-28 07:32:11 +00001119 if (p == NULL) {
Tim Petersddea2082002-03-23 10:03:50 +00001120 msg = "didn't expect a NULL pointer";
Tim Petersd1139e02002-03-28 07:32:11 +00001121 goto error;
1122 }
Tim Petersddea2082002-03-23 10:03:50 +00001123
Tim Peters449b5a82002-04-28 06:14:45 +00001124 /* Check the stuff at the start of p first: if there's underwrite
1125 * corruption, the number-of-bytes field may be nuts, and checking
1126 * the tail could lead to a segfault then.
1127 */
Tim Petersd1139e02002-03-28 07:32:11 +00001128 for (i = 4; i >= 1; --i) {
Tim Petersf6fb5012002-04-12 07:38:53 +00001129 if (*(q-i) != FORBIDDENBYTE) {
Tim Petersd1139e02002-03-28 07:32:11 +00001130 msg = "bad leading pad byte";
1131 goto error;
1132 }
1133 }
Tim Petersddea2082002-03-23 10:03:50 +00001134
Tim Peters449b5a82002-04-28 06:14:45 +00001135 nbytes = read4(q-8);
1136 tail = q + nbytes;
1137 for (i = 0; i < 4; ++i) {
1138 if (tail[i] != FORBIDDENBYTE) {
1139 msg = "bad trailing pad byte";
1140 goto error;
Tim Petersddea2082002-03-23 10:03:50 +00001141 }
1142 }
1143
Tim Petersd1139e02002-03-28 07:32:11 +00001144 return;
1145
1146error:
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001147 _PyObject_DebugDumpAddress(p);
Tim Petersd1139e02002-03-28 07:32:11 +00001148 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00001149}
1150
/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
        const uchar *q = (const uchar *)p;
        const uchar *tail;
        ulong nbytes, serial;
        int i;

        fprintf(stderr, "Debug memory block at address p=%p:\n", p);
        if (p == NULL)
                return;

        nbytes = read4(q-8);
        fprintf(stderr, "    %lu bytes originally requested\n", nbytes);

        /* In case this is nuts, check the leading pad bytes first. */
        fputs("    The 4 pad bytes at p-4 are ", stderr);
        if (*(q-4) == FORBIDDENBYTE &&
            *(q-3) == FORBIDDENBYTE &&
            *(q-2) == FORBIDDENBYTE &&
            *(q-1) == FORBIDDENBYTE) {
                fputs("FORBIDDENBYTE, as expected.\n", stderr);
        }
        else {
                fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                        FORBIDDENBYTE);
                for (i = 4; i >= 1; --i) {
                        const uchar byte = *(q-i);
                        fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
                        if (byte != FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }

                fputs("    Because memory is corrupted at the start, the "
                      "count of bytes requested\n"
                      "       may be bogus, and checking the trailing pad "
                      "bytes may segfault.\n", stderr);
        }

        tail = q + nbytes;
        fprintf(stderr, "    The 4 pad bytes at tail=%p are ", tail);
        if (tail[0] == FORBIDDENBYTE &&
            tail[1] == FORBIDDENBYTE &&
            tail[2] == FORBIDDENBYTE &&
            tail[3] == FORBIDDENBYTE) {
                fputs("FORBIDDENBYTE, as expected.\n", stderr);
        }
        else {
                fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                        FORBIDDENBYTE);
                for (i = 0; i < 4; ++i) {
                        const uchar byte = tail[i];
                        fprintf(stderr, "        at tail+%d: 0x%02x",
                                i, byte);
                        if (byte != FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }
        }

        serial = read4(tail+4);
        fprintf(stderr, "    The block was made by call #%lu to "
                        "debug malloc/realloc.\n", serial);

        if (nbytes > 0) {
                int i = 0;
                fputs("    Data at p:", stderr);
                /* print up to 8 bytes at the start */
                while (q < tail && i < 8) {
                        fprintf(stderr, " %02x", *q);
                        ++i;
                        ++q;
                }
                /* and up to 8 at the end */
                if (q < tail) {
                        if (tail - q > 8) {
                                fputs(" ...", stderr);
                                q = tail - 8;
                        }
                        while (q < tail) {
                                fprintf(stderr, " %02x", *q);
                                ++q;
                        }
                }
                fputc('\n', stderr);
        }
}

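/* For reference, the dump for an intact block resembles the following
 * (the addresses, counts, and data bytes here are made up; the exact text
 * comes from the fprintf/fputs calls above):
 *
 *      Debug memory block at address p=0x8101010:
 *          16 bytes originally requested
 *          The 4 pad bytes at p-4 are FORBIDDENBYTE, as expected.
 *          The 4 pad bytes at tail=0x8101020 are FORBIDDENBYTE, as expected.
 *          The block was made by call #42 to debug malloc/realloc.
 *          Data at p: 68 65 6c 6c 6f ...
 */
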
static ulong
printone(const char* msg, ulong value)
{
        int i, k;
        char buf[100];
        ulong origvalue = value;

        fputs(msg, stderr);
        for (i = (int)strlen(msg); i < 35; ++i)
                fputc(' ', stderr);
        fputc('=', stderr);

        /* Write the value with commas. */
        i = 22;
        buf[i--] = '\0';
        buf[i--] = '\n';
        k = 3;
        do {
                ulong nextvalue = value / 10UL;
                uint digit = value - nextvalue * 10UL;
                value = nextvalue;
                buf[i--] = (char)(digit + '0');
                --k;
                if (k == 0 && value && i >= 0) {
                        k = 3;
                        buf[i--] = ',';
                }
        } while (value && i >= 0);

        while (i >= 0)
                buf[i--] = ' ';
        fputs(buf, stderr);

        return origvalue;
}

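/* A worked example of printone(), assuming the hypothetical call below
 * (the column positions follow from the 35-character label pad and the
 * right-justified value buffer):
 *
 *      printone("# bytes in allocated blocks", 16246528);
 *
 * writes a line of the form
 *
 *      # bytes in allocated blocks        =           16,246,528
 *
 * i.e. the label padded to column 35, an '=', then the value right-justified
 * with a comma inserted every third digit.
 */
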
/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
        uint i;
        const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
        /* # of pools, allocated blocks, and free blocks per class index */
        ulong numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        ulong numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        ulong numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
        /* total # of allocated bytes in used and full pools */
        ulong allocated_bytes = 0;
        /* total # of available bytes in used pools */
        ulong available_bytes = 0;
        /* # of free pools + pools not yet carved out of current arena */
        uint numfreepools = 0;
        /* # of bytes for arena alignment padding */
        ulong arena_alignment = 0;
        /* # of bytes in used and full pools used for pool_headers */
        ulong pool_header_bytes = 0;
        /* # of bytes in used and full pools wasted due to quantization,
         * i.e. the necessarily leftover space at the ends of used and
         * full pools.
         */
        ulong quantization = 0;
        /* running total -- should equal narenas * ARENA_SIZE */
        ulong total;
        char buf[128];

        fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
                SMALL_REQUEST_THRESHOLD, numclasses);

        for (i = 0; i < numclasses; ++i)
                numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

        /* Because full pools aren't linked to from anything, it's easiest
         * to march over all the arenas.  If we're lucky, most of the memory
         * will be living in full pools -- would be a shame to miss them.
         */
        for (i = 0; i < narenas; ++i) {
                uint poolsinarena;
                uint j;
                uptr base = arenas[i];

                /* round up to pool alignment */
                poolsinarena = ARENA_SIZE / POOL_SIZE;
                if (base & (uptr)POOL_SIZE_MASK) {
                        --poolsinarena;
                        arena_alignment += POOL_SIZE;
                        base &= ~(uptr)POOL_SIZE_MASK;
                        base += POOL_SIZE;
                }

                if (i == narenas - 1) {
                        /* current arena may have raw memory at the end */
                        numfreepools += nfreepools;
                        poolsinarena -= nfreepools;
                }

                /* visit every pool in the arena */
                for (j = 0; j < poolsinarena; ++j, base += POOL_SIZE) {
                        poolp p = (poolp)base;
                        const uint sz = p->szidx;
                        uint freeblocks;

                        if (p->ref.count == 0) {
                                /* currently unused */
                                ++numfreepools;
                                assert(pool_is_in_list(p, freepools));
                                continue;
                        }
                        ++numpools[sz];
                        numblocks[sz] += p->ref.count;
                        freeblocks = NUMBLOCKS(sz) - p->ref.count;
                        numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
                        if (freeblocks > 0)
                                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
                }
        }

        fputc('\n', stderr);
        fputs("class   size   num pools   blocks in use  avail blocks\n"
              "-----   ----   ---------   -------------  ------------\n",
              stderr);

        for (i = 0; i < numclasses; ++i) {
                ulong p = numpools[i];
                ulong b = numblocks[i];
                ulong f = numfreeblocks[i];
                uint size = INDEX2SIZE(i);
                if (p == 0) {
                        assert(b == 0 && f == 0);
                        continue;
                }
                fprintf(stderr, "%5u %6u %11lu %15lu %13lu\n",
                        i, size, p, b, f);
                allocated_bytes += b * size;
                available_bytes += f * size;
                pool_header_bytes += p * POOL_OVERHEAD;
                quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
        }
        fputc('\n', stderr);
        (void)printone("# times object malloc called", serialno);

        PyOS_snprintf(buf, sizeof(buf),
                "%u arenas * %d bytes/arena", narenas, ARENA_SIZE);
        (void)printone(buf, (ulong)narenas * ARENA_SIZE);

        fputc('\n', stderr);

        total = printone("# bytes in allocated blocks", allocated_bytes);
        total += printone("# bytes in available blocks", available_bytes);

        PyOS_snprintf(buf, sizeof(buf),
                "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
        total += printone(buf, (ulong)numfreepools * POOL_SIZE);

        total += printone("# bytes lost to pool headers", pool_header_bytes);
        total += printone("# bytes lost to quantization", quantization);
        total += printone("# bytes lost to arena alignment", arena_alignment);
        (void)printone("Total", total);
}

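#if 0
/* A minimal, hypothetical driver showing one way to reach the stats dump:
 * an application embedding a PYMALLOC_DEBUG build could run some Python
 * code and then call _PyObject_DebugMallocStats() directly (it is also
 * convenient to call from a debugger).  This block is illustrative only
 * and is not compiled.
 */
#include "Python.h"

int
main(void)
{
        Py_Initialize();
        PyRun_SimpleString("junk = [str(i) for i in range(10000)]");
        _PyObject_DebugMallocStats();   /* summary table goes to stderr */
        Py_Finalize();
        return 0;
}
#endif
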
#endif  /* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it
   since the definition is after the reference. */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
        return ((pool->arenaindex) < narenas &&
                (uptr)(P) - arenas[pool->arenaindex] < (uptr)ARENA_SIZE);
}
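/* Note on the test above: both bounds are covered by one comparison because
 * the subtraction is done in unsigned (uptr) arithmetic.  If P points below
 * arenas[pool->arenaindex], the difference wraps around to a huge value and
 * the "< ARENA_SIZE" test fails, just as it does when P points at or beyond
 * the end of the arena.
 */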
#endif