#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2).  It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list).  This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the Python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good.  And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address.  Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class.  In other words, there is a fixed-size allocator
 * for each size class.  Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists".  The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time.  To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists.  This technique is quite
 * efficient for memory-intensive programs which allocate mainly small-sized
 * blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *       9-16                    16                       1
 *      17-24                    24                       2
 *      25-32                    32                       3
 *      33-40                    40                       4
 *      41-48                    48                       5
 *      49-56                    56                       6
 *      57-64                    64                       7
 *      65-72                    72                       8
 *       ...                    ...                     ...
 *    241-248                   248                      30
 *    249-256                   256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user.  8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
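
/*
 * Illustrative sketch (not part of the allocator): how a request size maps
 * to a size class index and back.  The index computation mirrors the
 * expression used in PyObject_Malloc() below; the wrapper name
 * size_to_index is hypothetical.
 *
 *     static uint
 *     size_to_index(size_t nbytes)    // valid for 1 <= nbytes <= 256
 *     {
 *         return (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
 *     }
 *
 *     size_to_index(1)   == 0,  INDEX2SIZE(0)  == 8
 *     size_to_index(8)   == 0,  INDEX2SIZE(0)  == 8
 *     size_to_index(9)   == 1,  INDEX2SIZE(1)  == 16
 *     size_to_index(256) == 31, INDEX2SIZE(31) == 256
 */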

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools.  You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files.  To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary.  This is a reserved virtual address space for the
 * current process (obtained through a malloc call).  In no way does this
 * mean that the memory arenas will be used entirely.  A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently.  So malloc'ing big blocks
 * and not using them does not mean "wasting memory"; it wastes addressable
 * range, not physical memory.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks.  Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking.  I'm not positive,
 * however, that it's worth switching to such a locking policy, because of
 * the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it.  The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */
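
/*
 * Expected call order for the lock macros above, per the Locking comment
 * (a sketch only; with Python's threads serialized these all expand to
 * nothing, and the name mylock is hypothetical):
 *
 *     SIMPLELOCK_DECL(mylock)
 *     SIMPLELOCK_INIT(mylock);
 *     SIMPLELOCK_LOCK(mylock);
 *     ... critical section ...
 *     SIMPLELOCK_UNLOCK(mylock);
 *     SIMPLELOCK_FINI(mylock);
 */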

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere.  Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)      (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD   ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
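
/*
 * Worked example for the macros above (a sketch, assuming the 4K POOL_SIZE
 * defined earlier and a 64-bit build where sizeof(struct pool_header) == 48,
 * so POOL_OVERHEAD == 48; the address is hypothetical):
 *
 *     POOL_ADDR(0x...5a38)  == (poolp)0x...5000  // mask off low 12 bits
 *     NUMBLOCKS(0)  == (4096 - 48) / 8   == 506  // 8-byte blocks per pool
 *     NUMBLOCKS(31) == (4096 - 48) / 256 == 15   // 256-byte blocks per pool
 */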

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i.  So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It isn't linked to from anywhere anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member.  The prevpool member has no meaning in
    this case.  Empty pools have no inherent size class: the next time a
    malloc finds an empty list in usedpools[], it takes the first pool off
    of freepools.  If the size class needed happens to be the same as the
    size class the pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.  (A sketch of the free-list
linkage follows this comment.)

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header's nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish
compensates for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header).  OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
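
/*
 * The free-list pop referenced in the Block Management comment above
 * (a sketch): the first sizeof(block *) bytes of each free block hold the
 * address of the next free block, so taking the head of a pool's free
 * list is just
 *
 *     bp = pool->freeblock;
 *     pool->freeblock = *(block **)bp;    // NULL once the list is exhausted
 *
 * exactly as PyObject_Malloc() does below.
 */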

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
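
/*
 * A sanity-check sketch of the PTA() trick (hypothetical helper, not used
 * by the allocator): freshly initialized, every list headed at
 * usedpools[i + i] points back at itself, which is exactly how
 * PyObject_Malloc() detects "no used pool for this size class".
 *
 *     static int
 *     usedpool_list_is_empty(uint size)   // size == size class index
 *     {
 *         poolp head = usedpools[size + size];
 *         return head == head->nextpool;
 *     }
 */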

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

#ifdef PYMALLOC_DEBUG
/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;
#endif

/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;        /* overflow */
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;        /* overflow */
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;      /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    arenaobj->address = (uptr)malloc(ARENA_SIZE);
    if (arenaobj->address == 0) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }

    ++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
#endif
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
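
/*
 * Worked example of the alignment fix-up at the end of new_arena()
 * (a sketch with a hypothetical address): if malloc returns
 * address == 0x101234, then
 *
 *     excess       == 0x101234 & 0xfff              == 0x234
 *     pool_address == 0x101234 + (0x1000 - 0x234)   == 0x102000
 *     nfreepools   == 256K/4K - 1                   == 63
 *
 * i.e. one pool is sacrificed so the remaining 63 start on 4K boundaries.
 */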

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                    \
    ((POOL)->arenaindex < maxarenas &&                  \
     (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
     arenas[(POOL)->arenaindex].address != 0)
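
/*
 * A small numeric illustration of the unsigned-arithmetic trick above
 * (hypothetical values): with B == arenas[(POOL)->arenaindex].address,
 *
 *     P == B + 0x100  ->  (uptr)(P) - B == 0x100, < ARENA_SIZE: in range
 *     P == B - 8      ->  (uptr)(P) - B wraps to a huge unsigned value,
 *                         so the comparison fails; no separate "0 <= P-B"
 *                         test is needed.
 */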

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind.  By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
                          (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available: use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list.  However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated: remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0: it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena: it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}
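
/*
 * Typical use from C code (a sketch): the small request below is served
 * from a pool (size class idx 2, 24-byte block, since (20-1)>>3 == 2);
 * the large one exceeds SMALL_REQUEST_THRESHOLD and is passed straight
 * to the system malloc.
 *
 *     void *small = PyObject_Malloc(20);
 *     void *large = PyObject_Malloc(1024);
 *     ...
 *     PyObject_Free(small);
 *     PyObject_Free(large);
 */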

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;    /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty: leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty: unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools.  This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management.  We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1.  First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena. */
                free((void *)ao->address);
                ao->address = 0;        /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas.  Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted.  The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed.  In
             * a few unscientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4.  Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3: We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff: it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt: let C continue to manage this block.
     */
    if (nbytes)
        return realloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = realloc(p, 1);
    return bp ? bp : p;
}
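
/*
 * The 25% shrink rule illustrated (a sketch): for a block in the 64-byte
 * size class, PyObject_Realloc(p, n) returns p unchanged for any n in
 * 49..64 (since 4*n > 3*64 requires n > 48), while n <= 48 copies the
 * data into a smaller size class and frees the old block.
 */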

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled: Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

Tim Petersddea2082002-03-23 10:03:50 +00001207#ifdef PYMALLOC_DEBUG
1208/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001209/* A x-platform debugging allocator. This doesn't manage memory directly,
1210 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1211 */
Tim Petersddea2082002-03-23 10:03:50 +00001212
Tim Petersf6fb5012002-04-12 07:38:53 +00001213/* Special bytes broadcast into debug memory blocks at appropriate times.
1214 * Strings of these are unlikely to be valid addresses, floats, ints or
1215 * 7-bit ASCII.
1216 */
1217#undef CLEANBYTE
1218#undef DEADBYTE
1219#undef FORBIDDENBYTE
1220#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001221#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001222#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001223
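/* Illustrative example (not from the original source): a freshly
 * allocated 4-byte debug block reads CB CB CB CB; after the debug
 * free the same bytes read DB DB DB DB, so uninitialized reads and
 * use-after-free both leave a distinctive fingerprint in a debugger.
 */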
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001224static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
Tim Petersddea2082002-03-23 10:03:50 +00001225
Tim Peterse0850172002-03-24 00:34:21 +00001226/* serialno is always incremented by calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001227 * to supply a single place to set a breakpoint.
1228 */
Tim Peterse0850172002-03-24 00:34:21 +00001229static void
Neil Schemenauerbd02b142002-03-28 21:05:38 +00001230bumpserialno(void)
Tim Peterse0850172002-03-24 00:34:21 +00001231{
1232 ++serialno;
1233}
1234
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001235#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001236
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001237/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1238static size_t
1239read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001240{
Tim Peters62c06ba2002-03-23 22:28:18 +00001241 const uchar *q = (const uchar *)p;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001242 size_t result = *q++;
1243 int i;
1244
1245 for (i = SST; --i > 0; ++q)
1246 result = (result << 8) | *q;
1247 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001248}
1249
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001250/* Write n as a big-endian size_t, MSB at address p, LSB at
1251 * p + sizeof(size_t) - 1.
1252 */
Tim Petersddea2082002-03-23 10:03:50 +00001253static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001254write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001255{
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001256 uchar *q = (uchar *)p + SST - 1;
1257 int i;
1258
1259 for (i = SST; --i >= 0; --q) {
1260 *q = (uchar)(n & 0xff);
1261 n >>= 8;
1262 }
Tim Petersddea2082002-03-23 10:03:50 +00001263}
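/* Round-trip sketch (illustrative only, guarded out of the build):
 * write_size_t() and read_size_t() are inverses for any size_t value,
 * regardless of the host's native byte order.
 */
#if 0
static void
size_t_roundtrip_demo(void)
{
	uchar buf[SST];
	write_size_t(buf, (size_t)0x01020304);
	assert(read_size_t(buf) == (size_t)0x01020304);
	assert(buf[SST - 1] == 0x04);	/* LSB lands last: big-endian */
}
#endif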
1264
Tim Peters08d82152002-04-18 22:25:03 +00001265#ifdef Py_DEBUG
1266/* Is target in the list? The list is traversed via the nextpool pointers.
1267 * The list may be NULL-terminated, or circular. Return 1 if target is in
1268 * list, else 0.
1269 */
1270static int
1271pool_is_in_list(const poolp target, poolp list)
1272{
1273 poolp origlist = list;
1274 assert(target != NULL);
1275 if (list == NULL)
1276 return 0;
1277 do {
1278 if (target == list)
1279 return 1;
1280 list = list->nextpool;
1281 } while (list != NULL && list != origlist);
1282 return 0;
1283}
1284
1285#else
1286#define pool_is_in_list(X, Y) 1
1287
1288#endif /* Py_DEBUG */
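/* Note (added commentary): without Py_DEBUG the macro above replaces
 * the list walk with a constant 1, so the pool_is_in_list() asserts in
 * _PyObject_DebugMallocStats() degrade to harmless assert(1) checks.
 */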
1289
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001290/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1291 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001292
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001293p[0: S]
1294 Number of bytes originally asked for. This is a size_t, big-endian (easier
1295 to read in a memory dump).
1296p[S: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001297 Copies of FORBIDDENBYTE. Used to catch under-writes and under-reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001298p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001299 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001300 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001301 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001302 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001303p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001304 Copies of FORBIDDENBYTE. Used to catch over-writes and over-reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001305p[2*S+n+S: 2*S+n+2*S]
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001306 A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
1307 and _PyObject_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001308 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001309 If "bad memory" is detected later, the serial number gives an
1310 excellent way to set a breakpoint on the next run, to capture the
1311 instant at which this block was passed out.
1312*/
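/* Concrete picture (illustrative, assuming SST == 8): for nbytes == 2 the
 * underlying allocator returns total == 2 + 4*8 == 34 bytes, laid out as
 *
 *	p[0:8]		00 00 00 00 00 00 00 02	(big-endian nbytes)
 *	p[8:16]		FB FB FB FB FB FB FB FB	(leading pad)
 *	p[16:18]	CB CB			(the caller's 2 bytes)
 *	p[18:26]	FB FB FB FB FB FB FB FB	(trailing pad)
 *	p[26:34]	big-endian serial number
 *
 * and &p[16] is what the caller sees.
 */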
1313
1314void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001315_PyObject_DebugMalloc(size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001316{
1317 uchar *p; /* base address of malloc'ed block */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001318 uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1319 size_t total; /* nbytes + 4*SST */
Tim Petersddea2082002-03-23 10:03:50 +00001320
Tim Peterse0850172002-03-24 00:34:21 +00001321 bumpserialno();
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001322 total = nbytes + 4*SST;
1323 if (total < nbytes)
1324 /* overflow: can't represent total as a size_t */
Tim Petersddea2082002-03-23 10:03:50 +00001325 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001326
Tim Peters8a8cdfd2002-04-12 20:49:36 +00001327 p = (uchar *)PyObject_Malloc(total);
Tim Petersddea2082002-03-23 10:03:50 +00001328 if (p == NULL)
1329 return NULL;
1330
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001331 write_size_t(p, nbytes);
1332 memset(p + SST, FORBIDDENBYTE, SST);
Tim Petersddea2082002-03-23 10:03:50 +00001333
1334 if (nbytes > 0)
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001335 memset(p + 2*SST, CLEANBYTE, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001336
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001337 tail = p + 2*SST + nbytes;
1338 memset(tail, FORBIDDENBYTE, SST);
1339 write_size_t(tail + SST, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00001340
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001341 return p + 2*SST;
Tim Petersddea2082002-03-23 10:03:50 +00001342}
1343
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001344/* The debug free first checks the 2*SST bytes on each end for sanity (in
Tim Petersf6fb5012002-04-12 07:38:53 +00001345 particular, that the FORBIDDENBYTEs are still intact).
1346 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001347 Then calls the underlying free.
1348*/
1349void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001350_PyObject_DebugFree(void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001351{
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001352 uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */
Tim Petersddea2082002-03-23 10:03:50 +00001353 size_t nbytes;
1354
Tim Petersddea2082002-03-23 10:03:50 +00001355 if (p == NULL)
1356 return;
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001357 _PyObject_DebugCheckAddress(p);
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001358 nbytes = read_size_t(q);
Tim Petersddea2082002-03-23 10:03:50 +00001359 if (nbytes > 0)
Tim Petersf6fb5012002-04-12 07:38:53 +00001360 memset(q, DEADBYTE, nbytes);
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001361 PyObject_Free(q);
Tim Petersddea2082002-03-23 10:03:50 +00001362}
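/* Abuse sketch (illustrative only, guarded out of the build): a single
 * byte written past the end of a debug block clobbers a FORBIDDENBYTE,
 * so the next debug free aborts via "bad trailing pad byte".
 */
#if 0
static void
overwrite_demo(void)
{
	char *s = (char *)_PyObject_DebugMalloc(4);
	if (s == NULL)
		return;
	s[4] = 'x';		/* one past the end: tramples the pad */
	_PyObject_DebugFree(s);	/* Py_FatalError: bad trailing pad byte */
}
#endif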
1363
1364void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001365_PyObject_DebugRealloc(void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001366{
1367 uchar *q = (uchar *)p;
Tim Peters85cc1c42002-04-12 08:52:50 +00001368 uchar *tail;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001369 size_t total; /* nbytes + 4*SST */
Tim Petersddea2082002-03-23 10:03:50 +00001370 size_t original_nbytes;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001371 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001372
Tim Petersddea2082002-03-23 10:03:50 +00001373 if (p == NULL)
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001374 return _PyObject_DebugMalloc(nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001375
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001376 _PyObject_DebugCheckAddress(p);
Tim Peters85cc1c42002-04-12 08:52:50 +00001377 bumpserialno();
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001378 original_nbytes = read_size_t(q - 2*SST);
1379 total = nbytes + 4*SST;
1380 if (total < nbytes)
1381 /* overflow: can't represent total as a size_t */
Tim Peters85cc1c42002-04-12 08:52:50 +00001382 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001383
1384 if (nbytes < original_nbytes) {
Tim Peters85cc1c42002-04-12 08:52:50 +00001385 /* shrinking: mark old extra memory dead */
1386 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001387 }
1388
Tim Peters85cc1c42002-04-12 08:52:50 +00001389 /* Resize and add decorations. */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001390 q = (uchar *)PyObject_Realloc(q - 2*SST, total);
Tim Peters85cc1c42002-04-12 08:52:50 +00001391 if (q == NULL)
1392 return NULL;
1393
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001394 write_size_t(q, nbytes);
1395 for (i = 0; i < SST; ++i)
1396 assert(q[SST + i] == FORBIDDENBYTE);
1397 q += 2*SST;
Tim Peters85cc1c42002-04-12 08:52:50 +00001398 tail = q + nbytes;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001399 memset(tail, FORBIDDENBYTE, SST);
1400 write_size_t(tail + SST, serialno);
Tim Peters85cc1c42002-04-12 08:52:50 +00001401
1402 if (nbytes > original_nbytes) {
1403 /* growing: mark new extra memory clean */
1404 memset(q + original_nbytes, CLEANBYTE,
1405 nbytes - original_nbytes);
Tim Peters52aefc82002-04-11 06:36:45 +00001406 }
Tim Peters85cc1c42002-04-12 08:52:50 +00001407
1408 return q;
Tim Petersddea2082002-03-23 10:03:50 +00001409}
1410
Tim Peters7ccfadf2002-04-01 06:04:21 +00001411/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001412 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001413 * and call Py_FatalError to kill the program.
1414 */
1415void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001416_PyObject_DebugCheckAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001417{
1418 const uchar *q = (const uchar *)p;
Tim Petersd1139e02002-03-28 07:32:11 +00001419 char *msg;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001420 size_t nbytes;
Tim Peters449b5a82002-04-28 06:14:45 +00001421 const uchar *tail;
Tim Petersd1139e02002-03-28 07:32:11 +00001422 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001423
Tim Petersd1139e02002-03-28 07:32:11 +00001424 if (p == NULL) {
Tim Petersddea2082002-03-23 10:03:50 +00001425 msg = "didn't expect a NULL pointer";
Tim Petersd1139e02002-03-28 07:32:11 +00001426 goto error;
1427 }
Tim Petersddea2082002-03-23 10:03:50 +00001428
Tim Peters449b5a82002-04-28 06:14:45 +00001429 /* Check the stuff at the start of p first: if there's underwrite
1430 * corruption, the number-of-bytes field may be nuts, and checking
1431 * the tail could lead to a segfault then.
1432 */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001433 for (i = SST; i >= 1; --i) {
Tim Petersf6fb5012002-04-12 07:38:53 +00001434 if (*(q-i) != FORBIDDENBYTE) {
Tim Petersd1139e02002-03-28 07:32:11 +00001435 msg = "bad leading pad byte";
1436 goto error;
1437 }
1438 }
Tim Petersddea2082002-03-23 10:03:50 +00001439
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001440 nbytes = read_size_t(q - 2*SST);
Tim Peters449b5a82002-04-28 06:14:45 +00001441 tail = q + nbytes;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001442 for (i = 0; i < SST; ++i) {
Tim Peters449b5a82002-04-28 06:14:45 +00001443 if (tail[i] != FORBIDDENBYTE) {
1444 msg = "bad trailing pad byte";
1445 goto error;
Tim Petersddea2082002-03-23 10:03:50 +00001446 }
1447 }
1448
Tim Petersd1139e02002-03-28 07:32:11 +00001449 return;
1450
1451error:
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001452 _PyObject_DebugDumpAddress(p);
Tim Petersd1139e02002-03-28 07:32:11 +00001453 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00001454}
1455
Tim Peters7ccfadf2002-04-01 06:04:21 +00001456/* Display info to stderr about the memory block at p. */
Tim Petersddea2082002-03-23 10:03:50 +00001457void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001458_PyObject_DebugDumpAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001459{
1460 const uchar *q = (const uchar *)p;
1461 const uchar *tail;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001462 size_t nbytes, serial;
Tim Petersd1139e02002-03-28 07:32:11 +00001463 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001464 int ok;
Tim Petersddea2082002-03-23 10:03:50 +00001465
1466 fprintf(stderr, "Debug memory block at address p=%p:\n", p);
1467 if (p == NULL)
1468 return;
1469
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001470 nbytes = read_size_t(q - 2*SST);
1471 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
1472 "requested\n", nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001473
Tim Peters449b5a82002-04-28 06:14:45 +00001474 /* In case this is nuts, check the leading pad bytes first. */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001475 fprintf(stderr, " The %d pad bytes at p-%d are ", SST, SST);
1476 ok = 1;
1477 for (i = 1; i <= SST; ++i) {
1478 if (*(q-i) != FORBIDDENBYTE) {
1479 ok = 0;
1480 break;
1481 }
Tim Petersddea2082002-03-23 10:03:50 +00001482 }
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001483 if (ok)
1484 fputs("FORBIDDENBYTE, as expected.\n", stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001485 else {
Tim Petersf6fb5012002-04-12 07:38:53 +00001486 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1487 FORBIDDENBYTE);
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001488 for (i = SST; i >= 1; --i) {
Tim Petersddea2082002-03-23 10:03:50 +00001489 const uchar byte = *(q-i);
1490 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
Tim Petersf6fb5012002-04-12 07:38:53 +00001491 if (byte != FORBIDDENBYTE)
Tim Petersddea2082002-03-23 10:03:50 +00001492 fputs(" *** OUCH", stderr);
1493 fputc('\n', stderr);
1494 }
Tim Peters449b5a82002-04-28 06:14:45 +00001495
1496 fputs(" Because memory is corrupted at the start, the "
1497 "count of bytes requested\n"
1498 " may be bogus, and checking the trailing pad "
1499 "bytes may segfault.\n", stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001500 }
1501
1502 tail = q + nbytes;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001503 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
1504 ok = 1;
1505 for (i = 0; i < SST; ++i) {
1506 if (tail[i] != FORBIDDENBYTE) {
1507 ok = 0;
1508 break;
1509 }
Tim Petersddea2082002-03-23 10:03:50 +00001510 }
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001511 if (ok)
1512 fputs("FORBIDDENBYTE, as expected.\n", stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001513 else {
Tim Petersf6fb5012002-04-12 07:38:53 +00001514 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1515 FORBIDDENBYTE);
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001516 for (i = 0; i < SST; ++i) {
Tim Petersddea2082002-03-23 10:03:50 +00001517 const uchar byte = tail[i];
1518 fprintf(stderr, " at tail+%d: 0x%02x",
1519 i, byte);
Tim Petersf6fb5012002-04-12 07:38:53 +00001520 if (byte != FORBIDDENBYTE)
Tim Petersddea2082002-03-23 10:03:50 +00001521 fputs(" *** OUCH", stderr);
1522 fputc('\n', stderr);
1523 }
1524 }
1525
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001526 serial = read_size_t(tail + SST);
1527 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
1528 "u to debug malloc/realloc.\n", serial);
Tim Petersddea2082002-03-23 10:03:50 +00001529
1530 if (nbytes > 0) {
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001531 i = 0;
Tim Peters449b5a82002-04-28 06:14:45 +00001532 fputs(" Data at p:", stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001533 /* print up to 8 bytes at the start */
1534 while (q < tail && i < 8) {
1535 fprintf(stderr, " %02x", *q);
1536 ++i;
1537 ++q;
1538 }
1539 /* and up to 8 at the end */
1540 if (q < tail) {
1541 if (tail - q > 8) {
Tim Peters62c06ba2002-03-23 22:28:18 +00001542 fputs(" ...", stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001543 q = tail - 8;
1544 }
1545 while (q < tail) {
1546 fprintf(stderr, " %02x", *q);
1547 ++q;
1548 }
1549 }
Tim Peters62c06ba2002-03-23 22:28:18 +00001550 fputc('\n', stderr);
Tim Petersddea2082002-03-23 10:03:50 +00001551 }
1552}
1553
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001554static size_t
1555printone(const char* msg, size_t value)
Tim Peters16bcb6b2002-04-05 05:45:31 +00001556{
Tim Peters49f26812002-04-06 01:45:35 +00001557 int i, k;
1558 char buf[100];
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001559 size_t origvalue = value;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001560
1561 fputs(msg, stderr);
Tim Peters49f26812002-04-06 01:45:35 +00001562 for (i = (int)strlen(msg); i < 35; ++i)
Tim Peters16bcb6b2002-04-05 05:45:31 +00001563 fputc(' ', stderr);
Tim Peters49f26812002-04-06 01:45:35 +00001564 fputc('=', stderr);
1565
1566 /* Write the value with commas. */
1567 i = 22;
1568 buf[i--] = '\0';
1569 buf[i--] = '\n';
1570 k = 3;
1571 do {
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001572 size_t nextvalue = value / 10;
1573 uint digit = (uint)(value - nextvalue * 10);
Tim Peters49f26812002-04-06 01:45:35 +00001574 value = nextvalue;
1575 buf[i--] = (char)(digit + '0');
1576 --k;
1577 if (k == 0 && value && i >= 0) {
1578 k = 3;
1579 buf[i--] = ',';
1580 }
1581 } while (value && i >= 0);
1582
1583 while (i >= 0)
1584 buf[i--] = ' ';
1585 fputs(buf, stderr);
1586
1587 return origvalue;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001588}
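/* For example (illustrative): printone("# arenas allocated total", 1234567)
 * writes a line shaped like
 *
 *	# arenas allocated total           =            1,234,567
 *
 * (the message padded to 35 columns, the value comma-grouped and
 * right-justified) and returns the value so callers can keep totals.
 */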
1589
Tim Peters08d82152002-04-18 22:25:03 +00001590/* Print summary info to stderr about the state of pymalloc's structures.
1591 * In Py_DEBUG mode, also perform some expensive internal consistency
1592 * checks.
1593 */
Tim Peters7ccfadf2002-04-01 06:04:21 +00001594void
Tim Peters0e871182002-04-13 08:29:14 +00001595_PyObject_DebugMallocStats(void)
Tim Peters7ccfadf2002-04-01 06:04:21 +00001596{
1597 uint i;
1598 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001599 /* # of pools, allocated blocks, and free blocks per class index */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001600 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1601 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1602 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
Tim Peters16bcb6b2002-04-05 05:45:31 +00001603 /* total # of allocated bytes in used and full pools */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001604 size_t allocated_bytes = 0;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001605 /* total # of available bytes in used pools */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001606 size_t available_bytes = 0;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001607 /* # of free pools + pools not yet carved out of current arena */
1608 uint numfreepools = 0;
1609 /* # of bytes for arena alignment padding */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001610 size_t arena_alignment = 0;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001611 /* # of bytes in used and full pools used for pool_headers */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001612 size_t pool_header_bytes = 0;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001613 /* # of bytes in used and full pools wasted due to quantization,
1614 * i.e. the necessarily leftover space at the ends of used and
1615 * full pools.
1616 */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001617 size_t quantization = 0;
Thomas Woutersa9773292006-04-21 09:43:23 +00001618 /* # of arenas actually allocated. */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001619 size_t narenas = 0;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001620 /* running total -- should equal narenas * ARENA_SIZE */
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001621 size_t total;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001622 char buf[128];
Tim Peters7ccfadf2002-04-01 06:04:21 +00001623
Tim Peters7ccfadf2002-04-01 06:04:21 +00001624 fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
1625 SMALL_REQUEST_THRESHOLD, numclasses);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001626
1627 for (i = 0; i < numclasses; ++i)
1628 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
1629
Tim Peters6169f092002-04-01 20:12:59 +00001630 /* Because full pools aren't linked to from anything, it's easiest
1631 * to march over all the arenas. If we're lucky, most of the memory
1632 * will be living in full pools -- would be a shame to miss them.
Tim Peters7ccfadf2002-04-01 06:04:21 +00001633 */
Thomas Woutersa9773292006-04-21 09:43:23 +00001634 for (i = 0; i < maxarenas; ++i) {
Tim Peters7ccfadf2002-04-01 06:04:21 +00001635 uint poolsinarena;
1636 uint j;
Thomas Woutersa9773292006-04-21 09:43:23 +00001637 uptr base = arenas[i].address;
1638
1639 /* Skip arenas which are not allocated. */
1640 if (arenas[i].address == (uptr)NULL)
1641 continue;
1642 narenas += 1;
1643
1644 poolsinarena = arenas[i].ntotalpools;
1645 numfreepools += arenas[i].nfreepools;
Tim Peters7ccfadf2002-04-01 06:04:21 +00001646
1647 /* round up to pool alignment */
Tim Peters7ccfadf2002-04-01 06:04:21 +00001648 if (base & (uptr)POOL_SIZE_MASK) {
Tim Peters16bcb6b2002-04-05 05:45:31 +00001649 arena_alignment += POOL_SIZE;
Tim Peters7ccfadf2002-04-01 06:04:21 +00001650 base &= ~(uptr)POOL_SIZE_MASK;
1651 base += POOL_SIZE;
1652 }
1653
Tim Peters7ccfadf2002-04-01 06:04:21 +00001654 /* visit every pool in the arena */
Thomas Woutersa9773292006-04-21 09:43:23 +00001655 assert(base <= (uptr) arenas[i].pool_address);
1656 for (j = 0;
1657 base < (uptr) arenas[i].pool_address;
1658 ++j, base += POOL_SIZE) {
Tim Peters7ccfadf2002-04-01 06:04:21 +00001659 poolp p = (poolp)base;
Tim Peters08d82152002-04-18 22:25:03 +00001660 const uint sz = p->szidx;
1661 uint freeblocks;
1662
Tim Peters7ccfadf2002-04-01 06:04:21 +00001663 if (p->ref.count == 0) {
1664 /* currently unused */
Thomas Woutersa9773292006-04-21 09:43:23 +00001665 assert(pool_is_in_list(p, arenas[i].freepools));
Tim Peters7ccfadf2002-04-01 06:04:21 +00001666 continue;
1667 }
Tim Peters08d82152002-04-18 22:25:03 +00001668 ++numpools[sz];
1669 numblocks[sz] += p->ref.count;
1670 freeblocks = NUMBLOCKS(sz) - p->ref.count;
1671 numfreeblocks[sz] += freeblocks;
1672#ifdef Py_DEBUG
1673 if (freeblocks > 0)
1674 assert(pool_is_in_list(p, usedpools[sz + sz]));
1675#endif
Tim Peters7ccfadf2002-04-01 06:04:21 +00001676 }
1677 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001678 assert(narenas == narenas_currently_allocated);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001679
1680 fputc('\n', stderr);
Tim Peters49f26812002-04-06 01:45:35 +00001681 fputs("class size num pools blocks in use avail blocks\n"
1682 "----- ---- --------- ------------- ------------\n",
Tim Peters7ccfadf2002-04-01 06:04:21 +00001683 stderr);
1684
Tim Peters7ccfadf2002-04-01 06:04:21 +00001685 for (i = 0; i < numclasses; ++i) {
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001686 size_t p = numpools[i];
1687 size_t b = numblocks[i];
1688 size_t f = numfreeblocks[i];
Tim Peterse70ddf32002-04-05 04:32:29 +00001689 uint size = INDEX2SIZE(i);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001690 if (p == 0) {
1691 assert(b == 0 && f == 0);
1692 continue;
1693 }
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001694 fprintf(stderr, "%5u %6u "
1695 "%11" PY_FORMAT_SIZE_T "u "
1696 "%15" PY_FORMAT_SIZE_T "u "
1697 "%13" PY_FORMAT_SIZE_T "u\n",
Tim Peters7ccfadf2002-04-01 06:04:21 +00001698 i, size, p, b, f);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001699 allocated_bytes += b * size;
1700 available_bytes += f * size;
1701 pool_header_bytes += p * POOL_OVERHEAD;
1702 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001703 }
1704 fputc('\n', stderr);
Tim Peters0e871182002-04-13 08:29:14 +00001705 (void)printone("# times object malloc called", serialno);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001706
Thomas Woutersa9773292006-04-21 09:43:23 +00001707 (void)printone("# arenas allocated total", ntimes_arena_allocated);
1708 (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
1709 (void)printone("# arenas highwater mark", narenas_highwater);
1710 (void)printone("# arenas allocated current", narenas);
1711
Tim Peters16bcb6b2002-04-05 05:45:31 +00001712 PyOS_snprintf(buf, sizeof(buf),
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001713 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1714 narenas, ARENA_SIZE);
Thomas Woutersa9773292006-04-21 09:43:23 +00001715 (void)printone(buf, narenas * ARENA_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001716
1717 fputc('\n', stderr);
1718
Tim Peters49f26812002-04-06 01:45:35 +00001719 total = printone("# bytes in allocated blocks", allocated_bytes);
Tim Peters0e871182002-04-13 08:29:14 +00001720 total += printone("# bytes in available blocks", available_bytes);
Tim Peters49f26812002-04-06 01:45:35 +00001721
Tim Peters16bcb6b2002-04-05 05:45:31 +00001722 PyOS_snprintf(buf, sizeof(buf),
1723 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001724 total += printone(buf, (size_t)numfreepools * POOL_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001725
Tim Peters16bcb6b2002-04-05 05:45:31 +00001726 total += printone("# bytes lost to pool headers", pool_header_bytes);
1727 total += printone("# bytes lost to quantization", quantization);
1728 total += printone("# bytes lost to arena alignment", arena_alignment);
1729 (void)printone("Total", total);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001730}
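/* How this typically gets run (assumption, not stated in this file): a
 * PYMALLOC_DEBUG interpreter can invoke it at shutdown, e.g. when the
 * PYTHONMALLOCSTATS environment variable is set, dumping the per-class
 * table and the arena/pool byte accounting to stderr.
 */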
1731
Tim Petersddea2082002-03-23 10:03:50 +00001732#endif /* PYMALLOC_DEBUG */
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001733
1734#ifdef Py_USING_MEMORY_DEBUGGER
Thomas Woutersa9773292006-04-21 09:43:23 +00001735/* Make this function last so gcc won't inline it since the definition is
1736 * after the reference.
1737 */
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001738int
1739Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1740{
Thomas Woutersa9773292006-04-21 09:43:23 +00001741 return pool->arenaindex < maxarenas &&
1742 (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
1743 arenas[pool->arenaindex].address != 0;
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001744}
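/* Added note: the middle test leans on unsigned wraparound. If P sits
 * below the arena base, (uptr)P - address wraps to a huge value and
 * fails the < ARENA_SIZE test, so one comparison rejects addresses on
 * either side of the arena.
 */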
1745#endif