#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerates over time. To avoid
 * this, we partition each free list into pools and we dynamically share the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 * 0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

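/* Worked example (illustrative only, not used by the code): the request-to-
 * size-class mapping used throughout this file is
 *
 *      size class idx = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT
 *
 * so a 20-byte request maps to idx (20-1)>>3 == 2, and INDEX2SIZE(2) == 24
 * recovers the 24-byte block size shown in the allocation strategy table
 * above.
 */
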
/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

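/* A possible compile-time check of the two invariants above (illustrative
 * only; the original code relies on convention rather than enforcing them).
 */
#if SMALL_REQUEST_THRESHOLD < ALIGNMENT || SMALL_REQUEST_THRESHOLD > 256 || \
    SMALL_REQUEST_THRESHOLD % ALIGNMENT != 0
#error "SMALL_REQUEST_THRESHOLD violates its documented invariants"
#endif
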
/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory"; at worst it wastes addressable range.
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (and should also be the
 * fastest) lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

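/* For contrast, a sketch of what a real lock binding could look like, e.g.
 * with POSIX threads (illustrative only; the macros above are deliberately
 * left empty because the interpreter lock already serializes callers):
 *
 *      #include <pthread.h>
 *      #define SIMPLELOCK_DECL(lock)   static pthread_mutex_t lock;
 *      #define SIMPLELOCK_INIT(lock)   pthread_mutex_init(&(lock), NULL)
 *      #define SIMPLELOCK_FINI(lock)   pthread_mutex_destroy(&(lock))
 *      #define SIMPLELOCK_LOCK(lock)   pthread_mutex_lock(&(lock))
 *      #define SIMPLELOCK_UNLOCK(lock) pthread_mutex_unlock(&(lock))
 */
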
/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))

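/* Worked example of the macros above (illustrative, with assumed sizes): on
 * a 32-bit build sizeof(struct pool_header) is 32, so POOL_OVERHEAD == 32
 * and a 4K pool holds NUMBLOCKS(0) == (4096 - 32) / 8 == 508 eight-byte
 * blocks; on a typical 64-bit build the header rounds up to 48 bytes,
 * giving (4096 - 48) / 8 == 506.
 */
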
/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's no longer linked to from anywhere, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member. The prevpool member has no meaning in this case.
    Empty pools have no inherent size class: the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* all linked together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with
pymalloc striving, at all levels (arena, pool, and block), never to touch a
piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consume sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};

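/* Illustrative sketch (not compiled): how the fudged-up headers above are
 * used by the allocator. For size class idx `i`, an empty list is detected
 * by the header pointing at itself, and a block is popped off a used pool's
 * free list by following the pointer stored in the free block itself.
 */
#if 0
    poolp pool = usedpools[i + i];       /* list header for size class i */
    if (pool != pool->nextpool) {        /* list non-empty: a used pool  */
        block *bp = pool->freeblock;     /* head of the pool's free list */
        pool->freeblock = *(block **)bp; /* pop: next free block or NULL */
    }
#endif
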
/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

#ifdef PYMALLOC_DEBUG
/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;
#endif

/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                   &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    arenaobj->address = (uptr)malloc(ARENA_SIZE);
    if (arenaobj->address == 0) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }

    ++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
#endif
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated. `arenas` is still NULL in that
case. We're relying on the fact that maxarenas is also 0 in that case, so that
(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address. In addition, AO.address is not 0 (no arena can start at address 0
(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash. If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5. Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers. The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in small constant time, independent of the number of arenas
obmalloc controls. Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                    \
    ((POOL)->arenaindex < maxarenas &&                  \
     (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
     arenas[(POOL)->arenaindex].address != 0)

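/* Usage sketch (illustrative): every entry point computes POOL exactly once
 * and passes it in, since the macro expands POOL three times.
 */
#if 0
    poolp pool = POOL_ADDR(p);              /* round p down to its pool */
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* p was handed out by pymalloc: service it from the pools. */
    }
    else {
        /* p belongs to the system allocator: use free()/realloc(). */
    }
#endif
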

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind. Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind. By making it a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function instead.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
                          (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers. This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for nbytes < 0 is not required.
     */
    if (nbytes > PY_SSIZE_T_MAX)
        return NULL;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available:  use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list. However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated:  remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0:  it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}

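/* Usage sketch (illustrative): from a caller's point of view the object
 * allocator behaves like malloc, with SMALL_REQUEST_THRESHOLD deciding
 * which path a request takes.
 */
#if 0
    void *q = PyObject_Malloc(100);     /* 100 <= 256: served from a pool  */
    void *r = PyObject_Malloc(1000);    /* > 256: routed to system malloc  */
    PyObject_Free(q);
    PyObject_Free(r);
#endif
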
/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list. Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);            /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools. This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools. This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management. We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1.  First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena. */
                free((void *)ao->address);
                ao->address = 0;                /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas. Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted. The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed. In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4.  Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3:  We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);            /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}


/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for nbytes < 0 is not required.
     */
    if (nbytes > PY_SSIZE_T_MAX)
        return NULL;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking. If
             * it's shrinking, there's a tradeoff: it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it. The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
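            /* Worked example (illustrative): shrinking a 64-byte block to
             * 40 bytes copies it, since 4*40 = 160 <= 3*64 = 192; shrinking
             * it to 56 bytes leaves it in place, since 4*56 = 224 > 192.
             */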
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt:  let C continue to manage this block.
     */
    if (nbytes)
        return realloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = realloc(p, 1);
    return bp ? bp : p;
}
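
/* Illustrative sketch only (editorial, not compiled): exercising the
 * contract documented above.  It assumes the allocations succeed; the
 * helper name is hypothetical and not part of the allocator.
 */
#if 0
static void
_example_realloc_contract(void)
{
    void *q = PyObject_Realloc(NULL, 16);   /* acts like PyObject_Malloc(16) */
    assert(q != NULL);
    q = PyObject_Realloc(q, 0);             /* nbytes==0 is not a free(q): */
    assert(q != NULL);                      /* a non-NULL result is promised */
    PyObject_Free(q);
}
#endif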

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly;
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uchar *q = (const uchar *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uchar *q = (uchar *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uchar)(n & 0xff);
        n >>= 8;
    }
}
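
/* Illustrative sketch only (editorial, not compiled): read_size_t() is
 * the inverse of write_size_t() regardless of the host's native byte
 * order, since both walk the bytes explicitly.  The helper name is
 * hypothetical.
 */
#if 0
static void
_example_size_t_roundtrip(void)
{
    uchar buf[SST];                     /* exactly sizeof(size_t) bytes */
    write_size_t(buf, (size_t)123456789);
    assert(read_size_t(buf) == (size_t)123456789);
}
#endif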

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif /* Py_DEBUG */

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under- writes and reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over- writes and reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
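
/* Worked example (editorial, hypothetical values): with S == 8 and a
 * request of n == 3 bytes, the underlying allocator supplies
 * 3 + 4*8 == 35 bytes, laid out as
 *
 *     p[0:8]     00 00 00 00 00 00 00 03   (big-endian byte count)
 *     p[8:16]    FB FB FB FB FB FB FB FB   (leading pad)
 *     p[16:19]   CB CB CB                  (&p[16] is handed to the caller)
 *     p[19:27]   FB FB FB FB FB FB FB FB   (trailing pad)
 *     p[27:35]   big-endian serial number
 */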

void *
_PyObject_DebugMalloc(size_t nbytes)
{
    uchar *p;           /* base address of malloc'ed block */
    uchar *tail;        /* p + 2*SST + nbytes == pointer to tail pad bytes */
    size_t total;       /* nbytes + 4*SST */

    bumpserialno();
    total = nbytes + 4*SST;
    if (total < nbytes)
        /* overflow:  can't represent total as a size_t */
        return NULL;

    p = (uchar *)PyObject_Malloc(total);
    if (p == NULL)
        return NULL;

    write_size_t(p, nbytes);
    memset(p + SST, FORBIDDENBYTE, SST);

    if (nbytes > 0)
        memset(p + 2*SST, CLEANBYTE, nbytes);

    tail = p + 2*SST + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    return p + 2*SST;
}

/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
void
_PyObject_DebugFree(void *p)
{
    uchar *q = (uchar *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    if (p == NULL)
        return;
    _PyObject_DebugCheckAddress(p);
    nbytes = read_size_t(q);
    if (nbytes > 0)
        memset(q, DEADBYTE, nbytes);
    PyObject_Free(q);
}

void *
_PyObject_DebugRealloc(void *p, size_t nbytes)
{
    uchar *q = (uchar *)p;
    uchar *tail;
    size_t total;       /* nbytes + 4*SST */
    size_t original_nbytes;
    int i;

    if (p == NULL)
        return _PyObject_DebugMalloc(nbytes);

    _PyObject_DebugCheckAddress(p);
    bumpserialno();
    original_nbytes = read_size_t(q - 2*SST);
    total = nbytes + 4*SST;
    if (total < nbytes)
        /* overflow:  can't represent total as a size_t */
        return NULL;

    if (nbytes < original_nbytes) {
        /* shrinking:  mark old extra memory dead */
        memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
    }

    /* Resize and add decorations. */
    q = (uchar *)PyObject_Realloc(q - 2*SST, total);
    if (q == NULL)
        return NULL;

    write_size_t(q, nbytes);
    for (i = 0; i < SST; ++i)
        assert(q[SST + i] == FORBIDDENBYTE);
    q += 2*SST;
    tail = q + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(q + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 */
void
_PyObject_DebugCheckAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    char *msg;
    size_t nbytes;
    const uchar *tail;
    int i;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    const uchar *tail;
    size_t nbytes, serial;
    int i;
    int ok;

    fprintf(stderr, "Debug memory block at address p=%p:\n", p);
    if (p == NULL)
        return;

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST, SST);
    ok = 1;
    for (i = 1; i <= SST; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST; i >= 1; --i) {
            const uchar byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "       may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uchar byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
}

static size_t
printone(const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, stderr);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', stderr);
    fputc('=', stderr);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        uint digit = (uint)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, stderr);

    return origvalue;
}
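
/* Example output (editorial): printone("# times object malloc called",
 * 1234567) writes a line of the form
 *
 *     # times object malloc called       =            1,234,567
 *
 * i.e. the message padded to 35 columns, an '=', and the value
 * right-justified in a 21-column field with thousands separators.
 */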

/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(void)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint poolsinarena;
        uint j;
        uptr base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uptr)NULL)
            continue;
        narenas += 1;

        poolsinarena = arenas[i].ntotalpools;
        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uptr)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uptr)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uptr) arenas[i].pool_address);
        for (j = 0;
             base < (uptr) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
                assert(pool_is_in_list(p, arenas[i].freepools));
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', stderr);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          stderr);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(stderr, "%5u %6u "
                        "%11" PY_FORMAT_SIZE_T "u "
                        "%15" PY_FORMAT_SIZE_T "u "
                        "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', stderr);
    (void)printone("# times object malloc called", serialno);

    (void)printone("# arenas allocated total", ntimes_arena_allocated);
    (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone("# arenas highwater mark", narenas_highwater);
    (void)printone("# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(buf, narenas * ARENA_SIZE);

    fputc('\n', stderr);

    total = printone("# bytes in allocated blocks", allocated_bytes);
    total += printone("# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(buf, (size_t)numfreepools * POOL_SIZE);

    total += printone("# bytes lost to pool headers", pool_header_bytes);
    total += printone("# bytes lost to quantization", quantization);
    total += printone("# bytes lost to arena alignment", arena_alignment);
    (void)printone("Total", total);
}

#endif /* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it since the definition is
 * after the reference.
 */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
    return pool->arenaindex < maxarenas &&
           (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
           arenas[pool->arenaindex].address != 0;
}
#endif