#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory -->     |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------>     |
        ______________________________________________________________    |
       [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |       |
        __________________________________________________________________
       [    Underlying general-purpose allocator (ex: C library malloc)  ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
        _______________________________________________________________________
       [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
        __________________________________   __________________________________
       [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerate over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *       9-16                    16                       1
 *      17-24                    24                       2
 *      25-32                    32                       3
 *      33-40                    40                       4
 *      41-48                    48                       5
 *      49-56                    56                       6
 *      57-64                    64                       7
 *      65-72                    72                       8
 *       ...                    ...                     ...
 *    241-248                   248                      30
 *    249-256                   256                      31
 *
 * 0, 257 and up: routed to the underlying allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
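
/* Worked example (illustrative only): a 20-byte request maps through the
 * macros above, mirroring the size-class table in the strategy abstract and
 * the "size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT" computation used by
 * PyObject_Malloc below:
 *
 *     size_t nbytes = 20;
 *     uint idx = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;    idx == 2
 *     uint blocksize = INDEX2SIZE(idx);                    blocksize == 24
 *
 * so a 20-byte request is served from the 24-byte size class (idx 2).
 */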

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory". It's an addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)      (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD   ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
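
/* Illustrative arithmetic (assumes the default 4K POOL_SIZE above and a
 * pool_header that rounds up to 32 bytes; the true POOL_OVERHEAD is
 * platform-dependent):
 *
 *     NUMBLOCKS(0)  == (4096 - 32) / 8   == 508    8-byte blocks per pool
 *     NUMBLOCKS(31) == (4096 - 32) / 256 == 15     256-byte blocks per pool
 *
 * POOL_ADDR simply masks off the low bits, so any address inside a pool
 * recovers the pool's header, e.g. POOL_ADDR(0x12345678) == 0x12345000
 * with 4K pools.
 */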

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's no longer linked to from anything, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member. The prevpool member has no meaning in this case.
    Empty pools have no inherent size class: the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with the
fact that pymalloc strives at all levels (arena, pool, and block) never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.
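
As a sketch of that stitching (illustrative only; the real statements appear
in PyObject_Malloc and PyObject_Free below), each free block's first bytes
hold a pointer to the next free block, so both freeing and allocating are
O(1) pointer updates:

    freeing block p:
        *(block **)p = pool->freeblock;
        pool->freeblock = (block *)p;

    allocating from the head of the list:
        bp = pool->freeblock;
        pool->freeblock = *(block **)bp;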

Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
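
/* Consequence of the trick above (illustrative only): the list for size
 * class i is empty exactly when its fudged-up header points at itself,
 * which is how PyObject_Malloc below tests for an available used pool:
 *
 *     pool = usedpools[size + size];
 *     if (pool != pool->nextpool) {
 *         ... at least one partially used pool of this size class ...
 *     }
 */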

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static ulong narenas_currently_allocated = 0;

#ifdef PYMALLOC_DEBUG
/* Total number of times malloc() was called to allocate an arena. */
static ulong ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static ulong narenas_highwater = 0;
#endif

/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;        /* overflow */
        nbytes = numarenas * sizeof(*arenas);
        if (nbytes / sizeof(*arenas) != numarenas)
            return NULL;        /* overflow */
        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;      /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    arenaobj->address = (uptr)malloc(ARENA_SIZE);
    if (arenaobj->address == 0) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }

    ++narenas_currently_allocated;
#ifdef PYMALLOC_DEBUG
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
#endif
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
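
/* Growth illustration (assumes INITIAL_ARENA_OBJECTS == 16 and the 256KB
 * ARENA_SIZE defined above): the `arenas` vector grows 16 -> 32 -> 64 -> ...
 * slots, i.e. 4MB -> 8MB -> 16MB of small-object capacity per doubling.
 * The two overflow guards in new_arena() catch `maxarenas << 1` wrapping
 * around to a small value, and `numarenas * sizeof(*arenas)` wrapping past
 * the range of size_t, respectively.
 */
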
/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated. `arenas` is still NULL in that
case. We're relying on the fact that maxarenas is also 0 in that case, so that
(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address. In addition, AO.address is not 0 (no arena can start at address 0
(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash. If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5. Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers. The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in small constant time, independent of the number of arenas
obmalloc controls. Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                    \
    ((POOL)->arenaindex < maxarenas &&                  \
     (uptr)(P) - arenas[(POOL)->arenaindex].address < (uptr)ARENA_SIZE && \
     arenas[(POOL)->arenaindex].address != 0)
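
/* Illustrative arithmetic for the unsigned-compare trick above (made-up
 * addresses, assuming ARENA_SIZE == 256KB == 0x40000):
 *
 *     B = 0x100000 (arena base)
 *     P = 0x123456: (uptr)P - B == 0x23456, which is <  0x40000: in range
 *     P = 0x0ff000: (uptr)P - B wraps to a huge unsigned value,
 *                   certainly >= 0x40000:                 out of range
 *
 * so the single unsigned comparison covers both the lower and the upper
 * bound at once.
 */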

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind. Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind. By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers. This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branch prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available: use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list. However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated: remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                           usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0: it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                           (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                       usable_arenas);
            /* Unlink the arena: it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list. Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty: leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty: unlink from usedpools, and
             * link to the front of freepools. This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools. This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management. We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1. First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena. */
                free((void *)ao->address);
                ao->address = 0;        /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2. Put ao at the head of
                 * usable_arenas. Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted. The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed. In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4. Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3: We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking. If
             * it's shrinking, there's a tradeoff: it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it. The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
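        /* Worked example of the shrink rule (illustrative only): shrinking
         * a 112-byte block to 88 bytes keeps it in place (4*88 > 3*112),
         * while shrinking it to 80 bytes copies it to a smaller size class
         * (4*80 <= 3*112), since that shaves off at least 25%.
         */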
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001152 bp = PyObject_Malloc(nbytes);
Tim Peters84c1b972002-04-04 04:44:32 +00001153 if (bp != NULL) {
1154 memcpy(bp, p, size);
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001155 PyObject_Free(p);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001156 }
Tim Peters84c1b972002-04-04 04:44:32 +00001157 return bp;
1158 }
Tim Petersecc6e6a2005-07-10 22:30:55 +00001159 /* We're not managing this block. If nbytes <=
1160 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1161 * block. However, if we do, we need to copy the valid data from
1162 * the C-managed block to one of our blocks, and there's no portable
1163 * way to know how much of the memory space starting at p is valid.
1164 * As bug 1185883 pointed out the hard way, it's possible that the
1165 * C-managed block is "at the end" of allocated VM space, so that
1166 * a memory fault can occur if we try to copy nbytes bytes starting
1167 * at p. Instead we punt: let C continue to manage this block.
1168 */
1169 if (nbytes)
1170 return realloc(p, nbytes);
1171 /* C doesn't define the result of realloc(p, 0) (it may or may not
1172 * return NULL then), but Python's docs promise that nbytes==0 never
1173 * returns NULL. We don't pass 0 to realloc(), to avoid that endcase
1174 * to begin with. Even then, we can't be sure that realloc() won't
1175 * return NULL.
1176 */
1177 bp = realloc(p, 1);
1178 return bp ? bp : p;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001179}
1180
Tim Peters1221c0a2002-03-23 00:20:15 +00001181#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001182
1183/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001184/* pymalloc not enabled: Redirect the entry points to malloc. These will
1185 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001186
Tim Petersce7fb9b2002-03-23 00:28:57 +00001187void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001188PyObject_Malloc(size_t n)
Tim Peters1221c0a2002-03-23 00:20:15 +00001189{
1190 return PyMem_MALLOC(n);
1191}
1192
Tim Petersce7fb9b2002-03-23 00:28:57 +00001193void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001194PyObject_Realloc(void *p, size_t n)
Tim Peters1221c0a2002-03-23 00:20:15 +00001195{
1196 return PyMem_REALLOC(p, n);
1197}
1198
1199void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001200PyObject_Free(void *p)
Tim Peters1221c0a2002-03-23 00:20:15 +00001201{
1202 PyMem_FREE(p);
1203}
1204#endif /* WITH_PYMALLOC */
1205
Tim Petersddea2082002-03-23 10:03:50 +00001206#ifdef PYMALLOC_DEBUG
1207/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001208/* A x-platform debugging allocator. This doesn't manage memory directly,
1209 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1210 */
Tim Petersddea2082002-03-23 10:03:50 +00001211
Tim Petersf6fb5012002-04-12 07:38:53 +00001212/* Special bytes broadcast into debug memory blocks at appropriate times.
1213 * Strings of these are unlikely to be valid addresses, floats, ints or
1214 * 7-bit ASCII.
1215 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */
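/* The hex values are presumably mnemonic:  0xCB suggests "Clean Byte",
 * 0xDB "Dead Byte", and 0xFB "Forbidden Byte".
 */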

static ulong serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

/* Read 4 bytes at p as a big-endian ulong. */
static ulong
read4(const void *p)
{
    const uchar *q = (const uchar *)p;
    return ((ulong)q[0] << 24) |
           ((ulong)q[1] << 16) |
           ((ulong)q[2] <<  8) |
            (ulong)q[3];
}

/* Write the 4 least-significant bytes of n as a big-endian unsigned int,
   MSB at address p, LSB at p+3. */
static void
write4(void *p, ulong n)
{
    uchar *q = (uchar *)p;
    q[0] = (uchar)((n >> 24) & 0xff);
    q[1] = (uchar)((n >> 16) & 0xff);
    q[2] = (uchar)((n >>  8) & 0xff);
    q[3] = (uchar)( n        & 0xff);
}
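
/* Illustrative sketch only (not part of the allocator):  a quick sanity
 * check of the big-endian round trip through write4()/read4().  The
 * function name is hypothetical.
 */
#if 0
static void
check_read4_write4(void)
{
    uchar buf[4];
    write4(buf, 0x01020304UL);
    assert(buf[0] == 0x01 && buf[3] == 0x04);   /* MSB is stored first */
    assert(read4(buf) == 0x01020304UL);
}
#endif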

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* The debug malloc asks for 16 extra bytes and fills them with useful stuff,
   here calling the underlying malloc's result p:

p[0:4]
    Number of bytes originally asked for.  4-byte unsigned integer,
    big-endian (easier to read in a memory dump).
p[4:8]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and under-reads.
p[8:8+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[8] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[8+n:8+n+4]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and over-reads.
p[8+n+4:8+n+8]
    A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
    and _PyObject_DebugRealloc.
    4-byte unsigned integer, big-endian.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/

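/* Illustrative sketch only (not part of the allocator's API):  given the
 * address handed back to the caller (&p[8] above), the debug fields can be
 * read back like this.  The helper name is hypothetical.
 */
#if 0
static void
show_debug_fields(const void *user)
{
    const uchar *q = (const uchar *)user;
    ulong nbytes = read4(q - 8);            /* original request size */
    ulong serial = read4(q + nbytes + 4);   /* debug allocation serial # */
    fprintf(stderr, "%lu bytes, serial #%lu\n", nbytes, serial);
}
#endif
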
void *
_PyObject_DebugMalloc(size_t nbytes)
{
    uchar *p;       /* base address of malloc'ed block */
    uchar *tail;    /* p + 8 + nbytes == pointer to tail pad bytes */
    size_t total;   /* nbytes + 16 */

    bumpserialno();
    total = nbytes + 16;
#if SIZEOF_SIZE_T < 8
    /* XXX do this check only on 32-bit machines */
    if (total < nbytes || (total >> 31) > 1) {
        /* overflow, or we can't represent it in 4 bytes */
        /* Obscure:  can't do (total >> 32) != 0 instead, because
           C doesn't define what happens for a right-shift of 32
           when size_t is a 32-bit type.  At least C guarantees
           size_t is an unsigned type. */
        return NULL;
    }
#endif

    p = (uchar *)PyObject_Malloc(total);
    if (p == NULL)
        return NULL;

    write4(p, (ulong)nbytes);
    p[4] = p[5] = p[6] = p[7] = FORBIDDENBYTE;

    if (nbytes > 0)
        memset(p+8, CLEANBYTE, nbytes);

    tail = p + 8 + nbytes;
    tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
    write4(tail + 4, serialno);

    return p+8;
}

/* The debug free first checks the 8 bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
void
_PyObject_DebugFree(void *p)
{
    uchar *q = (uchar *)p;
    size_t nbytes;

    if (p == NULL)
        return;
    _PyObject_DebugCheckAddress(p);
    nbytes = read4(q-8);
    if (nbytes > 0)
        memset(q, DEADBYTE, nbytes);
    PyObject_Free(q-8);
}
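
/* A use-after-free is consequently easy to spot in a debugger:  memory
 * released through this path reads back as a run of DEADBYTE (0xDB) until
 * the underlying allocator hands it out again.
 */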

void *
_PyObject_DebugRealloc(void *p, size_t nbytes)
{
    uchar *q = (uchar *)p;
    uchar *tail;
    size_t total;   /* nbytes + 16 */
    size_t original_nbytes;

    if (p == NULL)
        return _PyObject_DebugMalloc(nbytes);

    _PyObject_DebugCheckAddress(p);
    bumpserialno();
    original_nbytes = read4(q-8);
    total = nbytes + 16;
    if (total < nbytes || (total >> 31) > 1) {
        /* overflow, or we can't represent it in 4 bytes */
        return NULL;
    }

    if (nbytes < original_nbytes) {
        /* shrinking:  mark old extra memory dead */
        memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
    }

    /* Resize and add decorations. */
    q = (uchar *)PyObject_Realloc(q-8, total);
    if (q == NULL)
        return NULL;

    write4(q, (ulong)nbytes);
    assert(q[4] == FORBIDDENBYTE &&
           q[5] == FORBIDDENBYTE &&
           q[6] == FORBIDDENBYTE &&
           q[7] == FORBIDDENBYTE);
    q += 8;
    tail = q + nbytes;
    tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE;
    write4(tail + 4, serialno);

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(q + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 */
void
_PyObject_DebugCheckAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    char *msg;
    ulong nbytes;
    const uchar *tail;
    int i;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = 4; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read4(q-8);
    tail = q + nbytes;
    for (i = 0; i < 4; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}

/* Display info to stderr about the memory block at p. */
void
_PyObject_DebugDumpAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    const uchar *tail;
    ulong nbytes, serial;
    int i;

    fprintf(stderr, "Debug memory block at address p=%p:\n", p);
    if (p == NULL)
        return;

    nbytes = read4(q-8);
    fprintf(stderr, "    %lu bytes originally requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fputs("    The 4 pad bytes at p-4 are ", stderr);
    if (*(q-4) == FORBIDDENBYTE &&
        *(q-3) == FORBIDDENBYTE &&
        *(q-2) == FORBIDDENBYTE &&
        *(q-1) == FORBIDDENBYTE) {
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    }
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 4; i >= 1; --i) {
            const uchar byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "    may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The 4 pad bytes at tail=%p are ", tail);
    if (tail[0] == FORBIDDENBYTE &&
        tail[1] == FORBIDDENBYTE &&
        tail[2] == FORBIDDENBYTE &&
        tail[3] == FORBIDDENBYTE) {
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    }
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < 4; ++i) {
            const uchar byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read4(tail+4);
    fprintf(stderr, "    The block was made by call #%lu to "
                    "debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        int i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
}

static ulong
printone(const char* msg, ulong value)
{
    int i, k;
    char buf[100];
    ulong origvalue = value;

    fputs(msg, stderr);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', stderr);
    fputc('=', stderr);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        ulong nextvalue = value / 10UL;
        uint digit = value - nextvalue * 10UL;
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, stderr);

    return origvalue;
}

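/* Illustrative sketch only:  printone() pads the label toward column 35,
 * then prints the value right-justified with comma grouping.  The call
 * below (hypothetical) would print roughly
 *     # bytes in allocated blocks        =        1,234,567
 */
#if 0
static void
printone_example(void)
{
    (void)printone("# bytes in allocated blocks", 1234567UL);
}
#endif
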
/* Print summary info to stderr about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
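/* (In a PYMALLOC_DEBUG build, the interpreter is expected to call this at
 * shutdown when the PYTHONMALLOCSTATS environment variable is set; see
 * pythonrun.c.)
 */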
void
_PyObject_DebugMallocStats(void)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    ulong numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    ulong numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    ulong numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    ulong allocated_bytes = 0;
    /* total # of available bytes in used pools */
    ulong available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    ulong arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    ulong pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    ulong quantization = 0;
    /* # of arenas actually allocated. */
    ulong narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    ulong total;
    char buf[128];

    fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint poolsinarena;
        uint j;
        uptr base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uptr)NULL)
            continue;
        narenas += 1;

        poolsinarena = arenas[i].ntotalpools;
        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uptr)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uptr)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uptr) arenas[i].pool_address);
        for (j = 0; base < (uptr) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
                assert(pool_is_in_list(p, arenas[i].freepools));
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', stderr);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          stderr);

    for (i = 0; i < numclasses; ++i) {
        ulong p = numpools[i];
        ulong b = numblocks[i];
        ulong f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(stderr, "%5u %6u %11lu %15lu %13lu\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', stderr);
    (void)printone("# times object malloc called", serialno);

    (void)printone("# arenas allocated total", ntimes_arena_allocated);
    (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone("# arenas highwater mark", narenas_highwater);
    (void)printone("# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%lu arenas * %d bytes/arena", narenas, ARENA_SIZE);
    (void)printone(buf, narenas * ARENA_SIZE);

    fputc('\n', stderr);

    total = printone("# bytes in allocated blocks", allocated_bytes);
    total += printone("# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(buf, (ulong)numfreepools * POOL_SIZE);

    total += printone("# bytes lost to pool headers", pool_header_bytes);
    total += printone("# bytes lost to quantization", quantization);
    total += printone("# bytes lost to arena alignment", arena_alignment);
    (void)printone("Total", total);
}

#endif  /* PYMALLOC_DEBUG */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it since the definition is
 * after the reference.
 */
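/* Note on the range test below:  it relies on unsigned arithmetic.  If P
 * is below the arena base, (uptr)P - address wraps around to a huge value
 * and the "< ARENA_SIZE" comparison fails, so no separate lower-bound
 * check is needed.
 */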
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
    return pool->arenaindex < maxarenas &&
           (uptr)P - arenas[pool->arenaindex].address < (uptr)ARENA_SIZE &&
           arenas[pool->arenaindex].address != 0;
}
#endif