#include "Python.h"

#ifdef WITH_PYMALLOC

#ifdef HAVE_MMAP
 #include <sys/mman.h>
 #ifdef MAP_ANONYMOUS
  #define ARENAS_USE_MMAP
 #endif
#endif

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
# define UNLIKELY(value) __builtin_expect((value), 0)
#else
# define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on an array of free lists". The main drawback
 * of simple segregated storage is that we might end up with a lot of
 * reserved memory for the different free lists, which can degenerate over
 * time. To avoid this, we partition each free list into pools and share
 * the reserved space dynamically between all free lists. This technique
 * is quite efficient for memory-intensive programs which allocate mainly
 * small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 * allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
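
/*
 * A quick worked example (illustrative only, not used by the code): the
 * size class index for a request is computed in PyObject_Malloc below as
 *
 *     size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
 *
 * so nbytes == 20 gives (20 - 1) >> 3 == 2, and INDEX2SIZE(2) ==
 * (2 + 1) << 3 == 24: a 20-byte request is served from the 24-byte size
 * class, matching the size class table above.
 */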

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to be served from the preallocated memory pools. You can
 * tune this value according to your application behaviour and memory
 * needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit platforms.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory"; it wastes addressable
 * range, not physical memory.
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */
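
/*
 * Purely illustrative sketch (not compiled in): if per-allocator locking
 * ever became worthwhile, the no-op macros above could be bound to the
 * existing PyThread API along these lines:
 *
 *   #define SIMPLELOCK_DECL(lock)   static PyThread_type_lock lock;
 *   #define SIMPLELOCK_INIT(lock)   ((lock) = PyThread_allocate_lock())
 *   #define SIMPLELOCK_FINI(lock)   PyThread_free_lock(lock)
 *   #define SIMPLELOCK_LOCK(lock)   PyThread_acquire_lock((lock), 1)
 *   #define SIMPLELOCK_UNLOCK(lock) PyThread_release_lock(lock)
 *
 * Whether the extra locking would pay for itself is exactly the open
 * question raised in the "Locking" note above.
 */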

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
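
/*
 * Worked numbers for orientation (illustrative only, assuming a typical
 * 64-bit LP64 build where struct pool_header occupies 48 bytes, so
 * POOL_OVERHEAD == ROUNDUP(48) == 48):
 *
 *     NUMBLOCKS(0)  == (4096 - 48) / 8   == 506  8-byte blocks per pool
 *     NUMBLOCKS(63) == (4096 - 48) / 512 ==   7  512-byte blocks per pool
 *
 * POOL_ADDR just clears the low bits: with the default 4K pools,
 * POOL_ADDR(0x12345abc) is (poolp)0x12345000.
 */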

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member. The prevpool member has no meaning in
    this case.
    Empty pools have no inherent size class: the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the
    pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* all linked together when a pool
is initialized. Instead only "the first two" (lowest-address) blocks are
set up: the first such block is returned, and pool->freeblock is set to a
one-block list holding the second such block. This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header's nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.
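
For concreteness, an illustrative example (assuming a 64-bit build where
POOL_OVERHEAD is 48): in a fresh pool of size class 0 (8-byte blocks), the
first block starts at offset 48, nextoffset starts at 48 + 2*8 == 64, and
maxnextoffset is 4096 - 8 == 4088. Each time the free list runs dry, one
more virgin block is carved off at nextoffset and nextoffset grows by 8;
once nextoffset > 4088, every block has been handed out at least once.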


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should not be more than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
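
/*
 * Illustrative consequence of the PTA trick above (no extra machinery):
 * before any pool of size class i exists, the fudged-up header satisfies
 *
 *     usedpools[i+i]->nextpool == usedpools[i+i]
 *     usedpools[i+i]->prevpool == usedpools[i+i]
 *
 * which is why PyObject_Malloc below can test pool != pool->nextpool to
 * decide whether a used pool of the requested size class exists.
 */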
466
Thomas Woutersa9773292006-04-21 09:43:23 +0000467/*==========================================================================
468Arena management.
Neil Schemenauera35c6882001-02-27 04:45:05 +0000469
Thomas Woutersa9773292006-04-21 09:43:23 +0000470`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
471which may not be currently used (== they're arena_objects that aren't
472currently associated with an allocated arena). Note that arenas proper are
473separately malloc'ed.
Neil Schemenauera35c6882001-02-27 04:45:05 +0000474
Thomas Woutersa9773292006-04-21 09:43:23 +0000475Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
476we do try to free() arenas, and use some mild heuristic strategies to increase
477the likelihood that arenas eventually can be freed.
478
479unused_arena_objects
480
481 This is a singly-linked list of the arena_objects that are currently not
482 being used (no arena is associated with them). Objects are taken off the
483 head of the list in new_arena(), and are pushed on the head of the list in
484 PyObject_Free() when the arena is empty. Key invariant: an arena_object
485 is on this list if and only if its .address member is 0.
486
487usable_arenas
488
489 This is a doubly-linked list of the arena_objects associated with arenas
490 that have pools available. These pools are either waiting to be reused,
491 or have not been used before. The list is sorted to have the most-
492 allocated arenas first (ascending order based on the nfreepools member).
493 This means that the next allocation will come from a heavily used arena,
494 which gives the nearly empty arenas a chance to be returned to the system.
495 In my unscientific tests this dramatically improved the number of arenas
496 that could be freed.
497
498Note that an arena_object associated with an arena all of whose pools are
499currently in use isn't on either list.
500*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16
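
/* For illustration: with these defaults, new_arena() below doubles the
 * `arenas` vector (16 -> 32 -> 64 -> ... slots) whenever it runs out of
 * unused arena_objects, so the amount of arena space the vector can
 * describe doubles too: 4MB, 8MB, 16MB, ...
 */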

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    int err;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats(stderr);
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                   &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
#ifdef ARENAS_USE_MMAP
    address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    err = (address == MAP_FAILED);
#else
    address = malloc(ARENA_SIZE);
    err = (address == 0);
#endif
    if (err) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uptr)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated. `arenas` is still NULL in that
case. We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address. In addition, AO.address is not 0 (no arena can start at address 0
(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash. If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5. Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers. The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls. Since this test is needed at every entry point, it's
extremely desirable that it be this fast.

Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated
by Python, it is important that (POOL)->arenaindex is read only once, as
another thread may be concurrently modifying the value without holding the
GIL. To accomplish this, the arenaindex_temp variable is used to store
(POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's
execution. The caller of the macro is responsible for declaring this
variable.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                                    \
    ((arenaindex_temp = (POOL)->arenaindex) < maxarenas &&              \
     (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&  \
     arenas[arenaindex_temp].address != 0)
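
/*
 * A worked example with made-up numbers: suppose arenas[5].address ==
 * 0x7f0000100000 and ARENA_SIZE == 0x40000 (256KB). For P ==
 * 0x7f0000123456 whose pool header holds arenaindex == 5, the middle
 * clause computes 0x23456 < 0x40000, which holds, so P is recognized as
 * pymalloc memory. For a foreign pointer whose trash arenaindex happens
 * to be 5, the unsigned subtraction wraps to a huge value and the same
 * clause correctly fails.
 */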


/* This is only useful when running memory debuggers such as
 * Purify or Valgrind. Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind. By making it a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
                          (__GNUC__ >= 4))
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers. This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1))
        running_on_valgrind = RUNNING_ON_VALGRIND;
    if (UNLIKELY(running_on_valgrind))
        goto redirect;
#endif

    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for nbytes < 0 is not required.
     */
    if (nbytes > PY_SSIZE_T_MAX)
        return NULL;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available: use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list. However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated: remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0: it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena: it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}
966
967/* free */
968
Neil Schemenauerd2560cd2002-04-12 03:10:20 +0000969#undef PyObject_Free
Neil Schemenauera35c6882001-02-27 04:45:05 +0000970void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +0000971PyObject_Free(void *p)
Neil Schemenauera35c6882001-02-27 04:45:05 +0000972{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000973 poolp pool;
974 block *lastfree;
975 poolp next, prev;
976 uint size;
Antoine Pitroub7fb2e22011-01-07 21:43:59 +0000977#ifndef Py_USING_MEMORY_DEBUGGER
978 uint arenaindex_temp;
979#endif
Neil Schemenauera35c6882001-02-27 04:45:05 +0000980
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000981 if (p == NULL) /* free(NULL) has no effect */
982 return;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000983
Benjamin Peterson05159c42009-12-03 03:01:27 +0000984#ifdef WITH_VALGRIND
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000985 if (UNLIKELY(running_on_valgrind > 0))
986 goto redirect;
Benjamin Peterson05159c42009-12-03 03:01:27 +0000987#endif
988
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000989 pool = POOL_ADDR(p);
990 if (Py_ADDRESS_IN_RANGE(p, pool)) {
991 /* We allocated this address. */
992 LOCK();
993 /* Link p to the start of the pool's freeblock list. Since
994 * the pool had at least the p block outstanding, the pool
995 * wasn't empty (so it's already in a usedpools[] list, or
996 * was full and is in no list -- it's not in the freeblocks
997 * list in any case).
998 */
999 assert(pool->ref.count > 0); /* else it was empty */
1000 *(block **)p = lastfree = pool->freeblock;
1001 pool->freeblock = (block *)p;
1002 if (lastfree) {
1003 struct arena_object* ao;
1004 uint nf; /* ao->nfreepools */
Thomas Woutersa9773292006-04-21 09:43:23 +00001005
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001006 /* freeblock wasn't NULL, so the pool wasn't full,
1007 * and the pool is in a usedpools[] list.
1008 */
1009 if (--pool->ref.count != 0) {
1010 /* pool isn't empty: leave it in usedpools */
1011 UNLOCK();
1012 return;
1013 }
1014 /* Pool is now empty: unlink from usedpools, and
1015 * link to the front of freepools. This ensures that
1016 * previously freed pools will be allocated later
1017 * (being not referenced, they are perhaps paged out).
1018 */
1019 next = pool->nextpool;
1020 prev = pool->prevpool;
1021 next->prevpool = prev;
1022 prev->nextpool = next;
Thomas Woutersa9773292006-04-21 09:43:23 +00001023
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001024 /* Link the pool to freepools. This is a singly-linked
1025 * list, and pool->prevpool isn't used there.
1026 */
1027 ao = &arenas[pool->arenaindex];
1028 pool->nextpool = ao->freepools;
1029 ao->freepools = pool;
1030 nf = ++ao->nfreepools;
Thomas Woutersa9773292006-04-21 09:43:23 +00001031
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001032 /* All the rest is arena management. We just freed
1033 * a pool, and there are 4 cases for arena mgmt:
1034 * 1. If all the pools are free, return the arena to
1035 * the system free().
1036 * 2. If this is the only free pool in the arena,
1037 * add the arena back to the `usable_arenas` list.
1038 * 3. If the "next" arena has a smaller count of free
1039 * pools, we have to "slide this arena right" to
1040 * restore that usable_arenas is sorted in order of
1041 * nfreepools.
1042 * 4. Else there's nothing more to do.
1043 */
1044 if (nf == ao->ntotalpools) {
1045 /* Case 1. First unlink ao from usable_arenas.
1046 */
1047 assert(ao->prevarena == NULL ||
1048 ao->prevarena->address != 0);
1049 assert(ao ->nextarena == NULL ||
1050 ao->nextarena->address != 0);
Thomas Woutersa9773292006-04-21 09:43:23 +00001051
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001052 /* Fix the pointer in the prevarena, or the
1053 * usable_arenas pointer.
1054 */
1055 if (ao->prevarena == NULL) {
1056 usable_arenas = ao->nextarena;
1057 assert(usable_arenas == NULL ||
1058 usable_arenas->address != 0);
1059 }
1060 else {
1061 assert(ao->prevarena->nextarena == ao);
1062 ao->prevarena->nextarena =
1063 ao->nextarena;
1064 }
1065 /* Fix the pointer in the nextarena. */
1066 if (ao->nextarena != NULL) {
1067 assert(ao->nextarena->prevarena == ao);
1068 ao->nextarena->prevarena =
1069 ao->prevarena;
1070 }
1071 /* Record that this arena_object slot is
1072 * available to be reused.
1073 */
1074 ao->nextarena = unused_arena_objects;
1075 unused_arena_objects = ao;
Thomas Woutersa9773292006-04-21 09:43:23 +00001076
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001077 /* Free the entire arena. */
Antoine Pitrouf0effe62011-11-26 01:11:02 +01001078#ifdef ARENAS_USE_MMAP
1079 munmap((void *)ao->address, ARENA_SIZE);
1080#else
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001081 free((void *)ao->address);
Antoine Pitrouf0effe62011-11-26 01:11:02 +01001082#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001083 ao->address = 0; /* mark unassociated */
1084 --narenas_currently_allocated;
Thomas Woutersa9773292006-04-21 09:43:23 +00001085
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001086 UNLOCK();
1087 return;
1088 }
1089 if (nf == 1) {
1090 /* Case 2. Put ao at the head of
1091 * usable_arenas. Note that because
1092 * ao->nfreepools was 0 before, ao isn't
1093 * currently on the usable_arenas list.
1094 */
1095 ao->nextarena = usable_arenas;
1096 ao->prevarena = NULL;
1097 if (usable_arenas)
1098 usable_arenas->prevarena = ao;
1099 usable_arenas = ao;
1100 assert(usable_arenas->address != 0);
Thomas Woutersa9773292006-04-21 09:43:23 +00001101
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001102 UNLOCK();
1103 return;
1104 }
1105 /* If this arena is now out of order, we need to keep
1106 * the list sorted. The list is kept sorted so that
1107 * the "most full" arenas are used first, which allows
1108 * the nearly empty arenas to be completely freed. In
1109 * a few un-scientific tests, it seems like this
1110 * approach allowed a lot more memory to be freed.
1111 */
1112 if (ao->nextarena == NULL ||
1113 nf <= ao->nextarena->nfreepools) {
1114 /* Case 4. Nothing to do. */
1115 UNLOCK();
1116 return;
1117 }
1118 /* Case 3: We have to move the arena towards the end
1119 * of the list, because it has more free pools than
1120 * the arena to its right.
1121 * First unlink ao from usable_arenas.
1122 */
1123 if (ao->prevarena != NULL) {
1124 /* ao isn't at the head of the list */
1125 assert(ao->prevarena->nextarena == ao);
1126 ao->prevarena->nextarena = ao->nextarena;
1127 }
1128 else {
1129 /* ao is at the head of the list */
1130 assert(usable_arenas == ao);
1131 usable_arenas = ao->nextarena;
1132 }
1133 ao->nextarena->prevarena = ao->prevarena;
Thomas Woutersa9773292006-04-21 09:43:23 +00001134
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001135 /* Locate the new insertion point by iterating over
1136 * the list, using our nextarena pointer.
1137 */
1138 while (ao->nextarena != NULL &&
1139 nf > ao->nextarena->nfreepools) {
1140 ao->prevarena = ao->nextarena;
1141 ao->nextarena = ao->nextarena->nextarena;
1142 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001143
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001144 /* Insert ao at this point. */
1145 assert(ao->nextarena == NULL ||
1146 ao->prevarena == ao->nextarena->prevarena);
1147 assert(ao->prevarena->nextarena == ao->nextarena);
Thomas Woutersa9773292006-04-21 09:43:23 +00001148
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001149 ao->prevarena->nextarena = ao;
1150 if (ao->nextarena != NULL)
1151 ao->nextarena->prevarena = ao;
Thomas Woutersa9773292006-04-21 09:43:23 +00001152
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001153 /* Verify that the swaps worked. */
1154 assert(ao->nextarena == NULL ||
1155 nf <= ao->nextarena->nfreepools);
1156 assert(ao->prevarena == NULL ||
1157 nf > ao->prevarena->nfreepools);
1158 assert(ao->nextarena == NULL ||
1159 ao->nextarena->prevarena == ao);
1160 assert((usable_arenas == ao &&
1161 ao->prevarena == NULL) ||
1162 ao->prevarena->nextarena == ao);
Thomas Woutersa9773292006-04-21 09:43:23 +00001163
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001164 UNLOCK();
1165 return;
1166 }
1167 /* Pool was full, so doesn't currently live in any list:
1168 * link it to the front of the appropriate usedpools[] list.
1169 * This mimics LRU pool usage for new allocations and
1170 * targets optimal filling when several pools contain
1171 * blocks of the same size class.
1172 */
1173 --pool->ref.count;
1174 assert(pool->ref.count > 0); /* else the pool is empty */
1175 size = pool->szidx;
1176 next = usedpools[size + size];
1177 prev = next->prevpool;
1178 /* insert pool before next: prev <-> pool <-> next */
1179 pool->nextpool = next;
1180 pool->prevpool = prev;
1181 next->prevpool = pool;
1182 prev->nextpool = pool;
1183 UNLOCK();
1184 return;
1185 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001186
Benjamin Peterson05159c42009-12-03 03:01:27 +00001187#ifdef WITH_VALGRIND
1188redirect:
1189#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001190 /* We didn't allocate this address. */
1191 free(p);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001192}
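
/* Illustrative sketch (not part of the allocator, never compiled): the
 * Case 3 logic above keeps usable_arenas sorted by ascending nfreepools.
 * A minimal standalone model of that "slide right until sorted" step,
 * using a hypothetical node type, looks like this:
 */
#if 0
struct node {
    struct node *prev, *next;
    unsigned int nfreepools;
};

/* Reinsert the already-unlinked node n so the list stays sorted in
 * ascending nfreepools order, mirroring the while-loop above. */
static void
reinsert_sorted(struct node **head, struct node *n)
{
    struct node *prev = NULL, *cur = *head;
    while (cur != NULL && n->nfreepools > cur->nfreepools) {
        prev = cur;
        cur = cur->next;
    }
    n->prev = prev;
    n->next = cur;
    if (prev != NULL)
        prev->next = n;
    else
        *head = n;
    if (cur != NULL)
        cur->prev = n;
}
#endif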
1193
Tim Peters84c1b972002-04-04 04:44:32 +00001194/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1195 * then as the Python docs promise, we do not treat this like free(p), and
1196 * return a non-NULL result.
1197 */
Neil Schemenauera35c6882001-02-27 04:45:05 +00001198
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001199#undef PyObject_Realloc
Neil Schemenauera35c6882001-02-27 04:45:05 +00001200void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001201PyObject_Realloc(void *p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001202{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001203 void *bp;
1204 poolp pool;
1205 size_t size;
Antoine Pitroub7fb2e22011-01-07 21:43:59 +00001206#ifndef Py_USING_MEMORY_DEBUGGER
1207 uint arenaindex_temp;
1208#endif
Neil Schemenauera35c6882001-02-27 04:45:05 +00001209
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001210 if (p == NULL)
1211 return PyObject_Malloc(nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001212
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001213 /*
1214 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
1215 * Most python internals blindly use a signed Py_ssize_t to track
1216 * things without checking for overflows or negatives.
1217 * As size_t is unsigned, checking for nbytes < 0 is not required.
1218 */
1219 if (nbytes > PY_SSIZE_T_MAX)
1220 return NULL;
Georg Brandld492ad82008-07-23 16:13:07 +00001221
Benjamin Peterson05159c42009-12-03 03:01:27 +00001222#ifdef WITH_VALGRIND
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001223 /* Treat running_on_valgrind == -1 the same as 0 */
1224 if (UNLIKELY(running_on_valgrind > 0))
1225 goto redirect;
Benjamin Peterson05159c42009-12-03 03:01:27 +00001226#endif
1227
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001228 pool = POOL_ADDR(p);
1229 if (Py_ADDRESS_IN_RANGE(p, pool)) {
1230 /* We're in charge of this block */
1231 size = INDEX2SIZE(pool->szidx);
1232 if (nbytes <= size) {
1233 /* The block is staying the same or shrinking. If
1234 * it's shrinking, there's a tradeoff: it costs
1235 * cycles to copy the block to a smaller size class,
1236 * but it wastes memory not to copy it. The
1237 * compromise here is to copy on shrink only if at
1238 * least 25% of size can be shaved off.
1239 */
1240 if (4 * nbytes > 3 * size) {
1241 /* It's the same,
1242 * or shrinking and new/old > 3/4.
1243 */
1244 return p;
1245 }
1246 size = nbytes;
1247 }
1248 bp = PyObject_Malloc(nbytes);
1249 if (bp != NULL) {
1250 memcpy(bp, p, size);
1251 PyObject_Free(p);
1252 }
1253 return bp;
1254 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001255#ifdef WITH_VALGRIND
1256 redirect:
1257#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001258 /* We're not managing this block. If nbytes <=
1259 * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
1260 * block. However, if we do, we need to copy the valid data from
1261 * the C-managed block to one of our blocks, and there's no portable
1262 * way to know how much of the memory space starting at p is valid.
1263 * As bug 1185883 pointed out the hard way, it's possible that the
1264 * C-managed block is "at the end" of allocated VM space, so that
1265 * a memory fault can occur if we try to copy nbytes bytes starting
1266 * at p. Instead we punt: let C continue to manage this block.
1267 */
1268 if (nbytes)
1269 return realloc(p, nbytes);
1270 /* C doesn't define the result of realloc(p, 0) (it may or may not
1271 * return NULL then), but Python's docs promise that nbytes==0 never
1272     * returns NULL.  We don't pass 0 to realloc(), to avoid that end case
1273 * to begin with. Even then, we can't be sure that realloc() won't
1274 * return NULL.
1275 */
1276 bp = realloc(p, 1);
1277 return bp ? bp : p;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001278}
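
/* Illustrative sketch (never compiled): the copy-on-shrink compromise above
 * copies only when at least 25% of the block would be reclaimed, i.e. when
 * 4*nbytes <= 3*size.  For example, shrinking a 512-byte block to 400 bytes
 * (4*400 = 1600 > 3*512 = 1536) leaves it in place, while shrinking it to
 * 256 bytes (1024 <= 1536) copies it into a smaller size class.
 */
#if 0
/* Assumes nbytes <= size; nonzero means a copy is worthwhile. */
static int
should_copy_on_shrink(size_t nbytes, size_t size)
{
    return 4 * nbytes <= 3 * size;
}
#endif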
1279
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001280#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001281
1282/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001283/* pymalloc not enabled: Redirect the entry points to malloc. These will
1284 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001285
Tim Petersce7fb9b2002-03-23 00:28:57 +00001286void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001287PyObject_Malloc(size_t n)
Tim Peters1221c0a2002-03-23 00:20:15 +00001288{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001289 return PyMem_MALLOC(n);
Tim Peters1221c0a2002-03-23 00:20:15 +00001290}
1291
Tim Petersce7fb9b2002-03-23 00:28:57 +00001292void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001293PyObject_Realloc(void *p, size_t n)
Tim Peters1221c0a2002-03-23 00:20:15 +00001294{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001295 return PyMem_REALLOC(p, n);
Tim Peters1221c0a2002-03-23 00:20:15 +00001296}
1297
1298void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001299PyObject_Free(void *p)
Tim Peters1221c0a2002-03-23 00:20:15 +00001300{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001301 PyMem_FREE(p);
Tim Peters1221c0a2002-03-23 00:20:15 +00001302}
1303#endif /* WITH_PYMALLOC */
1304
Tim Petersddea2082002-03-23 10:03:50 +00001305#ifdef PYMALLOC_DEBUG
1306/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001307/* A cross-platform debugging allocator.  This doesn't manage memory directly;
1308 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1309 */
Tim Petersddea2082002-03-23 10:03:50 +00001310
Tim Petersf6fb5012002-04-12 07:38:53 +00001311/* Special bytes broadcast into debug memory blocks at appropriate times.
1312 * Strings of these are unlikely to be valid addresses, floats, ints or
1313 * 7-bit ASCII.
1314 */
1315#undef CLEANBYTE
1316#undef DEADBYTE
1317#undef FORBIDDENBYTE
1318#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001319#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001320#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001321
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001322/* We tag each block with an API ID in order to tag API violations */
1323#define _PYMALLOC_MEM_ID 'm' /* the PyMem_Malloc() API */
1324#define _PYMALLOC_OBJ_ID 'o' /* The PyObject_Malloc() API */
1325
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001326static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
Tim Petersddea2082002-03-23 10:03:50 +00001327
Tim Peterse0850172002-03-24 00:34:21 +00001328/* serialno is always incremented by calling this routine.  The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001329 * to supply a single place to set a breakpoint.
1330 */
Tim Peterse0850172002-03-24 00:34:21 +00001331static void
Neil Schemenauerbd02b142002-03-28 21:05:38 +00001332bumpserialno(void)
Tim Peterse0850172002-03-24 00:34:21 +00001333{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001334 ++serialno;
Tim Peterse0850172002-03-24 00:34:21 +00001335}
1336
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001337#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001338
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001339/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1340static size_t
1341read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001342{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001343 const uchar *q = (const uchar *)p;
1344 size_t result = *q++;
1345 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001346
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001347 for (i = SST; --i > 0; ++q)
1348 result = (result << 8) | *q;
1349 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001350}
1351
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001352/* Write n as a big-endian size_t, MSB at address p, LSB at
1353 * p + sizeof(size_t) - 1.
1354 */
Tim Petersddea2082002-03-23 10:03:50 +00001355static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001356write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001357{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001358 uchar *q = (uchar *)p + SST - 1;
1359 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001360
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001361 for (i = SST; --i >= 0; --q) {
1362 *q = (uchar)(n & 0xff);
1363 n >>= 8;
1364 }
Tim Petersddea2082002-03-23 10:03:50 +00001365}
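
/* Illustrative sketch (never compiled): read_size_t() and write_size_t()
 * round-trip a value in big-endian order regardless of host endianness,
 * so a memory dump always shows the most significant byte first.
 */
#if 0
static void
size_t_roundtrip_demo(void)
{
    uchar buf[SST];
    size_t n = (size_t)0x01020304;
    write_size_t(buf, n);
    /* The least significant byte always lands at the highest address. */
    assert(buf[SST - 1] == 0x04);
    assert(read_size_t(buf) == n);
}
#endif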
1366
Tim Peters08d82152002-04-18 22:25:03 +00001367#ifdef Py_DEBUG
1368/* Is target in the list? The list is traversed via the nextpool pointers.
1369 * The list may be NULL-terminated, or circular. Return 1 if target is in
1370 * list, else 0.
1371 */
1372static int
1373pool_is_in_list(const poolp target, poolp list)
1374{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001375 poolp origlist = list;
1376 assert(target != NULL);
1377 if (list == NULL)
1378 return 0;
1379 do {
1380 if (target == list)
1381 return 1;
1382 list = list->nextpool;
1383 } while (list != NULL && list != origlist);
1384 return 0;
Tim Peters08d82152002-04-18 22:25:03 +00001385}
1386
1387#else
1388#define pool_is_in_list(X, Y) 1
1389
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001390#endif /* Py_DEBUG */
Tim Peters08d82152002-04-18 22:25:03 +00001391
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001392/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1393   fills them with useful stuff; below, p names the underlying malloc's result:
Tim Petersddea2082002-03-23 10:03:50 +00001394
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001395p[0: S]
1396 Number of bytes originally asked for. This is a size_t, big-endian (easier
1397 to read in a memory dump).
1398p[S: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001399    Copies of FORBIDDENBYTE.  Used to catch under-writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001400p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001401 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001402 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001403 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001404 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001405p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001406    Copies of FORBIDDENBYTE.  Used to catch over-writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001407p[2*S+n+S: 2*S+n+2*S]
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001408 A serial number, incremented by 1 on each call to _PyObject_DebugMalloc
1409 and _PyObject_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001410 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001411 If "bad memory" is detected later, the serial number gives an
1412 excellent way to set a breakpoint on the next run, to capture the
1413 instant at which this block was passed out.
1414*/
1415
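/* Worked example of the layout above, assuming SST == 8 and a request of
 * n == 10 bytes: the debug allocator asks the underlying malloc for 42
 * bytes at some address p and hands the caller p+16:
 *
 *   p[0:8]    n == 10 as a big-endian size_t
 *   p[8]      the API id byte ('m' or 'o')
 *   p[9:16]   7 FORBIDDENBYTEs
 *   p[16:26]  10 CLEANBYTEs (the caller's memory; p+16 is returned)
 *   p[26:34]  8 FORBIDDENBYTEs
 *   p[34:42]  the serial number as a big-endian size_t
 *
 * A hypothetical helper (never compiled) that recovers the hidden fields
 * from the pointer the caller holds:
 */
#if 0
static void
inspect_debug_block(const void *user)
{
    const uchar *q = (const uchar *)user;
    size_t n      = read_size_t(q - 2*SST);    /* original request size */
    char   id     = (char)q[-SST];             /* API id byte */
    size_t serial = read_size_t(q + n + SST);  /* allocation serial number */
    (void)n; (void)id; (void)serial;
}
#endif
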
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001416/* debug replacements for the PyMem_* memory API */
1417void *
1418_PyMem_DebugMalloc(size_t nbytes)
1419{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001420 return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001421}
1422void *
1423_PyMem_DebugRealloc(void *p, size_t nbytes)
1424{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001425 return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001426}
1427void
1428_PyMem_DebugFree(void *p)
1429{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001430 _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001431}
1432
1433/* debug replacements for the PyObject_* memory API */
Tim Petersddea2082002-03-23 10:03:50 +00001434void *
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001435_PyObject_DebugMalloc(size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001436{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001437 return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001438}
1439void *
1440_PyObject_DebugRealloc(void *p, size_t nbytes)
1441{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001442 return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001443}
1444void
1445_PyObject_DebugFree(void *p)
1446{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001447 _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001448}
1449void
Kristján Valur Jónsson34369002009-09-28 15:57:53 +00001450_PyObject_DebugCheckAddress(const void *p)
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001451{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001452 _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001453}
1454
1455
1456/* generic debug memory api, with an "id" to identify the API in use */
1457void *
1458_PyObject_DebugMallocApi(char id, size_t nbytes)
1459{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001460 uchar *p; /* base address of malloc'ed block */
1461 uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */
1462 size_t total; /* nbytes + 4*SST */
Tim Petersddea2082002-03-23 10:03:50 +00001463
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001464 bumpserialno();
1465 total = nbytes + 4*SST;
1466 if (total < nbytes)
1467 /* overflow: can't represent total as a size_t */
1468 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001469
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001470 p = (uchar *)PyObject_Malloc(total);
1471 if (p == NULL)
1472 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001473
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001474 /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
1475 write_size_t(p, nbytes);
1476 p[SST] = (uchar)id;
1477    memset(p + SST + 1, FORBIDDENBYTE, SST-1);
Tim Petersddea2082002-03-23 10:03:50 +00001478
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001479 if (nbytes > 0)
1480 memset(p + 2*SST, CLEANBYTE, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001481
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001482 /* at tail, write pad (SST bytes) and serialno (SST bytes) */
1483 tail = p + 2*SST + nbytes;
1484 memset(tail, FORBIDDENBYTE, SST);
1485 write_size_t(tail + SST, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00001486
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001487 return p + 2*SST;
Tim Petersddea2082002-03-23 10:03:50 +00001488}
1489
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001490/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001491 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00001492 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001493 Then calls the underlying free.
1494*/
1495void
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001496_PyObject_DebugFreeApi(char api, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001497{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001498 uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */
1499 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001500
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001501 if (p == NULL)
1502 return;
1503 _PyObject_DebugCheckAddressApi(api, p);
1504 nbytes = read_size_t(q);
1505 nbytes += 4*SST;
1506 if (nbytes > 0)
1507 memset(q, DEADBYTE, nbytes);
1508 PyObject_Free(q);
Tim Petersddea2082002-03-23 10:03:50 +00001509}
1510
1511void *
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001512_PyObject_DebugReallocApi(char api, void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001513{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001514 uchar *q = (uchar *)p;
1515 uchar *tail;
1516 size_t total; /* nbytes + 4*SST */
1517 size_t original_nbytes;
1518 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001519
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001520 if (p == NULL)
1521 return _PyObject_DebugMallocApi(api, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001522
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001523 _PyObject_DebugCheckAddressApi(api, p);
1524 bumpserialno();
1525 original_nbytes = read_size_t(q - 2*SST);
1526 total = nbytes + 4*SST;
1527 if (total < nbytes)
1528 /* overflow: can't represent total as a size_t */
1529 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001530
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001531 if (nbytes < original_nbytes) {
1532 /* shrinking: mark old extra memory dead */
1533 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);
1534 }
Tim Petersddea2082002-03-23 10:03:50 +00001535
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001536 /* Resize and add decorations. We may get a new pointer here, in which
1537 * case we didn't get the chance to mark the old memory with DEADBYTE,
1538 * but we live with that.
1539 */
1540 q = (uchar *)PyObject_Realloc(q - 2*SST, total);
1541 if (q == NULL)
1542 return NULL;
Tim Peters85cc1c42002-04-12 08:52:50 +00001543
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001544 write_size_t(q, nbytes);
1545 assert(q[SST] == (uchar)api);
1546 for (i = 1; i < SST; ++i)
1547 assert(q[SST + i] == FORBIDDENBYTE);
1548 q += 2*SST;
1549 tail = q + nbytes;
1550 memset(tail, FORBIDDENBYTE, SST);
1551 write_size_t(tail + SST, serialno);
Tim Peters85cc1c42002-04-12 08:52:50 +00001552
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001553 if (nbytes > original_nbytes) {
1554 /* growing: mark new extra memory clean */
1555 memset(q + original_nbytes, CLEANBYTE,
Stefan Krah735bb122010-11-26 10:54:09 +00001556 nbytes - original_nbytes);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001557 }
Tim Peters85cc1c42002-04-12 08:52:50 +00001558
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001559 return q;
Tim Petersddea2082002-03-23 10:03:50 +00001560}
1561
Tim Peters7ccfadf2002-04-01 06:04:21 +00001562/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001563 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001564 * and call Py_FatalError to kill the program.
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001565 * The API id is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00001566 */
1567 void
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001568_PyObject_DebugCheckAddressApi(char api, const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001569{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001570 const uchar *q = (const uchar *)p;
1571 char msgbuf[64];
1572 char *msg;
1573 size_t nbytes;
1574 const uchar *tail;
1575 int i;
1576 char id;
Tim Petersddea2082002-03-23 10:03:50 +00001577
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001578 if (p == NULL) {
1579 msg = "didn't expect a NULL pointer";
1580 goto error;
1581 }
Tim Petersddea2082002-03-23 10:03:50 +00001582
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001583 /* Check the API id */
1584 id = (char)q[-SST];
1585 if (id != api) {
1586 msg = msgbuf;
1587 snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
1588 msgbuf[sizeof(msgbuf)-1] = 0;
1589 goto error;
1590 }
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001591
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001592 /* Check the stuff at the start of p first: if there's underwrite
1593 * corruption, the number-of-bytes field may be nuts, and checking
1594 * the tail could lead to a segfault then.
1595 */
1596 for (i = SST-1; i >= 1; --i) {
1597 if (*(q-i) != FORBIDDENBYTE) {
1598 msg = "bad leading pad byte";
1599 goto error;
1600 }
1601 }
Tim Petersddea2082002-03-23 10:03:50 +00001602
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001603 nbytes = read_size_t(q - 2*SST);
1604 tail = q + nbytes;
1605 for (i = 0; i < SST; ++i) {
1606 if (tail[i] != FORBIDDENBYTE) {
1607 msg = "bad trailing pad byte";
1608 goto error;
1609 }
1610 }
Tim Petersddea2082002-03-23 10:03:50 +00001611
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001612 return;
Tim Petersd1139e02002-03-28 07:32:11 +00001613
1614error:
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001615 _PyObject_DebugDumpAddress(p);
1616 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00001617}
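
/* Illustrative sketch (never compiled): a one-byte heap overrun is the
 * canonical way to trip the check above.  Calling the debug entry points
 * directly:
 */
#if 0
static void
overrun_demo(void)
{
    char *s = (char *)_PyObject_DebugMalloc(10);
    s[10] = 0;                /* stomps the first trailing FORBIDDENBYTE */
    _PyObject_DebugFree(s);   /* -> "bad trailing pad byte", Py_FatalError */
}
#endif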
1618
Tim Peters7ccfadf2002-04-01 06:04:21 +00001619/* Display info to stderr about the memory block at p. */
Tim Petersddea2082002-03-23 10:03:50 +00001620void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001621_PyObject_DebugDumpAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001622{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001623 const uchar *q = (const uchar *)p;
1624 const uchar *tail;
1625 size_t nbytes, serial;
1626 int i;
1627 int ok;
1628 char id;
Tim Petersddea2082002-03-23 10:03:50 +00001629
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001630 fprintf(stderr, "Debug memory block at address p=%p:", p);
1631 if (p == NULL) {
1632 fprintf(stderr, "\n");
1633 return;
1634 }
1635 id = (char)q[-SST];
1636 fprintf(stderr, " API '%c'\n", id);
Tim Petersddea2082002-03-23 10:03:50 +00001637
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001638 nbytes = read_size_t(q - 2*SST);
1639 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
1640 "requested\n", nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001641
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001642 /* In case this is nuts, check the leading pad bytes first. */
1643 fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
1644 ok = 1;
1645 for (i = 1; i <= SST-1; ++i) {
1646 if (*(q-i) != FORBIDDENBYTE) {
1647 ok = 0;
1648 break;
1649 }
1650 }
1651 if (ok)
1652 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1653 else {
1654 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
1655 FORBIDDENBYTE);
1656 for (i = SST-1; i >= 1; --i) {
1657 const uchar byte = *(q-i);
1658 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
1659 if (byte != FORBIDDENBYTE)
1660 fputs(" *** OUCH", stderr);
1661 fputc('\n', stderr);
1662 }
Tim Peters449b5a82002-04-28 06:14:45 +00001663
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001664 fputs(" Because memory is corrupted at the start, the "
1665 "count of bytes requested\n"
1666 " may be bogus, and checking the trailing pad "
1667 "bytes may segfault.\n", stderr);
1668 }
Tim Petersddea2082002-03-23 10:03:50 +00001669
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001670 tail = q + nbytes;
1671 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
1672 ok = 1;
1673 for (i = 0; i < SST; ++i) {
1674 if (tail[i] != FORBIDDENBYTE) {
1675 ok = 0;
1676 break;
1677 }
1678 }
1679 if (ok)
1680 fputs("FORBIDDENBYTE, as expected.\n", stderr);
1681 else {
1682 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
Stefan Krah735bb122010-11-26 10:54:09 +00001683 FORBIDDENBYTE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001684 for (i = 0; i < SST; ++i) {
1685 const uchar byte = tail[i];
1686 fprintf(stderr, " at tail+%d: 0x%02x",
Stefan Krah735bb122010-11-26 10:54:09 +00001687 i, byte);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001688 if (byte != FORBIDDENBYTE)
1689 fputs(" *** OUCH", stderr);
1690 fputc('\n', stderr);
1691 }
1692 }
Tim Petersddea2082002-03-23 10:03:50 +00001693
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001694 serial = read_size_t(tail + SST);
1695 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
1696 "u to debug malloc/realloc.\n", serial);
Tim Petersddea2082002-03-23 10:03:50 +00001697
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001698 if (nbytes > 0) {
1699 i = 0;
1700 fputs(" Data at p:", stderr);
1701 /* print up to 8 bytes at the start */
1702 while (q < tail && i < 8) {
1703 fprintf(stderr, " %02x", *q);
1704 ++i;
1705 ++q;
1706 }
1707 /* and up to 8 at the end */
1708 if (q < tail) {
1709 if (tail - q > 8) {
1710 fputs(" ...", stderr);
1711 q = tail - 8;
1712 }
1713 while (q < tail) {
1714 fprintf(stderr, " %02x", *q);
1715 ++q;
1716 }
1717 }
1718 fputc('\n', stderr);
1719 }
Tim Petersddea2082002-03-23 10:03:50 +00001720}
1721
David Malcolm49526f42012-06-22 14:55:41 -04001722#endif /* PYMALLOC_DEBUG */
1723
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001724static size_t
David Malcolm49526f42012-06-22 14:55:41 -04001725printone(FILE *out, const char* msg, size_t value)
Tim Peters16bcb6b2002-04-05 05:45:31 +00001726{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001727 int i, k;
1728 char buf[100];
1729 size_t origvalue = value;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001730
David Malcolm49526f42012-06-22 14:55:41 -04001731 fputs(msg, out);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001732 for (i = (int)strlen(msg); i < 35; ++i)
David Malcolm49526f42012-06-22 14:55:41 -04001733 fputc(' ', out);
1734 fputc('=', out);
Tim Peters49f26812002-04-06 01:45:35 +00001735
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001736 /* Write the value with commas. */
1737 i = 22;
1738 buf[i--] = '\0';
1739 buf[i--] = '\n';
1740 k = 3;
1741 do {
1742 size_t nextvalue = value / 10;
1743 uint digit = (uint)(value - nextvalue * 10);
1744 value = nextvalue;
1745 buf[i--] = (char)(digit + '0');
1746 --k;
1747 if (k == 0 && value && i >= 0) {
1748 k = 3;
1749 buf[i--] = ',';
1750 }
1751 } while (value && i >= 0);
Tim Peters49f26812002-04-06 01:45:35 +00001752
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001753 while (i >= 0)
1754 buf[i--] = ' ';
David Malcolm49526f42012-06-22 14:55:41 -04001755 fputs(buf, out);
Tim Peters49f26812002-04-06 01:45:35 +00001756
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001757 return origvalue;
Tim Peters16bcb6b2002-04-05 05:45:31 +00001758}
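
/* Illustrative sketch (never compiled): the loop above builds the number
 * right-to-left with a comma every third digit, so printone(out, msg,
 * 1234567) renders the value as "1,234,567".  The grouping step in
 * isolation, assuming buf is large enough:
 */
#if 0
static const char *
comma_demo(char *buf, size_t bufsize, size_t value)
{
    int i = (int)bufsize - 1;
    int k = 3;
    buf[i--] = '\0';
    do {
        buf[i--] = (char)('0' + value % 10);
        value /= 10;
        if (--k == 0 && value) {
            buf[i--] = ',';
            k = 3;
        }
    } while (value && i >= 0);
    return &buf[i + 1];
}
#endif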
1759
David Malcolm49526f42012-06-22 14:55:41 -04001760void
1761_PyDebugAllocatorStats(FILE *out,
1762 const char *block_name, int num_blocks, size_t sizeof_block)
1763{
1764 char buf1[128];
1765 char buf2[128];
1766 PyOS_snprintf(buf1, sizeof(buf1),
1767 "%d %ss * %zd bytes each",
1768 num_blocks, block_name, sizeof_block);
1769 PyOS_snprintf(buf2, sizeof(buf2),
1770 "%48s ", buf1);
1771 (void)printone(out, buf2, num_blocks * sizeof_block);
1772}
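
/* Usage sketch: callers report a free list's footprint in one line.  For a
 * hypothetical cache of 80 frame objects of 400 bytes each,
 *
 *     _PyDebugAllocatorStats(stderr, "free PyFrameObject", 80, 400);
 *
 * prints "80 free PyFrameObjects * 400 bytes each", right-aligned, followed
 * by the 32,000-byte total as formatted by printone().
 */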
1773
1774#ifdef WITH_PYMALLOC
1775
1776/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00001777 * In Py_DEBUG mode, also perform some expensive internal consistency
1778 * checks.
1779 */
Tim Peters7ccfadf2002-04-01 06:04:21 +00001780void
David Malcolm49526f42012-06-22 14:55:41 -04001781_PyObject_DebugMallocStats(FILE *out)
Tim Peters7ccfadf2002-04-01 06:04:21 +00001782{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001783 uint i;
1784 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
1785 /* # of pools, allocated blocks, and free blocks per class index */
1786 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1787 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1788 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
1789 /* total # of allocated bytes in used and full pools */
1790 size_t allocated_bytes = 0;
1791 /* total # of available bytes in used pools */
1792 size_t available_bytes = 0;
1793 /* # of free pools + pools not yet carved out of current arena */
1794 uint numfreepools = 0;
1795 /* # of bytes for arena alignment padding */
1796 size_t arena_alignment = 0;
1797 /* # of bytes in used and full pools used for pool_headers */
1798 size_t pool_header_bytes = 0;
1799 /* # of bytes in used and full pools wasted due to quantization,
1800 * i.e. the necessarily leftover space at the ends of used and
1801 * full pools.
1802 */
1803 size_t quantization = 0;
1804 /* # of arenas actually allocated. */
1805 size_t narenas = 0;
1806 /* running total -- should equal narenas * ARENA_SIZE */
1807 size_t total;
1808 char buf[128];
Tim Peters7ccfadf2002-04-01 06:04:21 +00001809
David Malcolm49526f42012-06-22 14:55:41 -04001810 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
Stefan Krah735bb122010-11-26 10:54:09 +00001811 SMALL_REQUEST_THRESHOLD, numclasses);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001812
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001813 for (i = 0; i < numclasses; ++i)
1814 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
Tim Peters7ccfadf2002-04-01 06:04:21 +00001815
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001816 /* Because full pools aren't linked to from anything, it's easiest
1817 * to march over all the arenas. If we're lucky, most of the memory
1818 * will be living in full pools -- would be a shame to miss them.
1819 */
1820 for (i = 0; i < maxarenas; ++i) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001821 uint j;
1822 uptr base = arenas[i].address;
Thomas Woutersa9773292006-04-21 09:43:23 +00001823
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001824 /* Skip arenas which are not allocated. */
1825 if (arenas[i].address == (uptr)NULL)
1826 continue;
1827 narenas += 1;
Thomas Woutersa9773292006-04-21 09:43:23 +00001828
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001829 numfreepools += arenas[i].nfreepools;
Tim Peters7ccfadf2002-04-01 06:04:21 +00001830
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001831 /* round up to pool alignment */
1832 if (base & (uptr)POOL_SIZE_MASK) {
1833 arena_alignment += POOL_SIZE;
1834 base &= ~(uptr)POOL_SIZE_MASK;
1835 base += POOL_SIZE;
1836 }
Tim Peters7ccfadf2002-04-01 06:04:21 +00001837
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001838 /* visit every pool in the arena */
1839 assert(base <= (uptr) arenas[i].pool_address);
1840 for (j = 0;
1841 base < (uptr) arenas[i].pool_address;
1842 ++j, base += POOL_SIZE) {
1843 poolp p = (poolp)base;
1844 const uint sz = p->szidx;
1845 uint freeblocks;
Tim Peters08d82152002-04-18 22:25:03 +00001846
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001847 if (p->ref.count == 0) {
1848 /* currently unused */
1849 assert(pool_is_in_list(p, arenas[i].freepools));
1850 continue;
1851 }
1852 ++numpools[sz];
1853 numblocks[sz] += p->ref.count;
1854 freeblocks = NUMBLOCKS(sz) - p->ref.count;
1855 numfreeblocks[sz] += freeblocks;
Tim Peters08d82152002-04-18 22:25:03 +00001856#ifdef Py_DEBUG
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001857 if (freeblocks > 0)
1858 assert(pool_is_in_list(p, usedpools[sz + sz]));
Tim Peters08d82152002-04-18 22:25:03 +00001859#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001860 }
1861 }
1862 assert(narenas == narenas_currently_allocated);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001863
David Malcolm49526f42012-06-22 14:55:41 -04001864 fputc('\n', out);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001865 fputs("class size num pools blocks in use avail blocks\n"
1866 "----- ---- --------- ------------- ------------\n",
David Malcolm49526f42012-06-22 14:55:41 -04001867 out);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001868
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001869 for (i = 0; i < numclasses; ++i) {
1870 size_t p = numpools[i];
1871 size_t b = numblocks[i];
1872 size_t f = numfreeblocks[i];
1873 uint size = INDEX2SIZE(i);
1874 if (p == 0) {
1875 assert(b == 0 && f == 0);
1876 continue;
1877 }
David Malcolm49526f42012-06-22 14:55:41 -04001878 fprintf(out, "%5u %6u "
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001879 "%11" PY_FORMAT_SIZE_T "u "
1880 "%15" PY_FORMAT_SIZE_T "u "
1881 "%13" PY_FORMAT_SIZE_T "u\n",
Stefan Krah735bb122010-11-26 10:54:09 +00001882 i, size, p, b, f);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001883 allocated_bytes += b * size;
1884 available_bytes += f * size;
1885 pool_header_bytes += p * POOL_OVERHEAD;
1886 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
1887 }
David Malcolm49526f42012-06-22 14:55:41 -04001888 fputc('\n', out);
1889#ifdef PYMALLOC_DEBUG
1890 (void)printone(out, "# times object malloc called", serialno);
1891#endif
1892 (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
1893 (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
1894 (void)printone(out, "# arenas highwater mark", narenas_highwater);
1895 (void)printone(out, "# arenas allocated current", narenas);
Thomas Woutersa9773292006-04-21 09:43:23 +00001896
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001897 PyOS_snprintf(buf, sizeof(buf),
1898 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
1899 narenas, ARENA_SIZE);
David Malcolm49526f42012-06-22 14:55:41 -04001900 (void)printone(out, buf, narenas * ARENA_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001901
David Malcolm49526f42012-06-22 14:55:41 -04001902 fputc('\n', out);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001903
David Malcolm49526f42012-06-22 14:55:41 -04001904 total = printone(out, "# bytes in allocated blocks", allocated_bytes);
1905 total += printone(out, "# bytes in available blocks", available_bytes);
Tim Peters49f26812002-04-06 01:45:35 +00001906
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001907 PyOS_snprintf(buf, sizeof(buf),
1908 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
David Malcolm49526f42012-06-22 14:55:41 -04001909 total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00001910
David Malcolm49526f42012-06-22 14:55:41 -04001911 total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
1912 total += printone(out, "# bytes lost to quantization", quantization);
1913 total += printone(out, "# bytes lost to arena alignment", arena_alignment);
1914 (void)printone(out, "Total", total);
Tim Peters7ccfadf2002-04-01 06:04:21 +00001915}
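
/* Usage sketch: a debug build can dump this report at any point with
 *     _PyObject_DebugMallocStats(stderr);
 * CPython itself emits it at interpreter shutdown when the
 * PYTHONMALLOCSTATS environment variable is set.
 */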
1916
David Malcolm49526f42012-06-22 14:55:41 -04001917#endif /* #ifdef WITH_PYMALLOC */
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001918
1919#ifdef Py_USING_MEMORY_DEBUGGER
Thomas Woutersa9773292006-04-21 09:43:23 +00001920/* Make this function last so gcc won't inline it since the definition is
1921 * after the reference.
1922 */
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001923int
1924Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1925{
Antoine Pitroub7fb2e22011-01-07 21:43:59 +00001926 uint arenaindex_temp = pool->arenaindex;
1927
1928 return arenaindex_temp < maxarenas &&
1929 (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
1930 arenas[arenaindex_temp].address != 0;
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001931}
1932#endif
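
/* Illustrative note (never compiled): the unsigned arithmetic above checks
 * range containment with a single comparison -- if P is below the arena
 * base, the subtraction wraps to a huge value, so one "< ARENA_SIZE" test
 * rejects both out-of-range directions.
 */
#if 0
static int
in_range_demo(uptr p, uptr base, uptr size)
{
    /* Equivalent to (base <= p && p < base + size), without overflow. */
    return p - base < size;
}
#endif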