#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


        Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
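
/*
 * For orientation, an illustrative path through the layers (a sketch, not an
 * exhaustive list): a call such as PyObject_New(SomeObject, &SomeType)
 * obtains its memory from the object allocator at layer +2 -- PyObject_Malloc
 * in this file -- which either serves the request from one of its small-block
 * pools or, for requests larger than SMALL_REQUEST_THRESHOLD bytes, falls
 * through to the layer 0 C library malloc().
 */
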
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */    /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists. This technique is quite
 * efficient for memory intensive programs which allocate mainly small-sized
 * blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */
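
/*
 * Worked example of the table above (an illustration only -- the real
 * computation is done by PyObject_Malloc and the INDEX2SIZE macro below):
 *
 *      size class idx = (nbytes - 1) >> ALIGNMENT_SHIFT     (20 - 1) >> 3 == 2
 *      block size     = (idx + 1) << ALIGNMENT_SHIFT         (2 + 1) << 3 == 24
 *
 * so a 20-byte request is served from the 24-byte size class, matching the
 * "17-24 -> 24 -> idx 2" row of the table.
 */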

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). In no way does this mean
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory". It's an addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */
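
/*
 * If the serialized-threads assumption above ever changed, a minimal sketch
 * of these macros in terms of the PyThread API might look like the following.
 * This is an illustrative assumption only -- the no-op macros above are the
 * real definitions used in this file:
 *
 *      #include "pythread.h"
 *      #define SIMPLELOCK_DECL(lock)   static PyThread_type_lock lock;
 *      #define SIMPLELOCK_INIT(lock)   ((lock) = PyThread_allocate_lock())
 *      #define SIMPLELOCK_FINI(lock)   PyThread_free_lock(lock)
 *      #define SIMPLELOCK_LOCK(lock)   PyThread_acquire_lock((lock), WAIT_LOCK)
 *      #define SIMPLELOCK_UNLOCK(lock) PyThread_release_lock(lock)
 */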

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))
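
/*
 * ROUNDUP simply rounds up to a multiple of ALIGNMENT. Worked example with
 * ALIGNMENT == 8 (illustration only): ROUNDUP(33) == (33 + 7) & ~7 == 40,
 * while ROUNDUP(32) == 32. POOL_OVERHEAD is therefore
 * sizeof(struct pool_header) rounded up to an 8-byte multiple, so the first
 * block carved out of a pool starts at an ALIGNMENT-aligned offset.
 */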

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
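
/*
 * Worked example (assuming the default 4K pools, so POOL_SIZE_MASK == 0xfff):
 * POOL_ADDR(0x12345678) masks off the low 12 bits and yields 0x12345000,
 * i.e. the pool_header at the base of the pool containing that block.
 * NUMBLOCKS(0) is then (4096 - POOL_OVERHEAD) / 8, the number of 8-byte
 * blocks that fit in one pool after the header.
 */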

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off the current arena highwater mark (file static arenabase)
as needed.  Once carved off, a pool is in one of three states forever after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of the (file static) singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with the
fact that pymalloc strives, at all levels (arena, pool, and block), never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.

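As an illustration (this is a sketch of the logic PyObject_Malloc implements
below, not additional machinery), extending the free list by one virgin block
looks like:

    if (pool->nextoffset <= pool->maxnextoffset) {
        /* there is room for another block */
        pool->freeblock = (block *)pool + pool->nextoffset;
        pool->nextoffset += INDEX2SIZE(pool->szidx);
        *(block **)(pool->freeblock) = NULL;    /* one-block free list */
    }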

Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

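/*
 * Illustration (a sketch of how the table is consulted -- the real code is in
 * PyObject_Malloc below): for a size class index "size", the list header is
 * usedpools[size + size], and that list is non-empty exactly when the
 * header's nextpool does not point back at the header itself:
 *
 *      poolp pool = usedpools[size + size];
 *      if (pool != pool->nextpool) {
 *          ... at least one partially used pool of this size class ...
 *      }
 */
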
#define PTA(x)  ((poolp)((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*==========================================================================*/
/* Arena management. */

/* arenas is a vector of arena base addresses, in order of allocation time.
 * arenas currently contains narenas entries, and has space allocated
 * for at most maxarenas entries.
 *
 * CAUTION: See the long comment block about thread safety in new_arena():
 * the code currently relies in deep ways on the fact that this vector only
 * grows, and only grows by appending at the end. For now we never return an
 * arena to the OS.
 */
static uptr *volatile arenas = NULL;    /* the pointer itself is volatile */
static volatile uint narenas = 0;
static uint maxarenas = 0;

/* Number of pools still available to be allocated in the current arena. */
static uint nfreepools = 0;

/* Free space start address in current arena. This is pool-aligned. */
static block *arenabase = NULL;

/* Allocate a new arena and return its base address. If we run out of
 * memory, return NULL.
 */
static block *
new_arena(void)
{
    uint excess;        /* number of bytes above pool alignment */
    block *bp = (block *)malloc(ARENA_SIZE);
    if (bp == NULL)
        return NULL;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats();
#endif

    /* arenabase <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenabase = bp;
    nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * nfreepools == ARENA_SIZE);
    excess = (uint)((Py_uintptr_t)bp & POOL_SIZE_MASK);
    if (excess != 0) {
        --nfreepools;
        arenabase += POOL_SIZE - excess;
    }

    /* Make room for a new entry in the arenas vector. */
    if (arenas == NULL) {
        assert(narenas == 0 && maxarenas == 0);
        arenas = (uptr *)malloc(16 * sizeof(*arenas));
        if (arenas == NULL)
            goto error;
        maxarenas = 16;
    }
    else if (narenas == maxarenas) {
        /* Grow arenas.
         *
         * Exceedingly subtle:  Someone may be calling the pymalloc
         * free via PyMem_{DEL, Del, FREE, Free} without holding the
         * GIL.  Someone else may simultaneously be calling the
         * pymalloc malloc while holding the GIL via, e.g.,
         * PyObject_New.  Now the pymalloc free may index into arenas
         * for an address check, while the pymalloc malloc calls
         * new_arena and we end up here to grow a new arena *and*
         * grow the arenas vector.  If the value for arenas pymalloc
         * free picks up "vanishes" during this resize, anything may
         * happen, and it would be an incredibly rare bug.  Therefore
         * the code here takes great pains to make sure that, at every
         * moment, arenas always points to an intact vector of
         * addresses.  It doesn't matter whether arenas points to a
         * wholly up-to-date vector when pymalloc free checks it in
         * this case, because the only legal (and that even this is
         * legal is debatable) way to call PyMem_{Del, etc} while not
         * holding the GIL is if the memory being released is not
         * object memory, i.e. if the address check in pymalloc free
         * is supposed to fail.  Having an incomplete vector can't
         * make a supposed-to-fail case succeed by mistake (it could
         * only make a supposed-to-succeed case fail by mistake).
         *
         * In addition, without a lock we can't know for sure when
         * an old vector is no longer referenced, so we simply let
         * old vectors leak.
         *
         * And on top of that, since narenas and arenas can't be
         * changed as-a-pair atomically without a lock, we're also
         * careful to declare them volatile and ensure that we change
         * arenas first.  This prevents another thread from picking
         * up a narenas value too large for the arenas value it
         * reads up (arenas never shrinks).
         *
         * Read the above 50 times before changing anything in this
         * block.
         */
        uptr *p;
        uint newmax = maxarenas << 1;
        if (newmax <= maxarenas)        /* overflow */
            goto error;
        p = (uptr *)malloc(newmax * sizeof(*arenas));
        if (p == NULL)
            goto error;
        memcpy(p, arenas, narenas * sizeof(*arenas));
        arenas = p;     /* old arenas deliberately leaked */
        maxarenas = newmax;
    }

    /* Append the new arena address to arenas. */
    assert(narenas < maxarenas);
    arenas[narenas] = (uptr)bp;
    ++narenas;          /* can't overflow, since narenas < maxarenas before */
    return bp;

error:
    free(bp);
    nfreepools = 0;
    return NULL;
}

/* Return true if and only if P is an address that was allocated by
 * pymalloc.  POOL must be the pool_header address computed from P by
 * POOL_ADDR(P); its arenaindex member is the index into arenas that the
 * address claims to come from.
 *
 * Tricky: Letting B be the arena base address in arenas[(POOL)->arenaindex],
 * P belongs to the arena if and only if
 *      B <= P < B + ARENA_SIZE
 * Subtracting B throughout, this is true iff
 *      0 <= P-B < ARENA_SIZE
 * By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
 *
 * Obscure: A PyMem "free memory" function can call the pymalloc free or
 * realloc before the first arena has been allocated.  arenas is still
 * NULL in that case.  We're relying on the fact that narenas is also 0 in
 * that case, so the (POOL)->arenaindex < narenas test must be false, saving
 * us from trying to index into a NULL arenas.
 */
#define Py_ADDRESS_IN_RANGE(P, POOL)    \
    ((POOL)->arenaindex < narenas &&    \
     (uptr)(P) - arenas[(POOL)->arenaindex] < (uptr)ARENA_SIZE)
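
/*
 * Worked example of the unsigned trick (illustration only): suppose
 * arenas[(POOL)->arenaindex] == 0x40000000 and ARENA_SIZE == 256KB (0x40000).
 * For P == 0x40010000 the subtraction gives 0x10000 < 0x40000, so P is
 * accepted.  For P == 0x3fffffff (just below the arena base) the unsigned
 * subtraction wraps around to a huge value, so the single "< ARENA_SIZE"
 * comparison also rejects addresses below the base -- no separate "0 <="
 * test is needed.
 */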

/* This is only useful when running memory debuggers such as
 * Purify or Valgrind.  Uncomment to use.
 *
#define Py_USING_MEMORY_DEBUGGER
 */

#ifdef Py_USING_MEMORY_DEBUGGER

/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.
 * This leads to thousands of spurious warnings when using
 * Purify or Valgrind.  By making a function, we can easily
 * suppress the uninitialized memory reads in this one function.
 * So we won't ignore real errors elsewhere.
 *
 * Disable the macro and use a function.
 */

#undef Py_ADDRESS_IN_RANGE

#if defined(__GNUC__) && (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1)
#define Py_NO_INLINE __attribute__((__noinline__))
#else
#define Py_NO_INLINE
#endif

/* Don't make static, to try to ensure this isn't inlined. */
int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
#undef Py_NO_INLINE
#endif

/*==========================================================================*/

/* malloc.  Note that nbytes==0 tries to return a non-NULL pointer, distinct
 * from all other currently live pointers.  This may not be possible.
 */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

#undef PyObject_Malloc
void *
PyObject_Malloc(size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

    /*
     * This implicitly redirects malloc(0).
     */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(block **)bp) != NULL) {
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /*
                 * There is room for another block
                 */
                pool->freeblock = (block *)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Pool is full, unlink from used pools
             */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Try to get a cached free pool
         */
        pool = freepools;
        if (pool != NULL) {
            /*
             * Unlink from cached pools
             */
            freepools = pool->nextpool;
        init_pool:
            /*
             * Frontlink to used pools
             */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /*
                 * Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }
        /*
         * Allocate new pool
         */
        if (nfreepools) {
        commit_pool:
            --nfreepools;
            pool = (poolp)arenabase;
            arenabase += POOL_SIZE;
            pool->arenaindex = narenas - 1;
            pool->szidx = DUMMY_SIZE_IDX;
            goto init_pool;
        }
        /*
         * Allocate new arena
         */
#ifdef WITH_MEMORY_LIMITS
        if (!(narenas < MAX_ARENAS)) {
            UNLOCK();
            goto redirect;
        }
#endif
        bp = new_arena();
        if (bp != NULL)
            goto commit_pool;
        UNLOCK();
        goto redirect;
    }

    /* The small block allocator ends here. */

redirect:
    /*
     * Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    if (nbytes == 0)
        nbytes = 1;
    return (void *)malloc(nbytes);
}

/* free */

#undef PyObject_Free
void
PyObject_Free(void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /*
         * Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);    /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            /*
             * freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /*
             * Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;
            /* Link to freepools.  This is a singly-linked list,
             * and pool->prevpool isn't used there.
             */
            pool->nextpool = freepools;
            freepools = pool;
            UNLOCK();
            return;
        }
        /*
         * Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);    /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

    /* We didn't allocate this address. */
    free(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

#undef PyObject_Realloc
void *
PyObject_Realloc(void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return PyObject_Malloc(nbytes);

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff:  it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = PyObject_Malloc(nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            PyObject_Free(p);
        }
        return bp;
    }
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt:  let C continue to manage this block.
     */
    if (nbytes)
        return realloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = realloc(p, 1);
    return bp ? bp : p;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

void *
PyObject_Malloc(size_t n)
{
    return PyMem_MALLOC(n);
}

void *
PyObject_Realloc(void *p, size_t n)
{
    return PyMem_REALLOC(p, n);
}

void
PyObject_Free(void *p)
{
    PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 898 | #ifdef PYMALLOC_DEBUG |
| 899 | /*==========================================================================*/ |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 900 | /* A x-platform debugging allocator. This doesn't manage memory directly, |
| 901 | * it wraps a real allocator, adding extra debugging info to the memory blocks. |
| 902 | */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 903 | |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 904 | /* Special bytes broadcast into debug memory blocks at appropriate times. |
| 905 | * Strings of these are unlikely to be valid addresses, floats, ints or |
| 906 | * 7-bit ASCII. |
| 907 | */ |
| 908 | #undef CLEANBYTE |
| 909 | #undef DEADBYTE |
| 910 | #undef FORBIDDENBYTE |
| 911 | #define CLEANBYTE 0xCB /* clean (newly allocated) memory */ |
Tim Peters | 889f61d | 2002-07-10 19:29:49 +0000 | [diff] [blame] | 912 | #define DEADBYTE 0xDB /* dead (newly freed) memory */ |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 913 | #define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 914 | |
| 915 | static ulong serialno = 0; /* incremented on each debug {m,re}alloc */ |
| 916 | |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 917 | /* serialno is always incremented via calling this routine. The point is |
| 918 | to supply a single place to set a breakpoint. |
| 919 | */ |
| 920 | static void |
Neil Schemenauer | bd02b14 | 2002-03-28 21:05:38 +0000 | [diff] [blame] | 921 | bumpserialno(void) |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 922 | { |
| 923 | ++serialno; |
| 924 | } |
| 925 | |
| 926 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 927 | /* Read 4 bytes at p as a big-endian ulong. */ |
| 928 | static ulong |
| 929 | read4(const void *p) |
| 930 | { |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 931 | const uchar *q = (const uchar *)p; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 932 | return ((ulong)q[0] << 24) | |
| 933 | ((ulong)q[1] << 16) | |
| 934 | ((ulong)q[2] << 8) | |
| 935 | (ulong)q[3]; |
| 936 | } |
| 937 | |
| 938 | /* Write the 4 least-significant bytes of n as a big-endian unsigned int, |
| 939 | MSB at address p, LSB at p+3. */ |
| 940 | static void |
| 941 | write4(void *p, ulong n) |
| 942 | { |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 943 | uchar *q = (uchar *)p; |
| 944 | q[0] = (uchar)((n >> 24) & 0xff); |
| 945 | q[1] = (uchar)((n >> 16) & 0xff); |
| 946 | q[2] = (uchar)((n >> 8) & 0xff); |
| 947 | q[3] = (uchar)( n & 0xff); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 948 | } |
| 949 | |
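/* A minimal round-trip sketch (illustrative only; buf and the value are
 * invented for the example).  The MSB is stored first, so the bytes land in
 * the same order on any host, regardless of its native endianness:
 *
 *     uchar buf[4];
 *     write4(buf, 0x01020304UL);
 *     assert(buf[0] == 0x01 && buf[3] == 0x04);
 *     assert(read4(buf) == 0x01020304UL);
 */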
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 950 | #ifdef Py_DEBUG |
| 951 | /* Is target in the list? The list is traversed via the nextpool pointers. |
| 952 | * The list may be NULL-terminated, or circular. Return 1 if target is in |
| 953 | * the list, else 0.
| 954 | */ |
| 955 | static int |
| 956 | pool_is_in_list(const poolp target, poolp list) |
| 957 | { |
| 958 | poolp origlist = list; |
| 959 | assert(target != NULL); |
| 960 | if (list == NULL) |
| 961 | return 0; |
| 962 | do { |
| 963 | if (target == list) |
| 964 | return 1; |
| 965 | list = list->nextpool; |
| 966 | } while (list != NULL && list != origlist); |
| 967 | return 0; |
| 968 | } |
| 969 | |
| 970 | #else |
| 971 | #define pool_is_in_list(X, Y) 1 |
| 972 | |
| 973 | #endif /* Py_DEBUG */ |
| 974 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 975 | /* The debug malloc asks for 16 extra bytes and fills them with useful stuff;
| 976 | in what follows, p is the address returned by the underlying malloc:
| 977 | |
| 978 | p[0:4] |
| 979 | Number of bytes originally asked for. 4-byte unsigned integer, |
| 980 | big-endian (easier to read in a memory dump). |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 981 | p[4:8] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 982 | Copies of FORBIDDENBYTE. Used to catch under-writes and under-reads.
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 983 | p[8:8+n] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 984 | The requested memory, filled with copies of CLEANBYTE. |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 985 | Used to catch references to uninitialized memory.
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 986 | &p[8] is returned. Note that this is 8-byte aligned if pymalloc |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 987 | handled the request itself. |
| 988 | p[8+n:8+n+4] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 989 | Copies of FORBIDDENBYTE. Used to catch over-writes and over-reads.
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 990 | p[8+n+4:8+n+8] |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 991 | A serial number, incremented by 1 on each call to _PyObject_DebugMalloc |
| 992 | and _PyObject_DebugRealloc. |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 993 | 4-byte unsigned integer, big-endian. |
| 994 | If "bad memory" is detected later, the serial number gives an |
| 995 | excellent way to set a breakpoint on the next run, to capture the |
| 996 | instant at which this block was passed out. |
| 997 | */ |
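/* A worked example (illustrative; the serial number assumes this is the first
 * debug allocation of the run): after
 *
 *     char *s = (char *)_PyObject_DebugMalloc(4);
 *
 * the underlying block p holds 4 + 16 bytes laid out as
 *
 *     p[0:4]    00 00 00 04    byte count requested, big-endian
 *     p[4:8]    FB FB FB FB    leading FORBIDDENBYTEs
 *     p[8:12]   CB CB CB CB    the 4 requested bytes, filled with CLEANBYTE
 *     p[12:16]  FB FB FB FB    trailing FORBIDDENBYTEs
 *     p[16:20]  00 00 00 01    serial number of this call, big-endian
 *
 * and s == (char *)p + 8 is what the caller sees.
 */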
| 998 | |
| 999 | void * |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1000 | _PyObject_DebugMalloc(size_t nbytes) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1001 | { |
| 1002 | uchar *p; /* base address of malloc'ed block */ |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1003 | uchar *tail; /* p + 8 + nbytes == pointer to tail pad bytes */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1004 | size_t total; /* nbytes + 16 */ |
| 1005 | |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1006 | bumpserialno(); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1007 | total = nbytes + 16; |
Martin v. Löwis | 18e1655 | 2006-02-15 17:27:45 +0000 | [diff] [blame^] | 1008 | #if SIZEOF_SIZE_T < 8 |
| 1009 | /* this overflow/representation check matters only where size_t is narrower than 8 bytes */
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1010 | if (total < nbytes || (total >> 31) > 1) { |
| 1011 | /* overflow, or we can't represent it in 4 bytes */ |
| 1012 | /* Obscure: can't do (total >> 32) != 0 instead, because |
| 1013 | C doesn't define what happens for a right-shift of 32 |
| 1014 | when size_t is a 32-bit type. At least C guarantees |
| 1015 | size_t is an unsigned type. */ |
| 1016 | return NULL; |
| 1017 | } |
Martin v. Löwis | 18e1655 | 2006-02-15 17:27:45 +0000 | [diff] [blame^] | 1018 | #endif |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1019 | |
Tim Peters | 8a8cdfd | 2002-04-12 20:49:36 +0000 | [diff] [blame] | 1020 | p = (uchar *)PyObject_Malloc(total); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1021 | if (p == NULL) |
| 1022 | return NULL; |
| 1023 | |
Martin v. Löwis | 18e1655 | 2006-02-15 17:27:45 +0000 | [diff] [blame^] | 1024 | write4(p, (ulong)nbytes); |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1025 | p[4] = p[5] = p[6] = p[7] = FORBIDDENBYTE; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1026 | |
| 1027 | if (nbytes > 0) |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1028 | memset(p+8, CLEANBYTE, nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1029 | |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1030 | tail = p + 8 + nbytes; |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1031 | tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE; |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1032 | write4(tail + 4, serialno); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1033 | |
| 1034 | return p+8; |
| 1035 | } |
| 1036 | |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1037 | /* The debug free first checks the forbidden pad bytes on each end for sanity
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1038 | (in particular, that the FORBIDDENBYTEs are still intact).
| 1039 | It then fills the original bytes with DEADBYTE,
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1040 | and finally calls the underlying free.
| 1041 | */ |
| 1042 | void |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1043 | _PyObject_DebugFree(void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1044 | { |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1045 | uchar *q = (uchar *)p; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1046 | size_t nbytes; |
| 1047 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1048 | if (p == NULL) |
| 1049 | return; |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1050 | _PyObject_DebugCheckAddress(p); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1051 | nbytes = read4(q-8); |
| 1052 | if (nbytes > 0) |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1053 | memset(q, DEADBYTE, nbytes); |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1054 | PyObject_Free(q-8); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1055 | } |
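/* A hedged usage sketch (hypothetical code, not part of the allocator),
 * showing how an overwrite gets caught.  The store to s[10] lands one byte
 * past the requested block, on the first trailing pad byte:
 *
 *     char *s = (char *)_PyObject_DebugMalloc(10);
 *     s[10] = 'x';
 *     _PyObject_DebugFree(s);
 *
 * The free's address check finds the smashed FORBIDDENBYTE, dumps the block
 * via _PyObject_DebugDumpAddress, and then aborts the process through
 * Py_FatalError("bad trailing pad byte").
 */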
| 1056 | |
| 1057 | void * |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1058 | _PyObject_DebugRealloc(void *p, size_t nbytes) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1059 | { |
| 1060 | uchar *q = (uchar *)p; |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1061 | uchar *tail; |
| 1062 | size_t total; /* nbytes + 16 */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1063 | size_t original_nbytes; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1064 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1065 | if (p == NULL) |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1066 | return _PyObject_DebugMalloc(nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1067 | |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1068 | _PyObject_DebugCheckAddress(p); |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1069 | bumpserialno(); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1070 | original_nbytes = read4(q-8); |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1071 | total = nbytes + 16; |
| 1072 | if (total < nbytes || (total >> 31) > 1) { |
| 1073 | /* overflow, or we can't represent it in 4 bytes */ |
| 1074 | return NULL; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1075 | } |
| 1076 | |
| 1077 | if (nbytes < original_nbytes) { |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1078 | /* shrinking: mark old extra memory dead */ |
| 1079 | memset(q + nbytes, DEADBYTE, original_nbytes - nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1080 | } |
| 1081 | |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1082 | /* Resize and add decorations. */ |
| 1083 | q = (uchar *)PyObject_Realloc(q-8, total); |
| 1084 | if (q == NULL) |
| 1085 | return NULL; |
| 1086 | |
Martin v. Löwis | 18e1655 | 2006-02-15 17:27:45 +0000 | [diff] [blame^] | 1087 | write4(q, (ulong)nbytes); |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1088 | assert(q[4] == FORBIDDENBYTE && |
| 1089 | q[5] == FORBIDDENBYTE && |
| 1090 | q[6] == FORBIDDENBYTE && |
| 1091 | q[7] == FORBIDDENBYTE); |
| 1092 | q += 8; |
| 1093 | tail = q + nbytes; |
| 1094 | tail[0] = tail[1] = tail[2] = tail[3] = FORBIDDENBYTE; |
| 1095 | write4(tail + 4, serialno); |
| 1096 | |
| 1097 | if (nbytes > original_nbytes) { |
| 1098 | /* growing: mark new extra memory clean */ |
| 1099 | memset(q + original_nbytes, CLEANBYTE, |
| 1100 | nbytes - original_nbytes); |
Tim Peters | 52aefc8 | 2002-04-11 06:36:45 +0000 | [diff] [blame] | 1101 | } |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1102 | |
| 1103 | return q; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1104 | } |
| 1105 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1106 | /* Check the forbidden bytes on both ends of the memory allocated for p. |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1107 | * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress, |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1108 | * and call Py_FatalError to kill the program. |
| 1109 | */ |
| 1110 | void |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1111 | _PyObject_DebugCheckAddress(const void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1112 | { |
| 1113 | const uchar *q = (const uchar *)p; |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1114 | char *msg; |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1115 | ulong nbytes; |
| 1116 | const uchar *tail; |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1117 | int i; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1118 | |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1119 | if (p == NULL) { |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1120 | msg = "didn't expect a NULL pointer"; |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1121 | goto error; |
| 1122 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1123 | |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1124 | /* Check the stuff at the start of p first: if there's underwrite |
| 1125 | * corruption, the number-of-bytes field may be nuts, and checking |
| 1126 | * the tail could then segfault.
| 1127 | */ |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1128 | for (i = 4; i >= 1; --i) { |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1129 | if (*(q-i) != FORBIDDENBYTE) { |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1130 | msg = "bad leading pad byte"; |
| 1131 | goto error; |
| 1132 | } |
| 1133 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1134 | |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1135 | nbytes = read4(q-8); |
| 1136 | tail = q + nbytes; |
| 1137 | for (i = 0; i < 4; ++i) { |
| 1138 | if (tail[i] != FORBIDDENBYTE) { |
| 1139 | msg = "bad trailing pad byte"; |
| 1140 | goto error; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1141 | } |
| 1142 | } |
| 1143 | |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1144 | return; |
| 1145 | |
| 1146 | error: |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1147 | _PyObject_DebugDumpAddress(p); |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1148 | Py_FatalError(msg); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1149 | } |
| 1150 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1151 | /* Display info to stderr about the memory block at p. */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1152 | void |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1153 | _PyObject_DebugDumpAddress(const void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1154 | { |
| 1155 | const uchar *q = (const uchar *)p; |
| 1156 | const uchar *tail; |
| 1157 | ulong nbytes, serial; |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1158 | int i; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1159 | |
| 1160 | fprintf(stderr, "Debug memory block at address p=%p:\n", p); |
| 1161 | if (p == NULL) |
| 1162 | return; |
| 1163 | |
| 1164 | nbytes = read4(q-8); |
Tim Peters | f539c68 | 2002-04-12 07:43:07 +0000 | [diff] [blame] | 1165 | fprintf(stderr, " %lu bytes originally requested\n", nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1166 | |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1167 | /* In case this is nuts, check the leading pad bytes first. */ |
| 1168 | fputs(" The 4 pad bytes at p-4 are ", stderr); |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1169 | if (*(q-4) == FORBIDDENBYTE && |
| 1170 | *(q-3) == FORBIDDENBYTE && |
| 1171 | *(q-2) == FORBIDDENBYTE && |
| 1172 | *(q-1) == FORBIDDENBYTE) { |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1173 | fputs("FORBIDDENBYTE, as expected.\n", stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1174 | } |
| 1175 | else { |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1176 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n", |
| 1177 | FORBIDDENBYTE); |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1178 | for (i = 4; i >= 1; --i) { |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1179 | const uchar byte = *(q-i); |
| 1180 | fprintf(stderr, " at p-%d: 0x%02x", i, byte); |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1181 | if (byte != FORBIDDENBYTE) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1182 | fputs(" *** OUCH", stderr); |
| 1183 | fputc('\n', stderr); |
| 1184 | } |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1185 | |
| 1186 | fputs(" Because memory is corrupted at the start, the " |
| 1187 | "count of bytes requested\n" |
| 1188 | " may be bogus, and checking the trailing pad " |
| 1189 | "bytes may segfault.\n", stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1190 | } |
| 1191 | |
| 1192 | tail = q + nbytes; |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1193 | fprintf(stderr, " The 4 pad bytes at tail=%p are ", tail); |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1194 | if (tail[0] == FORBIDDENBYTE && |
| 1195 | tail[1] == FORBIDDENBYTE && |
| 1196 | tail[2] == FORBIDDENBYTE && |
| 1197 | tail[3] == FORBIDDENBYTE) { |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1198 | fputs("FORBIDDENBYTE, as expected.\n", stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1199 | } |
| 1200 | else { |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1201 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n", |
| 1202 | FORBIDDENBYTE); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1203 | for (i = 0; i < 4; ++i) { |
| 1204 | const uchar byte = tail[i]; |
| 1205 | fprintf(stderr, " at tail+%d: 0x%02x", |
| 1206 | i, byte); |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1207 | if (byte != FORBIDDENBYTE) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1208 | fputs(" *** OUCH", stderr); |
| 1209 | fputc('\n', stderr); |
| 1210 | } |
| 1211 | } |
| 1212 | |
| 1213 | serial = read4(tail+4); |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1214 | fprintf(stderr, " The block was made by call #%lu to " |
| 1215 | "debug malloc/realloc.\n", serial); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1216 | |
| 1217 | if (nbytes > 0) { |
| 1218 | int i = 0; |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1219 | fputs(" Data at p:", stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1220 | /* print up to 8 bytes at the start */ |
| 1221 | while (q < tail && i < 8) { |
| 1222 | fprintf(stderr, " %02x", *q); |
| 1223 | ++i; |
| 1224 | ++q; |
| 1225 | } |
| 1226 | /* and up to 8 at the end */ |
| 1227 | if (q < tail) { |
| 1228 | if (tail - q > 8) { |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1229 | fputs(" ...", stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1230 | q = tail - 8; |
| 1231 | } |
| 1232 | while (q < tail) { |
| 1233 | fprintf(stderr, " %02x", *q); |
| 1234 | ++q; |
| 1235 | } |
| 1236 | } |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1237 | fputc('\n', stderr); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1238 | } |
| 1239 | } |
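/* A sketch of the report for a healthy block (addresses, sizes and the serial
 * number are invented; exact spacing is approximate):
 *
 *     Debug memory block at address p=0x401f8008:
 *         64 bytes originally requested
 *         The 4 pad bytes at p-4 are FORBIDDENBYTE, as expected.
 *         The 4 pad bytes at tail=0x401f8048 are FORBIDDENBYTE, as expected.
 *         The block was made by call #42 to debug malloc/realloc.
 *         Data at p: cb cb cb cb cb cb cb cb ... cb cb cb cb cb cb cb cb
 */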
| 1240 | |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1241 | static ulong |
| 1242 | printone(const char* msg, ulong value) |
| 1243 | { |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1244 | int i, k; |
| 1245 | char buf[100]; |
| 1246 | ulong origvalue = value; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1247 | |
| 1248 | fputs(msg, stderr); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1249 | for (i = (int)strlen(msg); i < 35; ++i) |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1250 | fputc(' ', stderr); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1251 | fputc('=', stderr); |
| 1252 | |
| 1253 | /* Write the value with commas. */ |
| 1254 | i = 22; |
| 1255 | buf[i--] = '\0'; |
| 1256 | buf[i--] = '\n'; |
| 1257 | k = 3; |
| 1258 | do { |
| 1259 | ulong nextvalue = value / 10UL; |
| 1260 | uint digit = value - nextvalue * 10UL; |
| 1261 | value = nextvalue; |
| 1262 | buf[i--] = (char)(digit + '0'); |
| 1263 | --k; |
| 1264 | if (k == 0 && value && i >= 0) { |
| 1265 | k = 3; |
| 1266 | buf[i--] = ','; |
| 1267 | } |
| 1268 | } while (value && i >= 0); |
| 1269 | |
| 1270 | while (i >= 0) |
| 1271 | buf[i--] = ' '; |
| 1272 | fputs(buf, stderr); |
| 1273 | |
| 1274 | return origvalue; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1275 | } |
| 1276 | |
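/* A small illustration (the value is invented): printone("# bytes in
 * allocated blocks", 1234567) prints the message, pads it with spaces out to
 * column 35, then an '=' followed by the value right-justified in a 21-column
 * field with thousands separators:
 *
 *     # bytes in allocated blocks        =            1,234,567
 *
 * It returns the value unchanged so callers can accumulate a running total.
 */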
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1277 | /* Print summary info to stderr about the state of pymalloc's structures. |
| 1278 | * In Py_DEBUG mode, also perform some expensive internal consistency |
| 1279 | * checks. |
| 1280 | */ |
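/* A hedged usage note: the report is meant for humans poking at a
 * PYMALLOC_DEBUG build and can be triggered by calling
 * _PyObject_DebugMallocStats() directly, e.g. from a debugger.  Interpreter
 * builds of this vintage typically also dump it at shutdown when the
 * PYTHONMALLOCSTATS environment variable is set; that hook lives outside this
 * file, so treat it as an assumption here.
 */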
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1281 | void |
Tim Peters | 0e87118 | 2002-04-13 08:29:14 +0000 | [diff] [blame] | 1282 | _PyObject_DebugMallocStats(void) |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1283 | { |
| 1284 | uint i; |
| 1285 | const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1286 | /* # of pools, allocated blocks, and free blocks per class index */ |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1287 | ulong numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1288 | ulong numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1289 | ulong numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1290 | /* total # of allocated bytes in used and full pools */ |
| 1291 | ulong allocated_bytes = 0; |
| 1292 | /* total # of available bytes in used pools */ |
| 1293 | ulong available_bytes = 0; |
| 1294 | /* # of free pools + pools not yet carved out of current arena */ |
| 1295 | uint numfreepools = 0; |
| 1296 | /* # of bytes for arena alignment padding */ |
Tim Peters | 8a8cdfd | 2002-04-12 20:49:36 +0000 | [diff] [blame] | 1297 | ulong arena_alignment = 0; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1298 | /* # of bytes in used and full pools used for pool_headers */ |
| 1299 | ulong pool_header_bytes = 0; |
| 1300 | /* # of bytes in used and full pools wasted due to quantization, |
| 1301 | * i.e. the necessarily leftover space at the ends of used and |
| 1302 | * full pools. |
| 1303 | */ |
| 1304 | ulong quantization = 0; |
| 1305 | /* running total -- should equal narenas * ARENA_SIZE */ |
| 1306 | ulong total; |
| 1307 | char buf[128]; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1308 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1309 | fprintf(stderr, "Small block threshold = %d, in %u size classes.\n", |
| 1310 | SMALL_REQUEST_THRESHOLD, numclasses); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1311 | |
| 1312 | for (i = 0; i < numclasses; ++i) |
| 1313 | numpools[i] = numblocks[i] = numfreeblocks[i] = 0; |
| 1314 | |
Tim Peters | 6169f09 | 2002-04-01 20:12:59 +0000 | [diff] [blame] | 1315 | /* Because full pools aren't linked to from anything, it's easiest |
| 1316 | * to march over all the arenas. If we're lucky, most of the memory |
| 1317 | * will be living in full pools -- would be a shame to miss them. |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1318 | */ |
| 1319 | for (i = 0; i < narenas; ++i) { |
| 1320 | uint poolsinarena; |
| 1321 | uint j; |
| 1322 | uptr base = arenas[i]; |
| 1323 | |
| 1324 | /* round up to pool alignment */ |
| 1325 | poolsinarena = ARENA_SIZE / POOL_SIZE; |
| 1326 | if (base & (uptr)POOL_SIZE_MASK) { |
| 1327 | --poolsinarena; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1328 | arena_alignment += POOL_SIZE; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1329 | base &= ~(uptr)POOL_SIZE_MASK; |
| 1330 | base += POOL_SIZE; |
| 1331 | } |
| 1332 | |
| 1333 | if (i == narenas - 1) { |
| 1334 | /* current arena may have raw memory at the end */ |
| 1335 | numfreepools += nfreepools; |
| 1336 | poolsinarena -= nfreepools; |
| 1337 | } |
| 1338 | |
| 1339 | /* visit every pool in the arena */ |
| 1340 | for (j = 0; j < poolsinarena; ++j, base += POOL_SIZE) { |
| 1341 | poolp p = (poolp)base; |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1342 | const uint sz = p->szidx; |
| 1343 | uint freeblocks; |
| 1344 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1345 | if (p->ref.count == 0) { |
| 1346 | /* currently unused */ |
| 1347 | ++numfreepools; |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1348 | assert(pool_is_in_list(p, freepools)); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1349 | continue; |
| 1350 | } |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1351 | ++numpools[sz]; |
| 1352 | numblocks[sz] += p->ref.count; |
| 1353 | freeblocks = NUMBLOCKS(sz) - p->ref.count; |
| 1354 | numfreeblocks[sz] += freeblocks; |
| 1355 | #ifdef Py_DEBUG |
| 1356 | if (freeblocks > 0) |
| 1357 | assert(pool_is_in_list(p, usedpools[sz + sz])); |
| 1358 | #endif |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1359 | } |
| 1360 | } |
| 1361 | |
| 1362 | fputc('\n', stderr); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1363 | fputs("class size num pools blocks in use avail blocks\n" |
| 1364 | "----- ---- --------- ------------- ------------\n", |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1365 | stderr); |
| 1366 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1367 | for (i = 0; i < numclasses; ++i) { |
| 1368 | ulong p = numpools[i]; |
| 1369 | ulong b = numblocks[i]; |
| 1370 | ulong f = numfreeblocks[i]; |
Tim Peters | e70ddf3 | 2002-04-05 04:32:29 +0000 | [diff] [blame] | 1371 | uint size = INDEX2SIZE(i); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1372 | if (p == 0) { |
| 1373 | assert(b == 0 && f == 0); |
| 1374 | continue; |
| 1375 | } |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1376 | fprintf(stderr, "%5u %6u %11lu %15lu %13lu\n", |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1377 | i, size, p, b, f); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1378 | allocated_bytes += b * size; |
| 1379 | available_bytes += f * size; |
| 1380 | pool_header_bytes += p * POOL_OVERHEAD; |
| 1381 | quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1382 | } |
| 1383 | fputc('\n', stderr); |
Tim Peters | 0e87118 | 2002-04-13 08:29:14 +0000 | [diff] [blame] | 1384 | (void)printone("# times object malloc called", serialno); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1385 | |
| 1386 | PyOS_snprintf(buf, sizeof(buf), |
| 1387 | "%u arenas * %d bytes/arena", narenas, ARENA_SIZE); |
| 1388 | (void)printone(buf, (ulong)narenas * ARENA_SIZE); |
| 1389 | |
| 1390 | fputc('\n', stderr); |
| 1391 | |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1392 | total = printone("# bytes in allocated blocks", allocated_bytes); |
Tim Peters | 0e87118 | 2002-04-13 08:29:14 +0000 | [diff] [blame] | 1393 | total += printone("# bytes in available blocks", available_bytes); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1394 | |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1395 | PyOS_snprintf(buf, sizeof(buf), |
| 1396 | "%u unused pools * %d bytes", numfreepools, POOL_SIZE); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 1397 | total += printone(buf, (ulong)numfreepools * POOL_SIZE); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1398 | |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1399 | total += printone("# bytes lost to pool headers", pool_header_bytes); |
| 1400 | total += printone("# bytes lost to quantization", quantization); |
| 1401 | total += printone("# bytes lost to arena alignment", arena_alignment); |
| 1402 | (void)printone("Total", total); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1403 | } |
| 1404 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1405 | #endif /* PYMALLOC_DEBUG */ |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 1406 | |
| 1407 | #ifdef Py_USING_MEMORY_DEBUGGER |
| 1408 | /* Make this function last so gcc won't inline it,
| 1409 | since its definition comes after the references. */
| 1410 | int |
| 1411 | Py_ADDRESS_IN_RANGE(void *P, poolp pool) |
| 1412 | { |
| 1413 | return ((pool->arenaindex) < narenas && |
| 1414 | (uptr)(P) - arenas[pool->arenaindex] < (uptr)ARENA_SIZE); |
| 1415 | } |
| 1416 | #endif |