#include "Python.h"

#ifdef WITH_PYMALLOC

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2).  It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list).  This is also the place where
   the cyclic garbage collector operates selectively on container objects.

      Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */	/* disable mem limit checks */

/*==========================================================================*/
/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than 256 bytes are routed to the system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on an array of free lists". The main drawback
 * of simple segregated storage is that we might end up with a lot of
 * reserved memory for the different free lists, which can degenerate over
 * time. To avoid this, we partition each free list in pools and we share
 * dynamically the reserved space between all free lists. This technique
 * is quite efficient for memory-intensive programs which allocate mainly
 * small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      241-248                 248                      30
 *      249-256                 256                      31
 *
 *      0, 257 and up: routed to the underlying allocator.
 */
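
/* Illustration (not part of the original comment):  the table above is
 * equivalent to computing, for 0 < nbytes <= SMALL_REQUEST_THRESHOLD,
 *
 *      size class idx = (nbytes - 1) >> ALIGNMENT_SHIFT
 *      block size     = (idx + 1) << ALIGNMENT_SHIFT
 *
 * e.g. a 20-byte request maps to idx == (20-1)>>3 == 2 and is served from
 * the 24-byte class.  This is exactly the computation _PyMalloc_Malloc
 * performs below.
 */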

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8       /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#define ALIGNMENT_MASK          (ALIGNMENT - 1)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD be set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 256
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
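
/* With the defaults above, NB_SMALL_SIZE_CLASSES == 256/8 == 32, matching
 * size class indices 0 through 31 in the table earlier. */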

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc call). This in no way means
 * that the memory arenas will be used entirely. A malloc(<Big>) is usually
 * an address range reservation for <Big> bytes, unless all pages within this
 * space are referenced subsequently. So malloc'ing big blocks and not using
 * them does not mean "wasting memory". It's an addressable range wastage...
 *
 * Therefore, allocating arenas with malloc is not optimal, because there is
 * some address space wastage, but this is the most portable way to request
 * memory from the system across various platforms.
 */

/* ALLOCATED_ARENA_SIZE is passed to malloc; after alignment, we can't
 * count on more than ARENA_SIZE bytes being usable for pools.
 */
#define ALLOCATED_ARENA_SIZE    (256 << 10)     /* 256KB */
#define ARENA_SIZE              (ALLOCATED_ARENA_SIZE - SYSTEM_PAGE_SIZE)

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k, eventually 8k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK
#define ARENA_NB_POOLS          (ARENA_SIZE / POOL_SIZE)
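
/* With the defaults, ARENA_SIZE == 262144 - 4096 == 258048 bytes, so
 * ARENA_NB_POOLS == 258048 / 4096 == 63 pools per arena. */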

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  ushort
#define ushort  unsigned short  /* assuming >= 16 bits */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  off_t
#define off_t   uint    /* 16 bits <= off_t <= 64 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks */
struct pool_header {
        union { block *_padding;
                uint count; } ref;      /* number of allocated blocks    */
        block *freeblock;               /* pool's free list head         */
        struct pool_header *nextpool;   /* next pool of this size class  */
        struct pool_header *prevpool;   /* previous pool       ""        */
        ulong arenaindex;               /* index into arenas of base adr */
        uint szidx;                     /* block size class index        */
        uint capacity;                  /* pool capacity in # of blocks  */
};

typedef struct pool_header *poolp;

#undef  ROUNDUP
#define ROUNDUP(x)              (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)
#define POOL_OVERHEAD           ROUNDUP(sizeof(struct pool_header))

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P)    \
        ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))
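
/* Example (illustrative):  with 4K pools, POOL_ADDR masks off the low 12
 * bits of an address, so a block at (say) 0x805f3a8 belongs to the pool
 * whose header lives at 0x805f000.  This is how the free and realloc
 * paths below recover a block's pool from the block's address alone. */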

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock);
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- doubly linked lists of partially used pools
 */
#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)
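
/* How the PTA trick works (explanatory note, assuming the compiler packs
 * pool_header with no surprise padding):  usedpools[i+i] acts as the list
 * header for size class i, but is not a real pool_header.  PTA(x) points
 * 2*sizeof(block *) bytes *before* usedpools[2*x], so that the fake
 * header's nextpool and prevpool fields -- which follow the pointer-sized
 * ref union and the freeblock pointer -- land exactly on usedpools[2*x]
 * and usedpools[2*x+1].  PT(x) initializes both slots to PTA(x) itself,
 * an empty circular list, which is why "pool != pool->nextpool" in the
 * malloc path below tests for a non-empty list. */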

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
        PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
        , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
        , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
        , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
        , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
        , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
        , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
        , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};

/*
 * Free (cached) pools
 */
static poolp freepools = NULL;          /* free list for cached pools */

/*==========================================================================*/
/* Arena management. */

/* arenas is a vector of arena base addresses, in order of allocation time.
 * arenas currently contains narenas entries, and has space allocated
 * for at most maxarenas entries.
 *
 * CAUTION:  See the long comment block about thread safety in new_arena():
 * the code currently relies in deep ways on the fact that this vector only
 * grows, and only grows by appending at the end.  For now we never return
 * an arena to the OS.
 */
static uptr *arenas = NULL;
static ulong narenas = 0;
static ulong maxarenas = 0;

/* Number of pools already allocated from the current arena.  This is
 * initialized to the max # of pools to provoke the first allocation request
 * into allocating a new arena.
 */
static uint watermark = ARENA_NB_POOLS;

/* Free space start address in current arena. */
static block *arenabase = NULL;

#if 0
static ulong wasmine = 0;
static ulong wasntmine = 0;

static void
dumpem(void *ptr)
{
        if (ptr)
                printf("inserted new arena at %08x\n", ptr);
        printf("# arenas %d\n", narenas);
        printf("was mine %lu wasn't mine %lu\n", wasmine, wasntmine);
}
#define INCMINE ++wasmine
#define INCTHEIRS ++wasntmine

#else
#define dumpem(ptr)
#define INCMINE
#define INCTHEIRS
#endif

/* Allocate a new arena and return its base address.  If we run out of
 * memory, return NULL.
 */
static block *
new_arena(void)
{
        block *bp = (block *)PyMem_MALLOC(ALLOCATED_ARENA_SIZE);
        if (bp == NULL)
                return NULL;

        watermark = 0;
        /* Page-round up */
        arenabase = bp + (SYSTEM_PAGE_SIZE -
                          ((off_t )bp & SYSTEM_PAGE_SIZE_MASK));
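
        /* Illustrative arithmetic:  if malloc returned bp == 0x805f010, the
         * low bits are 0x010 and arenabase becomes bp + (0x1000 - 0x010)
         * == 0x8060000, the next page boundary.  When bp is already
         * page-aligned, this skips an entire page -- which is why
         * ARENA_SIZE reserves one page of slack below ALLOCATED_ARENA_SIZE.
         */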

        /* Make room for a new entry in the arenas vector. */
        if (arenas == NULL) {
                arenas = (uptr *)PyMem_MALLOC(16 * sizeof(*arenas));
                if (arenas == NULL)
                        goto error;
                maxarenas = 16;
                narenas = 0;
        }
        else if (narenas == maxarenas) {
                /* Grow arenas.  Don't use realloc:  if this fails, we
                 * don't want to lose the base addresses we already have.
                 * Exceedingly subtle:  Someone may be calling the pymalloc
                 * free via PyMem_{DEL, Del, FREE, Free} without holding the
                 * GIL.  Someone else may simultaneously be calling the
                 * pymalloc malloc while holding the GIL via, e.g.,
                 * PyObject_New.  Now the pymalloc free may index into arenas
                 * for an address check, while the pymalloc malloc calls
                 * new_arena and we end up here to grow a new arena *and*
                 * grow the arenas vector.  If the value for arenas pymalloc
                 * free picks up "vanishes" during this resize, anything may
                 * happen, and it would be an incredibly rare bug.  Therefore
                 * the code here takes great pains to make sure that, at every
                 * moment, arenas always points to an intact vector of
                 * addresses.  It doesn't matter whether arenas points to a
                 * wholly up-to-date vector when pymalloc free checks it in
                 * this case, because the only legal (and that even this is
                 * legal is debatable) way to call PyMem_{Del, etc} while not
                 * holding the GIL is if the memory being released is not
                 * object memory, i.e. if the address check in pymalloc free
                 * is supposed to fail.  Having an incomplete vector can't
                 * make a supposed-to-fail case succeed by mistake (it could
                 * only make a supposed-to-succeed case fail by mistake).
                 * Read the above 50 times before changing anything in this
                 * block.
                 * XXX Fudge.  This is still vulnerable:  there's nothing
                 * XXX to stop the bad-guy thread from picking up the
                 * XXX current value of arenas, but not indexing off of it
                 * XXX until after the PyMem_FREE(oldarenas) below completes.
                 */
                uptr *oldarenas;
                int newmax = maxarenas + (maxarenas >> 1);
                uptr *p = (uptr *)PyMem_MALLOC(newmax * sizeof(*arenas));
                if (p == NULL)
                        goto error;
                memcpy(p, arenas, narenas * sizeof(*arenas));
                oldarenas = arenas;
                arenas = p;
                PyMem_FREE(oldarenas);
                maxarenas = newmax;
        }

        /* Append the new arena address to arenas. */
        assert(narenas < maxarenas);
        arenas[narenas] = (uptr)bp;
        ++narenas;
        dumpem(bp);
        return bp;

error:
        PyMem_FREE(bp);
        return NULL;
}

/* Return true if and only if P is an address that was allocated by
 * pymalloc.  I must be the index into arenas that the address claims
 * to come from.
 * Tricky:  Letting B be the arena base address in arenas[I], P belongs to the
 * arena if and only if
 *      B <= P < B + ALLOCATED_ARENA_SIZE
 * Subtracting B throughout, this is true iff
 *      0 <= P-B < ALLOCATED_ARENA_SIZE
 * By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
 */
#define ADDRESS_IN_RANGE(P, I) \
        ((I) < narenas && (uptr)(P) - arenas[I] < (uptr)ALLOCATED_ARENA_SIZE)
/*==========================================================================*/

/* malloc */

/*
 * The basic blocks are ordered by decreasing execution frequency,
 * which minimizes the number of jumps in the most common cases,
 * improves branching prediction and instruction scheduling (small
 * block allocations typically result in a couple of instructions).
 * Unless the optimizer reorders everything, being too smart...
 */

void *
_PyMalloc_Malloc(size_t nbytes)
{
        block *bp;
        poolp pool;
        poolp next;
        uint size;

        /*
         * This implicitly redirects malloc(0):  since nbytes is unsigned,
         * nbytes - 1 wraps around to the largest size_t value when
         * nbytes == 0, so the test below fails and the request is routed
         * to the underlying allocator.
         */
        if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
                LOCK();
                /*
                 * Most frequent paths first
                 */
                size = (uint )(nbytes - 1) >> ALIGNMENT_SHIFT;
                pool = usedpools[size + size];
                if (pool != pool->nextpool) {
                        /*
                         * There is a used pool for this size class.
                         * Pick up the head block of its free list.
                         */
                        ++pool->ref.count;
                        bp = pool->freeblock;
                        if ((pool->freeblock = *(block **)bp) != NULL) {
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Reached the end of the free list, try to extend it
                         */
                        if (pool->ref.count < pool->capacity) {
                                /*
                                 * There is room for another block
                                 */
                                size++;
                                size <<= ALIGNMENT_SHIFT; /* block size */
                                pool->freeblock = (block *)pool + \
                                                  POOL_OVERHEAD + \
                                                  pool->ref.count * size;
                                *(block **)(pool->freeblock) = NULL;
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Pool is full, unlink from used pools
                         */
                        next = pool->nextpool;
                        pool = pool->prevpool;
                        next->prevpool = pool;
                        pool->nextpool = next;
                        UNLOCK();
                        return (void *)bp;
                }
                /*
                 * Try to get a cached free pool
                 */
                pool = freepools;
                if (pool != NULL) {
                        /*
                         * Unlink from cached pools
                         */
                        freepools = pool->nextpool;
                init_pool:
                        /*
                         * Frontlink to used pools
                         */
                        next = usedpools[size + size]; /* == prev */
                        pool->nextpool = next;
                        pool->prevpool = next;
                        next->nextpool = pool;
                        next->prevpool = pool;
                        pool->ref.count = 1;
                        if (pool->szidx == size) {
                                /*
                                 * Luckily, this pool last contained blocks
                                 * of the same size class, so its header
                                 * and free list are already initialized.
                                 */
                                bp = pool->freeblock;
                                pool->freeblock = *(block **)bp;
                                UNLOCK();
                                return (void *)bp;
                        }
                        /*
                         * Initialize the pool header and free list
                         * then return the first block.
                         */
                        pool->szidx = size;
                        size++;
                        size <<= ALIGNMENT_SHIFT; /* block size */
                        bp = (block *)pool + POOL_OVERHEAD;
                        pool->freeblock = bp + size;
                        *(block **)(pool->freeblock) = NULL;
                        pool->capacity = (POOL_SIZE - POOL_OVERHEAD) / size;
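                        /* Illustrative numbers (platform-dependent):  on a
                         * typical 32-bit build, sizeof(struct pool_header)
                         * is 28, so POOL_OVERHEAD rounds up to 32 and the
                         * 8-byte class holds (4096 - 32)/8 == 508 blocks
                         * per pool. */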
                        UNLOCK();
                        return (void *)bp;
                }
                /*
                 * Allocate new pool
                 */
                if (watermark < ARENA_NB_POOLS) {
                        /* commit malloc(POOL_SIZE) from the current arena */
                commit_pool:
                        watermark++;
                        pool = (poolp )arenabase;
                        arenabase += POOL_SIZE;
                        pool->arenaindex = narenas - 1;
                        pool->szidx = DUMMY_SIZE_IDX;
                        goto init_pool;
                }
                /*
                 * Allocate new arena
                 */
#ifdef WITH_MEMORY_LIMITS
                if (!(narenas < MAX_ARENAS)) {
                        UNLOCK();
                        goto redirect;
                }
#endif
                bp = new_arena();
                if (bp != NULL)
                        goto commit_pool;
                UNLOCK();
                goto redirect;
        }

        /* The small block allocator ends here. */

redirect:
        /*
         * Redirect the original request to the underlying (libc) allocator.
         * We jump here on bigger requests, on error in the code above (as a
         * last chance to serve the request) or when the max memory limit
         * has been reached.
         */
        return (void *)PyMem_MALLOC(nbytes);
}

/* free */

void
_PyMalloc_Free(void *p)
{
        poolp pool;
        poolp next, prev;
        uint size;

        if (p == NULL)  /* free(NULL) has no effect */
                return;

        pool = POOL_ADDR(p);
        if (ADDRESS_IN_RANGE(p, pool->arenaindex)) {
                /* We allocated this address. */
                INCMINE;
                LOCK();
                /*
                 * At this point, the pool is not empty
                 */
                if ((*(block **)p = pool->freeblock) == NULL) {
                        /*
                         * Pool was full
                         */
                        pool->freeblock = (block *)p;
                        --pool->ref.count;
                        /*
                         * Frontlink to used pools
                         * This mimics LRU pool usage for new allocations and
                         * targets optimal filling when several pools contain
                         * blocks of the same size class.
                         */
                        size = pool->szidx;
                        next = usedpools[size + size];
                        prev = next->prevpool;
                        pool->nextpool = next;
                        pool->prevpool = prev;
                        next->prevpool = pool;
                        prev->nextpool = pool;
                        UNLOCK();
                        return;
                }
                /*
                 * Pool was not full
                 */
                pool->freeblock = (block *)p;
                if (--pool->ref.count != 0) {
                        UNLOCK();
                        return;
                }
                /*
                 * Pool is now empty, unlink from used pools
                 */
                next = pool->nextpool;
                prev = pool->prevpool;
                next->prevpool = prev;
                prev->nextpool = next;
                /*
                 * Frontlink to free pools
                 * This ensures that previously freed pools are reused
                 * later (not being referenced, they have perhaps been
                 * paged out).
                 */
                pool->nextpool = freepools;
                freepools = pool;
                UNLOCK();
                return;
        }

        /* We did not allocate this address. */
        INCTHEIRS;
        PyMem_FREE(p);
}

/* realloc */

void *
_PyMalloc_Realloc(void *p, size_t nbytes)
{
        block *bp;
        poolp pool;
        uint size;

        if (p == NULL)
                return _PyMalloc_Malloc(nbytes);

        /* realloc(p, 0) on big blocks is redirected. */
        pool = POOL_ADDR(p);
        if (ADDRESS_IN_RANGE(p, pool->arenaindex)) {
                /* We're in charge of this block */
                INCMINE;
                size = (pool->szidx + 1) << ALIGNMENT_SHIFT; /* block size */
                if (size >= nbytes) {
                        /* Don't bother if a smaller size was requested
                           except for realloc(p, 0) == free(p), ret NULL */
                        /* XXX but Python guarantees that *its* flavor of
                           resize(p, 0) will not do a free or return NULL */
                        if (nbytes == 0) {
                                _PyMalloc_Free(p);
                                bp = NULL;
                        }
                        else
                                bp = (block *)p;
                }
                else {
                        bp = (block *)_PyMalloc_Malloc(nbytes);
                        if (bp != NULL) {
                                memcpy(bp, p, size);
                                _PyMalloc_Free(p);
                        }
                }
        }
        else {
                /* We haven't allocated this block */
                INCTHEIRS;
                if (nbytes <= SMALL_REQUEST_THRESHOLD && nbytes) {
                        /* small request */
                        size = nbytes;
                        bp = (block *)_PyMalloc_Malloc(nbytes);
                        if (bp != NULL) {
                                memcpy(bp, p, size);
                                _PyMalloc_Free(p);
                        }
                }
                else
                        bp = (block *)PyMem_REALLOC(p, nbytes);
        }
        return (void *)bp;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to the PyMem family. */

void *
_PyMalloc_Malloc(size_t n)
{
        return PyMem_MALLOC(n);
}

void *
_PyMalloc_Realloc(void *p, size_t n)
{
        return PyMem_REALLOC(p, n);
}

void
_PyMalloc_Free(void *p)
{
        PyMem_FREE(p);
}
#endif /* WITH_PYMALLOC */

/*==========================================================================*/
/* Regardless of whether pymalloc is enabled, export entry points for
 * the object-oriented pymalloc functions.
 */

PyObject *
_PyMalloc_New(PyTypeObject *tp)
{
        PyObject *op;
        op = (PyObject *) _PyMalloc_MALLOC(_PyObject_SIZE(tp));
        if (op == NULL)
                return PyErr_NoMemory();
        return PyObject_INIT(op, tp);
}

PyVarObject *
_PyMalloc_NewVar(PyTypeObject *tp, int nitems)
{
        PyVarObject *op;
        const size_t size = _PyObject_VAR_SIZE(tp, nitems);
        op = (PyVarObject *) _PyMalloc_MALLOC(size);
        if (op == NULL)
                return (PyVarObject *)PyErr_NoMemory();
        return PyObject_INIT_VAR(op, tp, nitems);
}

void
_PyMalloc_Del(PyObject *op)
{
        _PyMalloc_FREE(op);
}

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory
 * directly; it wraps a real allocator, adding extra debugging info to the
 * memory blocks.
 */

#define PYMALLOC_CLEANBYTE      0xCB    /* uninitialized memory */
#define PYMALLOC_DEADBYTE       0xDB    /* free()ed memory */
#define PYMALLOC_FORBIDDENBYTE  0xFB    /* unusable memory */

static ulong serialno = 0;      /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
   to supply a single place to set a breakpoint.
*/
static void
bumpserialno(void)
{
        ++serialno;
}


/* Read 4 bytes at p as a big-endian ulong. */
static ulong
read4(const void *p)
{
        const uchar *q = (const uchar *)p;
        return ((ulong)q[0] << 24) |
               ((ulong)q[1] << 16) |
               ((ulong)q[2] <<  8) |
                (ulong)q[3];
}

/* Write the 4 least-significant bytes of n as a big-endian unsigned int,
   MSB at address p, LSB at p+3. */
static void
write4(void *p, ulong n)
{
        uchar *q = (uchar *)p;
        q[0] = (uchar)((n >> 24) & 0xff);
        q[1] = (uchar)((n >> 16) & 0xff);
        q[2] = (uchar)((n >>  8) & 0xff);
        q[3] = (uchar)( n        & 0xff);
}

/* The debug malloc asks for 16 extra bytes and fills them with useful stuff,
   here calling the underlying malloc's result p:

p[0:4]
    Number of bytes originally asked for.  4-byte unsigned integer,
    big-endian (easier to read in a memory dump).
p[4:8]
    Copies of PYMALLOC_FORBIDDENBYTE.  Used to catch under-writes
    and reads.
p[8:8+n]
    The requested memory, filled with copies of PYMALLOC_CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[8] is returned.  Note that this is 8-byte aligned if PyMalloc
    handled the request itself.
p[8+n:8+n+4]
    Copies of PYMALLOC_FORBIDDENBYTE.  Used to catch over-writes
    and reads.
p[8+n+4:8+n+8]
    A serial number, incremented by 1 on each call to _PyMalloc_DebugMalloc
    and _PyMalloc_DebugRealloc.
    4-byte unsigned integer, big-endian.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
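
/* Illustrative layout (not in the original comment):  for the first debug
   allocation of n == 3 bytes, the 19-byte underlying block looks like

       00 00 00 03  FB FB FB FB  CB CB CB  FB FB FB FB  00 00 00 01
       [ size n=3 ] [ low pad  ] [ data  ] [ high pad ] [ serial 1 ]

   and the caller receives a pointer to the first CB byte.
*/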

void *
_PyMalloc_DebugMalloc(size_t nbytes)
{
        uchar *p;       /* base address of malloc'ed block */
        uchar *tail;    /* p + 8 + nbytes == pointer to tail pad bytes */
        size_t total;   /* nbytes + 16 */

        bumpserialno();
        total = nbytes + 16;
        if (total < nbytes || (total >> 31) > 1) {
                /* overflow, or we can't represent it in 4 bytes */
                /* Obscure:  can't do (total >> 32) != 0 instead, because
                   C doesn't define what happens for a right-shift of 32
                   when size_t is a 32-bit type.  At least C guarantees
                   size_t is an unsigned type. */
                return NULL;
        }

        p = _PyMalloc_Malloc(total);
        if (p == NULL)
                return NULL;

        write4(p, nbytes);
        p[4] = p[5] = p[6] = p[7] = PYMALLOC_FORBIDDENBYTE;

        if (nbytes > 0)
                memset(p+8, PYMALLOC_CLEANBYTE, nbytes);

        tail = p + 8 + nbytes;
        tail[0] = tail[1] = tail[2] = tail[3] = PYMALLOC_FORBIDDENBYTE;
        write4(tail + 4, serialno);

        return p+8;
}

/* The debug free first checks the 8 bytes on each end for sanity (in
   particular, that the PYMALLOC_FORBIDDENBYTEs are still intact).
   Then fills the original bytes with PYMALLOC_DEADBYTE.
   Then calls the underlying free.
*/
void
_PyMalloc_DebugFree(void *p)
{
        uchar *q = (uchar *)p;
        size_t nbytes;

        if (p == NULL)
                return;
        _PyMalloc_DebugCheckAddress(p);
        nbytes = read4(q-8);
        if (nbytes > 0)
                memset(q, PYMALLOC_DEADBYTE, nbytes);
        _PyMalloc_Free(q-8);
}

void *
_PyMalloc_DebugRealloc(void *p, size_t nbytes)
{
        uchar *q = (uchar *)p;
        size_t original_nbytes;
        void *fresh;    /* new memory block, if needed */

        if (p == NULL)
                return _PyMalloc_DebugMalloc(nbytes);

        _PyMalloc_DebugCheckAddress(p);
        original_nbytes = read4(q-8);
        if (nbytes == original_nbytes) {
                /* note that this case is likely to be common due to the
                   way Python appends to lists */
                bumpserialno();
                write4(q + nbytes + 4, serialno);
                return p;
        }

        if (nbytes < original_nbytes) {
                /* shrinking -- leave the guts alone, except to
                   fill the excess with DEADBYTE */
                const size_t excess = original_nbytes - nbytes;
                bumpserialno();
                write4(q-8, nbytes);
                /* kill the excess bytes plus the trailing 8 pad bytes */
                q += nbytes;
                q[0] = q[1] = q[2] = q[3] = PYMALLOC_FORBIDDENBYTE;
                write4(q+4, serialno);
                memset(q+8, PYMALLOC_DEADBYTE, excess);
                return p;
        }

        /* More memory is needed:  get it, copy over the first original_nbytes
           of the original data, and free the original memory. */
        fresh = _PyMalloc_DebugMalloc(nbytes);
        if (fresh != NULL && original_nbytes > 0)
                memcpy(fresh, p, original_nbytes);
        _PyMalloc_DebugFree(p);
        return fresh;
}

void
_PyMalloc_DebugCheckAddress(const void *p)
{
        const uchar *q = (const uchar *)p;
        char *msg;
        int i;

        if (p == NULL) {
                msg = "didn't expect a NULL pointer";
                goto error;
        }

        for (i = 4; i >= 1; --i) {
                if (*(q-i) != PYMALLOC_FORBIDDENBYTE) {
                        msg = "bad leading pad byte";
                        goto error;
                }
        }

        {
                const ulong nbytes = read4(q-8);
                const uchar *tail = q + nbytes;
                for (i = 0; i < 4; ++i) {
                        if (tail[i] != PYMALLOC_FORBIDDENBYTE) {
                                msg = "bad trailing pad byte";
                                goto error;
                        }
                }
        }

        return;

error:
        _PyMalloc_DebugDumpAddress(p);
        Py_FatalError(msg);
}

void
_PyMalloc_DebugDumpAddress(const void *p)
{
        const uchar *q = (const uchar *)p;
        const uchar *tail;
        ulong nbytes, serial;
        int i;

        fprintf(stderr, "Debug memory block at address p=%p:\n", p);
        if (p == NULL)
                return;

        nbytes = read4(q-8);
        fprintf(stderr, "    %lu bytes originally allocated\n", nbytes);

        /* In case this is nuts, check the pad bytes before trying to read up
           the serial number (the address deref could blow up). */

        fputs("    the 4 pad bytes at p-4 are ", stderr);
        if (*(q-4) == PYMALLOC_FORBIDDENBYTE &&
            *(q-3) == PYMALLOC_FORBIDDENBYTE &&
            *(q-2) == PYMALLOC_FORBIDDENBYTE &&
            *(q-1) == PYMALLOC_FORBIDDENBYTE) {
                fputs("PYMALLOC_FORBIDDENBYTE, as expected\n", stderr);
        }
        else {
                fprintf(stderr, "not all PYMALLOC_FORBIDDENBYTE (0x%02x):\n",
                        PYMALLOC_FORBIDDENBYTE);
                for (i = 4; i >= 1; --i) {
                        const uchar byte = *(q-i);
                        fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
                        if (byte != PYMALLOC_FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }
        }

        tail = q + nbytes;
        fprintf(stderr, "    the 4 pad bytes at tail=%p are ", tail);
        if (tail[0] == PYMALLOC_FORBIDDENBYTE &&
            tail[1] == PYMALLOC_FORBIDDENBYTE &&
            tail[2] == PYMALLOC_FORBIDDENBYTE &&
            tail[3] == PYMALLOC_FORBIDDENBYTE) {
                fputs("PYMALLOC_FORBIDDENBYTE, as expected\n", stderr);
        }
        else {
                fprintf(stderr, "not all PYMALLOC_FORBIDDENBYTE (0x%02x):\n",
                        PYMALLOC_FORBIDDENBYTE);
                for (i = 0; i < 4; ++i) {
                        const uchar byte = tail[i];
                        fprintf(stderr, "        at tail+%d: 0x%02x",
                                i, byte);
                        if (byte != PYMALLOC_FORBIDDENBYTE)
                                fputs(" *** OUCH", stderr);
                        fputc('\n', stderr);
                }
        }

        serial = read4(tail+4);
        fprintf(stderr, "    the block was made by call #%lu to "
                        "debug malloc/realloc\n", serial);

        if (nbytes > 0) {
                int i = 0;
                fputs("    data at p:", stderr);
                /* print up to 8 bytes at the start */
                while (q < tail && i < 8) {
                        fprintf(stderr, " %02x", *q);
                        ++i;
                        ++q;
                }
                /* and up to 8 at the end */
                if (q < tail) {
                        if (tail - q > 8) {
                                fputs(" ...", stderr);
                                q = tail - 8;
                        }
                        while (q < tail) {
                                fprintf(stderr, " %02x", *q);
                                ++q;
                        }
                }
                fputc('\n', stderr);
        }
}

#endif  /* PYMALLOC_DEBUG */