#include "Python.h"

/* Python's malloc wrappers (see pymem.h) */

#ifdef PYMALLOC_DEBUG   /* WITH_PYMALLOC && PYMALLOC_DEBUG */
/* Forward declaration */
static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_Malloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif


#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawRealloc, _PyMem_RawFree
#ifdef WITH_PYMALLOC
#define PYOBJECT_FUNCS _PyObject_Malloc, _PyObject_Realloc, _PyObject_Free
#else
#define PYOBJECT_FUNCS PYRAW_FUNCS
#endif

#ifdef PYMALLOC_DEBUG
typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocator alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', {NULL, PYRAW_FUNCS}},
    {'m', {NULL, PYRAW_FUNCS}},
    {'o', {NULL, PYOBJECT_FUNCS}}
};

#define PYDEBUG_FUNCS _PyMem_DebugMalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
#endif

static PyMemAllocator _PyMem_Raw = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.raw, PYDEBUG_FUNCS
#else
    NULL, PYRAW_FUNCS
#endif
};

static PyMemAllocator _PyMem = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.mem, PYDEBUG_FUNCS
#else
    NULL, PYRAW_FUNCS
#endif
};

static PyMemAllocator _PyObject = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.obj, PYDEBUG_FUNCS
#else
    NULL, PYOBJECT_FUNCS
#endif
};

#undef PYRAW_FUNCS
#undef PYOBJECT_FUNCS
#undef PYDEBUG_FUNCS

static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
};

void
PyMem_SetupDebugHooks(void)
{
#ifdef PYMALLOC_DEBUG
    PyMemAllocator alloc;

    alloc.malloc = _PyMem_DebugMalloc;
    alloc.realloc = _PyMem_DebugRealloc;
    alloc.free = _PyMem_DebugFree;

    if (_PyMem_Raw.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.raw;
        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }

    if (_PyMem.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.mem;
        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }

    if (_PyObject.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.obj;
        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
#endif
}

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocator *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocator *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
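
/* Example: how a third-party hook can chain in front of one of the domains
   above using PyMem_GetAllocator() / PyMem_SetAllocator().  This is a
   minimal illustrative sketch, not part of obmalloc; the example_* names
   are hypothetical.  It assumes the hook is installed before any
   allocation it needs to observe. */

typedef struct {
    PyMemAllocator orig;        /* saved allocator we delegate to */
    size_t nallocs;             /* successful allocations observed */
} example_hook_t;

static example_hook_t example_hook;

static void *
example_hook_malloc(void *ctx, size_t size)
{
    example_hook_t *hook = (example_hook_t *)ctx;
    void *p = hook->orig.malloc(hook->orig.ctx, size);
    if (p != NULL)
        hook->nallocs++;
    return p;
}

static void *
example_hook_realloc(void *ctx, void *ptr, size_t size)
{
    example_hook_t *hook = (example_hook_t *)ctx;
    return hook->orig.realloc(hook->orig.ctx, ptr, size);
}

static void
example_hook_free(void *ctx, void *ptr)
{
    example_hook_t *hook = (example_hook_t *)ctx;
    hook->orig.free(hook->orig.ctx, ptr);
}

static void
example_install_hook(void)
{
    PyMemAllocator alloc;
    alloc.ctx = &example_hook;
    alloc.malloc = example_hook_malloc;
    alloc.realloc = example_hook_realloc;
    alloc.free = example_hook_free;
    /* save the current raw allocator, then splice ours in front of it */
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &example_hook.orig);
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
}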

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}
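
/* Similarly, the arena allocator can be swapped via the pair above.  A
   minimal sketch (illustrative only; the example_* names are hypothetical)
   that traces arena allocation while delegating to the saved hooks: */

static PyObjectArenaAllocator example_orig_arena;

static void *
example_arena_alloc(void *ctx, size_t size)
{
    void *p = example_orig_arena.alloc(example_orig_arena.ctx, size);
    fprintf(stderr, "arena of %lu bytes -> %p\n", (unsigned long)size, p);
    return p;
}

static void
example_arena_free(void *ctx, void *ptr, size_t size)
{
    example_orig_arena.free(example_orig_arena.ctx, ptr, size);
}

static void
example_install_arena_hook(void)
{
    /* field order matches _PyObject_Arena above: {ctx, alloc, free} */
    PyObjectArenaAllocator alloc = {NULL, example_arena_alloc,
                                    example_arena_free};
    PyObject_GetArenaAllocator(&example_orig_arena);
    PyObject_SetArenaAllocator(&alloc);
}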

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;

    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}

void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}

char *
_PyMem_RawStrdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_RawMalloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_Malloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory -->     |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------>     |
    ______________________________________________________________        |
   [          Python's raw memory allocator (PyMem_ API)          ]       |
+1 | <----- Python memory (under PyMem manager's control) ------> |       |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
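
/* A worked example of the layer diagram above (illustrative sketch only):
   each pointer must be released by the allocator that produced it. */
static void
example_layers(void)
{
    void *raw = malloc(100);            /* layer  0: C library allocator */
    void *mem = PyMem_Malloc(100);      /* layer +1: Python raw memory API */
    void *obj = PyObject_Malloc(100);   /* layer +2: object allocator */
    PyObject_Free(obj);
    PyMem_Free(mem);
    free(raw);
}
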
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on an array of free lists". The main drawback
 * of simple segregated storage is that we might end up with a lot of
 * reserved memory for the different free lists, which can degenerate over
 * time. To avoid this, we partition each free list in pools and we share
 * dynamically the reserved space between all free lists. This technique
 * is quite efficient for memory-intensive programs which allocate mainly
 * small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 * allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)
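
/* Worked example (illustrative sketch): the request-size -> size-class
   mapping from the table earlier.  The example_* name is hypothetical. */
static unsigned int
example_size_class(size_t nbytes)
{
    /* valid for 1 <= nbytes <= the small-request threshold; e.g. a
       20-byte request maps to class (20-1)>>3 == 2, served from
       INDEX2SIZE(2) == 24-byte blocks. */
    return (unsigned int)(nbytes - 1) >> ALIGNMENT_SHIFT;
}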

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
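
/* The two invariants above can be checked at compile time; a minimal
   sketch (obmalloc itself does not carry these #error checks here): */
#if SMALL_REQUEST_THRESHOLD < ALIGNMENT || SMALL_REQUEST_THRESHOLD > 512
#error "invariant 1 violated: ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512"
#endif
#if SMALL_REQUEST_THRESHOLD % ALIGNMENT != 0
#error "invariant 2 violated: SMALL_REQUEST_THRESHOLD not divisible by ALIGNMENT"
#endif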

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory"; it wastes addressable
 * range only...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
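
/* Worked example (illustrative sketch; the example_* name is hypothetical,
   and the numbers assume a typical 64-bit build where
   sizeof(struct pool_header) == 48): */
static void
example_pool_capacity(void)
{
    fprintf(stderr, "pool overhead: %u bytes\n", (uint)POOL_OVERHEAD);
    /* (4096 - 48) / 8 == 506 blocks of 8 bytes (size class 0) */
    fprintf(stderr, "8-byte blocks per pool: %u\n", NUMBLOCKS(0));
    /* (4096 - 48) / 512 == 7 blocks of 512 bytes (size class 63) */
    fprintf(stderr, "512-byte blocks per pool: %u\n", NUMBLOCKS(63));
}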

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
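
/* Illustrative sketch of the malloc fast path this comment describes
   (simplified; the transition to the full state, i.e. unlinking the pool
   from usedpools[], is omitted, and the example_* name is hypothetical). */
static block *
example_pop_block(poolp pool)
{
    block *bp = pool->freeblock;        /* head of the free-block list */
    assert(bp != NULL);                 /* pool is in the used state */
    pool->freeblock = *(block **)bp;    /* pop it off */
    if (pool->freeblock == NULL &&
        pool->nextoffset <= pool->maxnextoffset) {
        /* Free list empty: carve one virgin block off the pool and leave
           it as a one-block free list, per the "first two blocks" rule. */
        pool->freeblock = (block *)pool + pool->nextoffset;
        pool->nextoffset += INDEX2SIZE(pool->szidx);
        *(block **)(pool->freeblock) = NULL;
    }
    ++pool->ref.count;
    return bp;
}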

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should not exceed 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
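
/* Illustrative sketch: how the fudged-up headers above are consulted.  For
   size class index `size`, usedpools[size + size] is the list header; a
   header whose nextpool points back at itself is an empty list (simplified
   from the lookup in _PyObject_Malloc; the example_* name is hypothetical). */
static int
example_have_used_pool(uint size)
{
    poolp pool = usedpools[size + size];
    return pool != pool->nextpool;
}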

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;
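
/* Illustrative sketch (hypothetical debug helper, not part of obmalloc):
   checking the key invariants of the two lists described above. */
static void
example_check_arena_lists(void)
{
    struct arena_object *ao;
    for (ao = unused_arena_objects; ao != NULL; ao = ao->nextarena)
        assert(ao->address == 0);       /* unassociated <=> .address is 0 */
    for (ao = usable_arenas; ao != NULL; ao = ao->nextarena) {
        assert(ao->address != 0);       /* has an arena ... */
        assert(ao->nfreepools > 0);     /* ... with pools available */
        if (ao->nextarena != NULL)      /* ascending nfreepools order */
            assert(ao->nfreepools <= ao->nextarena->nfreepools);
    }
}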

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats(stderr);
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_Realloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;      /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uptr)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky:  Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
| 992 | Obscure: A PyMem "free memory" function can call the pymalloc free or realloc |
| 993 | before the first arena has been allocated. `arenas` is still NULL in that |
| 994 | case. We're relying on that maxarenas is also 0 in that case, so that |
| 995 | (POOL)->arenaindex < maxarenas must be false, saving us from trying to index |
| 996 | into a NULL arenas. |
| 997 | |
| 998 | Details: given P and POOL, the arena_object corresponding to P is AO = |
| 999 | arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild |
| 1000 | stores, etc), POOL is the correct address of P's pool, AO.address is the |
| 1001 | correct base address of the pool's arena, and P must be within ARENA_SIZE of |
| 1002 | AO.address. In addition, AO.address is not 0 (no arena can start at address 0 |
| 1003 | (NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc |
| 1004 | controls P. |
| 1005 | |
| 1006 | Now suppose obmalloc does not control P (e.g., P was obtained via a direct |
| 1007 | call to the system malloc() or realloc()). (POOL)->arenaindex may be anything |
| 1008 | in this case -- it may even be uninitialized trash. If the trash arenaindex |
| 1009 | is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't |
| 1010 | control P. |
| 1011 | |
| 1012 | Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
| 1013 | allocated arena, obmalloc controls all the memory in slice AO.address : |
| 1014 | AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc, |
| 1015 | so P doesn't lie in that slice, so the macro correctly reports that P is not |
| 1016 | controlled by obmalloc. |
| 1017 | |
| 1018 | Finally, if P is not controlled by obmalloc and AO corresponds to an unused |
| 1019 | arena_object (one not currently associated with an allocated arena), |
| 1020 | AO.address is 0, and the second test in the macro reduces to: |
| 1021 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1022 | P < ARENA_SIZE |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1023 | |
| 1024 | If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes |
| 1025 | that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part |
| 1026 | of the test still passes, and the third clause (AO.address != 0) is necessary |
| 1027 | to get the correct result: AO.address is 0 in this case, so the macro |
| 1028 | correctly reports that P is not controlled by obmalloc (despite that P lies in |
| 1029 | slice AO.address : AO.address + ARENA_SIZE). |
| 1030 | |
| 1031 | Note: The third (AO.address != 0) clause was added in Python 2.5. Before |
| 1032 | 2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
| 1033 | corresponded to a currently-allocated arena, so the "P is not controlled by |
| 1034 | obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case |
| 1035 | was impossible. |
| 1036 | |
| 1037 | Note that the logic is excruciating, and reading up possibly uninitialized |
| 1038 | memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex) |
| 1039 | creates problems for some memory debuggers. The overwhelming advantage is |
| 1040 | that this test determines whether an arbitrary address is controlled by |
| 1041 | obmalloc in a small constant time, independent of the number of arenas |
| 1042 | obmalloc controls. Since this test is needed at every entry point, it's |
| 1043 | extremely desirable that it be this fast. |
Antoine Pitrou | b7fb2e2 | 2011-01-07 21:43:59 +0000 | [diff] [blame] | 1044 | |
| 1045 | Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated |
| 1046 | by Python, it is important that (POOL)->arenaindex is read only once, as |
| 1047 | another thread may be concurrently modifying the value without holding the |
| 1048 | GIL. To accomplish this, the arenaindex_temp variable is used to store |
| 1049 | (POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's |
| 1050 | execution. The caller of the macro is responsible for declaring this |
| 1051 | variable. |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1052 | */ |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1053 | #define Py_ADDRESS_IN_RANGE(P, POOL) \ |
Antoine Pitrou | b7fb2e2 | 2011-01-07 21:43:59 +0000 | [diff] [blame] | 1054 | ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \ |
| 1055 | (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \ |
| 1056 | arenas[arenaindex_temp].address != 0) |
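
/* A minimal usage sketch (illustrative; the function name is
 * hypothetical): the caller computes POOL_ADDR(p) once and declares
 * arenaindex_temp itself, exactly as required above. This mirrors how
 * _PyObject_Free and _PyObject_Realloc below use the macro. */
static int
address_is_ours(void *p)
{
    poolp pool = POOL_ADDR(p);
    uint arenaindex_temp;               /* consumed by the macro */

    return Py_ADDRESS_IN_RANGE(p, pool);
}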
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1057 | |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 1058 | |
| 1059 | /* This is only useful when running memory debuggers such as |
| 1060 | * Purify or Valgrind. Uncomment to use. |
| 1061 | * |
Martin v. Löwis | 9f2e346 | 2007-07-21 17:22:18 +0000 | [diff] [blame] | 1062 | #define Py_USING_MEMORY_DEBUGGER |
Martin v. Löwis | 6fea233 | 2008-09-25 04:15:27 +0000 | [diff] [blame] | 1063 | */ |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 1064 | |
| 1065 | #ifdef Py_USING_MEMORY_DEBUGGER |
| 1066 | |
| 1067 | /* Py_ADDRESS_IN_RANGE may access uninitialized memory by design |
| 1068 | * This leads to thousands of spurious warnings when using |
| 1069 | * Purify or Valgrind. By making it a function, we can easily
| 1070 | * suppress the uninitialized memory reads in this one function
| 1071 | * without having to ignore real errors elsewhere.
| 1072 | * |
| 1073 | * Disable the macro and use a function. |
| 1074 | */ |
| 1075 | |
| 1076 | #undef Py_ADDRESS_IN_RANGE |
| 1077 | |
Thomas Wouters | 89f507f | 2006-12-13 04:49:30 +0000 | [diff] [blame] | 1078 | #if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \ |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 1079 | (__GNUC__ >= 4)) |
Neal Norwitz | e5e5aa4 | 2005-11-13 18:55:39 +0000 | [diff] [blame] | 1080 | #define Py_NO_INLINE __attribute__((__noinline__)) |
| 1081 | #else |
| 1082 | #define Py_NO_INLINE |
| 1083 | #endif |
| 1084 | |
| 1085 | /* Don't make static, to try to ensure this isn't inlined. */ |
| 1086 | int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE; |
| 1087 | #undef Py_NO_INLINE |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 1088 | #endif |
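
/* For reference, a sketch (an assumption, mirroring the macro above) of
 * what the out-of-line version would look like; the actual definition
 * lives elsewhere in this file, far from its callers, so the compiler
 * cannot inline it: */
#if 0   /* illustrative only */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
    uint arenaindex_temp = pool->arenaindex;

    return arenaindex_temp < maxarenas &&
           (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
           arenas[arenaindex_temp].address != 0;
}
#endif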
Tim Peters | 338e010 | 2002-04-01 19:23:44 +0000 | [diff] [blame] | 1089 | |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1090 | /*==========================================================================*/ |
| 1091 | |
Tim Peters | 84c1b97 | 2002-04-04 04:44:32 +0000 | [diff] [blame] | 1092 | /* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct |
| 1093 | * from all other currently live pointers. This may not be possible. |
| 1094 | */ |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1095 | |
| 1096 | /* |
| 1097 | * The basic blocks are ordered by decreasing execution frequency, |
| 1098 | * which minimizes the number of jumps in the most common cases, |
| 1099 | * improves branching prediction and instruction scheduling (small |
| 1100 | * block allocations typically result in a couple of instructions). |
| 1101 | * Unless the optimizer reorders everything, being too smart... |
| 1102 | */ |
| 1103 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1104 | static void * |
| 1105 | _PyObject_Malloc(void *ctx, size_t nbytes) |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1106 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1107 | block *bp; |
| 1108 | poolp pool; |
| 1109 | poolp next; |
| 1110 | uint size; |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1111 | |
Antoine Pitrou | 0aaaa62 | 2013-04-06 01:15:30 +0200 | [diff] [blame] | 1112 | _Py_AllocatedBlocks++; |
| 1113 | |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1114 | #ifdef WITH_VALGRIND |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1115 | if (UNLIKELY(running_on_valgrind == -1)) |
| 1116 | running_on_valgrind = RUNNING_ON_VALGRIND; |
| 1117 | if (UNLIKELY(running_on_valgrind)) |
| 1118 | goto redirect; |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1119 | #endif |
| 1120 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1121 | /* |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1122 | * This implicitly redirects malloc(0). |
| 1123 | */ |
| 1124 | if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) { |
| 1125 | LOCK(); |
| 1126 | /* |
| 1127 | * Most frequent paths first |
| 1128 | */ |
| 1129 | size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT; |
| 1130 | pool = usedpools[size + size]; |
| 1131 | if (pool != pool->nextpool) { |
| 1132 | /* |
| 1133 | * There is a used pool for this size class. |
| 1134 | * Pick up the head block of its free list. |
| 1135 | */ |
| 1136 | ++pool->ref.count; |
| 1137 | bp = pool->freeblock; |
| 1138 | assert(bp != NULL); |
| 1139 | if ((pool->freeblock = *(block **)bp) != NULL) { |
| 1140 | UNLOCK(); |
| 1141 | return (void *)bp; |
| 1142 | } |
| 1143 | /* |
| 1144 | * Reached the end of the free list, try to extend it. |
| 1145 | */ |
| 1146 | if (pool->nextoffset <= pool->maxnextoffset) { |
| 1147 | /* There is room for another block. */ |
| 1148 | pool->freeblock = (block*)pool + |
| 1149 | pool->nextoffset; |
| 1150 | pool->nextoffset += INDEX2SIZE(size); |
| 1151 | *(block **)(pool->freeblock) = NULL; |
| 1152 | UNLOCK(); |
| 1153 | return (void *)bp; |
| 1154 | } |
| 1155 | /* Pool is full, unlink from used pools. */ |
| 1156 | next = pool->nextpool; |
| 1157 | pool = pool->prevpool; |
| 1158 | next->prevpool = pool; |
| 1159 | pool->nextpool = next; |
| 1160 | UNLOCK(); |
| 1161 | return (void *)bp; |
| 1162 | } |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1163 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1164 | /* There isn't a pool of the right size class immediately |
| 1165 | * available: use a free pool. |
| 1166 | */ |
| 1167 | if (usable_arenas == NULL) { |
| 1168 | /* No arena has a free pool: allocate a new arena. */ |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1169 | #ifdef WITH_MEMORY_LIMITS |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1170 | if (narenas_currently_allocated >= MAX_ARENAS) { |
| 1171 | UNLOCK(); |
| 1172 | goto redirect; |
| 1173 | } |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1174 | #endif |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1175 | usable_arenas = new_arena(); |
| 1176 | if (usable_arenas == NULL) { |
| 1177 | UNLOCK(); |
| 1178 | goto redirect; |
| 1179 | } |
| 1180 | usable_arenas->nextarena = |
| 1181 | usable_arenas->prevarena = NULL; |
| 1182 | } |
| 1183 | assert(usable_arenas->address != 0); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1184 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1185 | /* Try to get a cached free pool. */ |
| 1186 | pool = usable_arenas->freepools; |
| 1187 | if (pool != NULL) { |
| 1188 | /* Unlink from cached pools. */ |
| 1189 | usable_arenas->freepools = pool->nextpool; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1190 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1191 | /* This arena already had the smallest nfreepools |
| 1192 | * value, so decreasing nfreepools doesn't change |
| 1193 | * that, and we don't need to rearrange the |
| 1194 | * usable_arenas list. However, if the arena has |
| 1195 | * become wholly allocated, we need to remove its |
| 1196 | * arena_object from usable_arenas. |
| 1197 | */ |
| 1198 | --usable_arenas->nfreepools; |
| 1199 | if (usable_arenas->nfreepools == 0) { |
| 1200 | /* Wholly allocated: remove. */ |
| 1201 | assert(usable_arenas->freepools == NULL); |
| 1202 | assert(usable_arenas->nextarena == NULL || |
| 1203 | usable_arenas->nextarena->prevarena == |
| 1204 | usable_arenas); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1205 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1206 | usable_arenas = usable_arenas->nextarena; |
| 1207 | if (usable_arenas != NULL) { |
| 1208 | usable_arenas->prevarena = NULL; |
| 1209 | assert(usable_arenas->address != 0); |
| 1210 | } |
| 1211 | } |
| 1212 | else { |
| 1213 | /* nfreepools > 0: it must be that freepools |
| 1214 | * isn't NULL, or that we haven't yet carved |
| 1215 | * off all the arena's pools for the first |
| 1216 | * time. |
| 1217 | */ |
| 1218 | assert(usable_arenas->freepools != NULL || |
| 1219 | usable_arenas->pool_address <= |
| 1220 | (block*)usable_arenas->address + |
| 1221 | ARENA_SIZE - POOL_SIZE); |
| 1222 | } |
| 1223 | init_pool: |
| 1224 | /* Frontlink to used pools. */ |
| 1225 | next = usedpools[size + size]; /* == prev */ |
| 1226 | pool->nextpool = next; |
| 1227 | pool->prevpool = next; |
| 1228 | next->nextpool = pool; |
| 1229 | next->prevpool = pool; |
| 1230 | pool->ref.count = 1; |
| 1231 | if (pool->szidx == size) { |
| 1232 | /* Luckily, this pool last contained blocks |
| 1233 | * of the same size class, so its header |
| 1234 | * and free list are already initialized. |
| 1235 | */ |
| 1236 | bp = pool->freeblock; |
Antoine Pitrou | f9d0b12 | 2012-12-09 14:28:26 +0100 | [diff] [blame] | 1237 | assert(bp != NULL); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1238 | pool->freeblock = *(block **)bp; |
| 1239 | UNLOCK(); |
| 1240 | return (void *)bp; |
| 1241 | } |
| 1242 | /* |
| 1243 | * Initialize the pool header, set up the free list to |
| 1244 | * contain just the second block, and return the first |
| 1245 | * block. |
| 1246 | */ |
| 1247 | pool->szidx = size; |
| 1248 | size = INDEX2SIZE(size); |
| 1249 | bp = (block *)pool + POOL_OVERHEAD; |
| 1250 | pool->nextoffset = POOL_OVERHEAD + (size << 1); |
| 1251 | pool->maxnextoffset = POOL_SIZE - size; |
| 1252 | pool->freeblock = bp + size; |
| 1253 | *(block **)(pool->freeblock) = NULL; |
| 1254 | UNLOCK(); |
| 1255 | return (void *)bp; |
| 1256 | } |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1257 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1258 | /* Carve off a new pool. */ |
| 1259 | assert(usable_arenas->nfreepools > 0); |
| 1260 | assert(usable_arenas->freepools == NULL); |
| 1261 | pool = (poolp)usable_arenas->pool_address; |
| 1262 | assert((block*)pool <= (block*)usable_arenas->address + |
| 1263 | ARENA_SIZE - POOL_SIZE); |
| 1264 | pool->arenaindex = usable_arenas - arenas; |
| 1265 | assert(&arenas[pool->arenaindex] == usable_arenas); |
| 1266 | pool->szidx = DUMMY_SIZE_IDX; |
| 1267 | usable_arenas->pool_address += POOL_SIZE; |
| 1268 | --usable_arenas->nfreepools; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1269 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1270 | if (usable_arenas->nfreepools == 0) { |
| 1271 | assert(usable_arenas->nextarena == NULL || |
| 1272 | usable_arenas->nextarena->prevarena == |
| 1273 | usable_arenas); |
| 1274 | /* Unlink the arena: it is completely allocated. */ |
| 1275 | usable_arenas = usable_arenas->nextarena; |
| 1276 | if (usable_arenas != NULL) { |
| 1277 | usable_arenas->prevarena = NULL; |
| 1278 | assert(usable_arenas->address != 0); |
| 1279 | } |
| 1280 | } |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1281 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1282 | goto init_pool; |
| 1283 | } |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1284 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1285 | /* The small block allocator ends here. */ |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1286 | |
Tim Peters | d97a1c0 | 2002-03-30 06:09:22 +0000 | [diff] [blame] | 1287 | redirect: |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1288 | /* Redirect the original request to the underlying (libc) allocator. |
| 1289 | * We jump here on bigger requests, on error in the code above (as a |
| 1290 | * last chance to serve the request) or when the max memory limit |
| 1291 | * has been reached. |
| 1292 | */ |
Antoine Pitrou | f9d0b12 | 2012-12-09 14:28:26 +0100 | [diff] [blame] | 1293 | { |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1294 | void *result = PyMem_Malloc(nbytes); |
Antoine Pitrou | f9d0b12 | 2012-12-09 14:28:26 +0100 | [diff] [blame] | 1295 | if (!result) |
| 1296 | _Py_AllocatedBlocks--; |
| 1297 | return result; |
| 1298 | } |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1299 | } |
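
/* Worked example (a sketch; assumes the ALIGNMENT/ALIGNMENT_SHIFT pair
 * defined earlier in this file): the index computation used above maps
 * request sizes onto size classes in ALIGNMENT-byte steps, so with
 * 8-byte alignment requests of 1..8 bytes share index 0, 9..16 share
 * index 1, and so on. */
static uint
size_class_index(size_t nbytes)
{
    return (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
}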
| 1300 | |
| 1301 | /* free */ |
| 1302 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1303 | static void |
| 1304 | _PyObject_Free(void *ctx, void *p) |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1305 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1306 | poolp pool; |
| 1307 | block *lastfree; |
| 1308 | poolp next, prev; |
| 1309 | uint size; |
Antoine Pitrou | b7fb2e2 | 2011-01-07 21:43:59 +0000 | [diff] [blame] | 1310 | #ifndef Py_USING_MEMORY_DEBUGGER |
| 1311 | uint arenaindex_temp; |
| 1312 | #endif |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1313 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1314 | if (p == NULL) /* free(NULL) has no effect */ |
| 1315 | return; |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1316 | |
Antoine Pitrou | f9d0b12 | 2012-12-09 14:28:26 +0100 | [diff] [blame] | 1317 | _Py_AllocatedBlocks--; |
| 1318 | |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1319 | #ifdef WITH_VALGRIND |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1320 | if (UNLIKELY(running_on_valgrind > 0)) |
| 1321 | goto redirect; |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1322 | #endif |
| 1323 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1324 | pool = POOL_ADDR(p); |
| 1325 | if (Py_ADDRESS_IN_RANGE(p, pool)) { |
| 1326 | /* We allocated this address. */ |
| 1327 | LOCK(); |
| 1328 | /* Link p to the start of the pool's freeblock list. Since |
| 1329 | * the pool had at least the p block outstanding, the pool |
| 1330 | * wasn't empty (so it's already in a usedpools[] list, or |
| 1331 | * was full and is in no list -- it's not in the freeblocks |
| 1332 | * list in any case). |
| 1333 | */ |
| 1334 | assert(pool->ref.count > 0); /* else it was empty */ |
| 1335 | *(block **)p = lastfree = pool->freeblock; |
| 1336 | pool->freeblock = (block *)p; |
| 1337 | if (lastfree) { |
| 1338 | struct arena_object* ao; |
| 1339 | uint nf; /* ao->nfreepools */ |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1340 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1341 | /* freeblock wasn't NULL, so the pool wasn't full, |
| 1342 | * and the pool is in a usedpools[] list. |
| 1343 | */ |
| 1344 | if (--pool->ref.count != 0) { |
| 1345 | /* pool isn't empty: leave it in usedpools */ |
| 1346 | UNLOCK(); |
| 1347 | return; |
| 1348 | } |
| 1349 | /* Pool is now empty: unlink from usedpools, and |
| 1350 | * link to the front of freepools. This ensures that |
| 1351 | * previously freed pools will be allocated later |
| 1352 | * (not being referenced, they may have been paged out).
| 1353 | */ |
| 1354 | next = pool->nextpool; |
| 1355 | prev = pool->prevpool; |
| 1356 | next->prevpool = prev; |
| 1357 | prev->nextpool = next; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1358 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1359 | /* Link the pool to freepools. This is a singly-linked |
| 1360 | * list, and pool->prevpool isn't used there. |
| 1361 | */ |
| 1362 | ao = &arenas[pool->arenaindex]; |
| 1363 | pool->nextpool = ao->freepools; |
| 1364 | ao->freepools = pool; |
| 1365 | nf = ++ao->nfreepools; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1366 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1367 | /* All the rest is arena management. We just freed |
| 1368 | * a pool, and there are 4 cases for arena mgmt: |
| 1369 | * 1. If all the pools are free, return the arena to |
| 1370 | * the system free(). |
| 1371 | * 2. If this is the only free pool in the arena, |
| 1372 | * add the arena back to the `usable_arenas` list. |
| 1373 | * 3. If the "next" arena has a smaller count of free |
| 1374 | * pools, we have to "slide this arena right" to |
| 1375 | * restore the invariant that usable_arenas is sorted
| 1376 | * in increasing order of nfreepools.
| 1377 | * 4. Else there's nothing more to do. |
| 1378 | */ |
| 1379 | if (nf == ao->ntotalpools) { |
| 1380 | /* Case 1. First unlink ao from usable_arenas. |
| 1381 | */ |
| 1382 | assert(ao->prevarena == NULL || |
| 1383 | ao->prevarena->address != 0); |
| 1384 | assert(ao->nextarena == NULL ||
| 1385 | ao->nextarena->address != 0); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1386 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1387 | /* Fix the pointer in the prevarena, or the |
| 1388 | * usable_arenas pointer. |
| 1389 | */ |
| 1390 | if (ao->prevarena == NULL) { |
| 1391 | usable_arenas = ao->nextarena; |
| 1392 | assert(usable_arenas == NULL || |
| 1393 | usable_arenas->address != 0); |
| 1394 | } |
| 1395 | else { |
| 1396 | assert(ao->prevarena->nextarena == ao); |
| 1397 | ao->prevarena->nextarena = |
| 1398 | ao->nextarena; |
| 1399 | } |
| 1400 | /* Fix the pointer in the nextarena. */ |
| 1401 | if (ao->nextarena != NULL) { |
| 1402 | assert(ao->nextarena->prevarena == ao); |
| 1403 | ao->nextarena->prevarena = |
| 1404 | ao->prevarena; |
| 1405 | } |
| 1406 | /* Record that this arena_object slot is |
| 1407 | * available to be reused. |
| 1408 | */ |
| 1409 | ao->nextarena = unused_arena_objects; |
| 1410 | unused_arena_objects = ao; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1411 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1412 | /* Free the entire arena. */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1413 | _PyObject_Arena.free(_PyObject_Arena.ctx, |
| 1414 | (void *)ao->address, ARENA_SIZE); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1415 | ao->address = 0; /* mark unassociated */ |
| 1416 | --narenas_currently_allocated; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1417 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1418 | UNLOCK(); |
| 1419 | return; |
| 1420 | } |
| 1421 | if (nf == 1) { |
| 1422 | /* Case 2. Put ao at the head of |
| 1423 | * usable_arenas. Note that because |
| 1424 | * ao->nfreepools was 0 before, ao isn't |
| 1425 | * currently on the usable_arenas list. |
| 1426 | */ |
| 1427 | ao->nextarena = usable_arenas; |
| 1428 | ao->prevarena = NULL; |
| 1429 | if (usable_arenas) |
| 1430 | usable_arenas->prevarena = ao; |
| 1431 | usable_arenas = ao; |
| 1432 | assert(usable_arenas->address != 0); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1433 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1434 | UNLOCK(); |
| 1435 | return; |
| 1436 | } |
| 1437 | /* If this arena is now out of order, we need to keep |
| 1438 | * the list sorted. The list is kept sorted so that |
| 1439 | * the "most full" arenas are used first, which allows |
| 1440 | * the nearly empty arenas to be completely freed. In |
| 1441 | * a few unscientific tests, it seems like this
| 1442 | * approach allowed a lot more memory to be freed. |
| 1443 | */ |
| 1444 | if (ao->nextarena == NULL || |
| 1445 | nf <= ao->nextarena->nfreepools) { |
| 1446 | /* Case 4. Nothing to do. */ |
| 1447 | UNLOCK(); |
| 1448 | return; |
| 1449 | } |
| 1450 | /* Case 3: We have to move the arena towards the end |
| 1451 | * of the list, because it has more free pools than |
| 1452 | * the arena to its right. |
| 1453 | * First unlink ao from usable_arenas. |
| 1454 | */ |
| 1455 | if (ao->prevarena != NULL) { |
| 1456 | /* ao isn't at the head of the list */ |
| 1457 | assert(ao->prevarena->nextarena == ao); |
| 1458 | ao->prevarena->nextarena = ao->nextarena; |
| 1459 | } |
| 1460 | else { |
| 1461 | /* ao is at the head of the list */ |
| 1462 | assert(usable_arenas == ao); |
| 1463 | usable_arenas = ao->nextarena; |
| 1464 | } |
| 1465 | ao->nextarena->prevarena = ao->prevarena; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1466 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1467 | /* Locate the new insertion point by iterating over |
| 1468 | * the list, using our nextarena pointer. |
| 1469 | */ |
| 1470 | while (ao->nextarena != NULL && |
| 1471 | nf > ao->nextarena->nfreepools) { |
| 1472 | ao->prevarena = ao->nextarena; |
| 1473 | ao->nextarena = ao->nextarena->nextarena; |
| 1474 | } |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1475 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1476 | /* Insert ao at this point. */ |
| 1477 | assert(ao->nextarena == NULL || |
| 1478 | ao->prevarena == ao->nextarena->prevarena); |
| 1479 | assert(ao->prevarena->nextarena == ao->nextarena); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1480 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1481 | ao->prevarena->nextarena = ao; |
| 1482 | if (ao->nextarena != NULL) |
| 1483 | ao->nextarena->prevarena = ao; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1484 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1485 | /* Verify that the swaps worked. */ |
| 1486 | assert(ao->nextarena == NULL || |
| 1487 | nf <= ao->nextarena->nfreepools); |
| 1488 | assert(ao->prevarena == NULL || |
| 1489 | nf > ao->prevarena->nfreepools); |
| 1490 | assert(ao->nextarena == NULL || |
| 1491 | ao->nextarena->prevarena == ao); |
| 1492 | assert((usable_arenas == ao && |
| 1493 | ao->prevarena == NULL) || |
| 1494 | ao->prevarena->nextarena == ao); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 1495 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1496 | UNLOCK(); |
| 1497 | return; |
| 1498 | } |
| 1499 | /* Pool was full, so doesn't currently live in any list: |
| 1500 | * link it to the front of the appropriate usedpools[] list. |
| 1501 | * This mimics LRU pool usage for new allocations and |
| 1502 | * targets optimal filling when several pools contain |
| 1503 | * blocks of the same size class. |
| 1504 | */ |
| 1505 | --pool->ref.count; |
| 1506 | assert(pool->ref.count > 0); /* else the pool is empty */ |
| 1507 | size = pool->szidx; |
| 1508 | next = usedpools[size + size]; |
| 1509 | prev = next->prevpool; |
| 1510 | /* insert pool before next: prev <-> pool <-> next */ |
| 1511 | pool->nextpool = next; |
| 1512 | pool->prevpool = prev; |
| 1513 | next->prevpool = pool; |
| 1514 | prev->nextpool = pool; |
| 1515 | UNLOCK(); |
| 1516 | return; |
| 1517 | } |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1518 | |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1519 | #ifdef WITH_VALGRIND |
| 1520 | redirect: |
| 1521 | #endif |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1522 | /* We didn't allocate this address. */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1523 | PyMem_Free(p); |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1524 | } |
| 1525 | |
Tim Peters | 84c1b97 | 2002-04-04 04:44:32 +0000 | [diff] [blame] | 1526 | /* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0, |
| 1527 | * then as the Python docs promise, we do not treat this like free(p), and |
| 1528 | * return a non-NULL result. |
| 1529 | */ |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1530 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1531 | static void * |
| 1532 | _PyObject_Realloc(void *ctx, void *p, size_t nbytes) |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1533 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1534 | void *bp; |
| 1535 | poolp pool; |
| 1536 | size_t size; |
Antoine Pitrou | b7fb2e2 | 2011-01-07 21:43:59 +0000 | [diff] [blame] | 1537 | #ifndef Py_USING_MEMORY_DEBUGGER |
| 1538 | uint arenaindex_temp; |
| 1539 | #endif |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1540 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1541 | if (p == NULL) |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1542 | return _PyObject_Malloc(ctx, nbytes); |
Georg Brandl | d492ad8 | 2008-07-23 16:13:07 +0000 | [diff] [blame] | 1543 | |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1544 | #ifdef WITH_VALGRIND |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1545 | /* Treat running_on_valgrind == -1 the same as 0 */ |
| 1546 | if (UNLIKELY(running_on_valgrind > 0)) |
| 1547 | goto redirect; |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1548 | #endif |
| 1549 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1550 | pool = POOL_ADDR(p); |
| 1551 | if (Py_ADDRESS_IN_RANGE(p, pool)) { |
| 1552 | /* We're in charge of this block */ |
| 1553 | size = INDEX2SIZE(pool->szidx); |
| 1554 | if (nbytes <= size) { |
| 1555 | /* The block is staying the same or shrinking. If |
| 1556 | * it's shrinking, there's a tradeoff: it costs |
| 1557 | * cycles to copy the block to a smaller size class, |
| 1558 | * but it wastes memory not to copy it. The |
| 1559 | * compromise here is to copy on shrink only if at |
| 1560 | * least 25% of size can be shaved off. |
| 1561 | */ |
| 1562 | if (4 * nbytes > 3 * size) { |
| 1563 | /* It's the same, |
| 1564 | * or shrinking and new/old > 3/4. |
| 1565 | */ |
| 1566 | return p; |
| 1567 | } |
| 1568 | size = nbytes; |
| 1569 | } |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1570 | bp = _PyObject_Malloc(ctx, nbytes); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1571 | if (bp != NULL) { |
| 1572 | memcpy(bp, p, size); |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1573 | _PyObject_Free(ctx, p); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1574 | } |
| 1575 | return bp; |
| 1576 | } |
Benjamin Peterson | 05159c4 | 2009-12-03 03:01:27 +0000 | [diff] [blame] | 1577 | #ifdef WITH_VALGRIND |
| 1578 | redirect: |
| 1579 | #endif |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1580 | /* We're not managing this block. If nbytes <= |
| 1581 | * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this |
| 1582 | * block. However, if we do, we need to copy the valid data from |
| 1583 | * the C-managed block to one of our blocks, and there's no portable |
| 1584 | * way to know how much of the memory space starting at p is valid. |
| 1585 | * As bug 1185883 pointed out the hard way, it's possible that the |
| 1586 | * C-managed block is "at the end" of allocated VM space, so that |
| 1587 | * a memory fault can occur if we try to copy nbytes bytes starting |
| 1588 | * at p. Instead we punt: let C continue to manage this block. |
| 1589 | */ |
| 1590 | if (nbytes) |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1591 | return PyMem_Realloc(p, nbytes); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1592 | /* C doesn't define the result of realloc(p, 0) (it may or may not |
| 1593 | * return NULL then), but Python's docs promise that nbytes==0 never |
| 1594 | * returns NULL. We don't pass 0 to realloc(), to avoid that end case
| 1595 | * to begin with. Even then, we can't be sure that realloc() won't |
| 1596 | * return NULL. |
| 1597 | */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1598 | bp = PyMem_Realloc(p, 1); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1599 | return bp ? bp : p; |
Neil Schemenauer | a35c688 | 2001-02-27 04:45:05 +0000 | [diff] [blame] | 1600 | } |
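
/* Edge-case sketch (illustrative; the function name is hypothetical,
 * and ctx is unused by these entry points, so NULL is passed):
 * realloc(NULL, n) acts like malloc(n), and realloc(p, 0) returns a
 * usable non-NULL pointer, barring memory exhaustion. */
static void
realloc_edge_cases(void)
{
    void *p = _PyObject_Realloc(NULL, NULL, 16);  /* like malloc(16) */
    void *q = _PyObject_Realloc(NULL, p, 0);      /* non-NULL; not a free */
    assert(q != NULL);
    _PyObject_Free(NULL, q);
}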
| 1601 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1602 | #else /* ! WITH_PYMALLOC */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1603 | |
| 1604 | /*==========================================================================*/ |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1605 | /* pymalloc not enabled: Redirect the entry points to malloc. These will |
| 1606 | * only be used by extensions that are compiled with pymalloc enabled. */ |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1607 | |
Antoine Pitrou | 9284053 | 2012-12-17 23:05:59 +0100 | [diff] [blame] | 1608 | Py_ssize_t |
| 1609 | _Py_GetAllocatedBlocks(void) |
| 1610 | { |
| 1611 | return 0; |
| 1612 | } |
| 1613 | |
Tim Peters | 1221c0a | 2002-03-23 00:20:15 +0000 | [diff] [blame] | 1614 | #endif /* WITH_PYMALLOC */ |
| 1615 | |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1616 | #ifdef PYMALLOC_DEBUG |
| 1617 | /*==========================================================================*/ |
Tim Peters | 62c06ba | 2002-03-23 22:28:18 +0000 | [diff] [blame] | 1618 | /* A cross-platform debugging allocator. This doesn't manage memory directly;
| 1619 | * it wraps a real allocator, adding extra debugging info to the memory blocks. |
| 1620 | */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1621 | |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1622 | /* Special bytes broadcast into debug memory blocks at appropriate times. |
| 1623 | * Strings of these are unlikely to be valid addresses, floats, ints or |
| 1624 | * 7-bit ASCII. |
| 1625 | */ |
| 1626 | #undef CLEANBYTE |
| 1627 | #undef DEADBYTE |
| 1628 | #undef FORBIDDENBYTE |
| 1629 | #define CLEANBYTE 0xCB /* clean (newly allocated) memory */ |
Tim Peters | 889f61d | 2002-07-10 19:29:49 +0000 | [diff] [blame] | 1630 | #define DEADBYTE 0xDB /* dead (newly freed) memory */ |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1631 | #define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1632 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1633 | static size_t serialno = 0; /* incremented on each debug {m,re}alloc */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1634 | |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1635 | /* serialno is always incremented via calling this routine. The point is |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1636 | * to supply a single place to set a breakpoint. |
| 1637 | */ |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1638 | static void |
Neil Schemenauer | bd02b14 | 2002-03-28 21:05:38 +0000 | [diff] [blame] | 1639 | bumpserialno(void) |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1640 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1641 | ++serialno; |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1642 | } |
| 1643 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1644 | #define SST SIZEOF_SIZE_T |
Tim Peters | e085017 | 2002-03-24 00:34:21 +0000 | [diff] [blame] | 1645 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1646 | /* Read sizeof(size_t) bytes at p as a big-endian size_t. */ |
| 1647 | static size_t |
| 1648 | read_size_t(const void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1649 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1650 | const uchar *q = (const uchar *)p; |
| 1651 | size_t result = *q++; |
| 1652 | int i; |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1653 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1654 | for (i = SST; --i > 0; ++q) |
| 1655 | result = (result << 8) | *q; |
| 1656 | return result; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1657 | } |
| 1658 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1659 | /* Write n as a big-endian size_t, MSB at address p, LSB at |
| 1660 | * p + sizeof(size_t) - 1. |
| 1661 | */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1662 | static void |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1663 | write_size_t(void *p, size_t n) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1664 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1665 | uchar *q = (uchar *)p + SST - 1; |
| 1666 | int i; |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1667 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1668 | for (i = SST; --i >= 0; --q) { |
| 1669 | *q = (uchar)(n & 0xff); |
| 1670 | n >>= 8; |
| 1671 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1672 | } |
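
/* Round-trip sketch (illustrative only): the two helpers above are
 * inverses for any size_t value, regardless of host endianness. */
static void
check_size_t_roundtrip(void)
{
    uchar buf[SST];
    size_t n = (size_t)123456789;

    write_size_t(buf, n);
    assert(read_size_t(buf) == n);
}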
| 1673 | |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1674 | #ifdef Py_DEBUG |
| 1675 | /* Is target in the list? The list is traversed via the nextpool pointers. |
| 1676 | * The list may be NULL-terminated, or circular. Return 1 if target is in |
| 1677 | * list, else 0. |
| 1678 | */ |
| 1679 | static int |
| 1680 | pool_is_in_list(const poolp target, poolp list) |
| 1681 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1682 | poolp origlist = list; |
| 1683 | assert(target != NULL); |
| 1684 | if (list == NULL) |
| 1685 | return 0; |
| 1686 | do { |
| 1687 | if (target == list) |
| 1688 | return 1; |
| 1689 | list = list->nextpool; |
| 1690 | } while (list != NULL && list != origlist); |
| 1691 | return 0; |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1692 | } |
| 1693 | |
| 1694 | #else |
| 1695 | #define pool_is_in_list(X, Y) 1 |
| 1696 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1697 | #endif /* Py_DEBUG */ |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 1698 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1699 | /* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and |
| 1700 | fills them with useful stuff; below, p denotes the underlying malloc's result:
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1701 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1702 | p[0: S] |
| 1703 | Number of bytes originally asked for. This is a size_t, big-endian (easier |
| 1704 | to read in a memory dump). |
| 1705 | p[S: 2*S] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1706 | Copies of FORBIDDENBYTE. Used to catch under-writes and under-reads.
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1707 | p[2*S: 2*S+n] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1708 | The requested memory, filled with copies of CLEANBYTE. |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1709 | Used to catch reference to uninitialized memory. |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1710 | &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1711 | handled the request itself. |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1712 | p[2*S+n: 2*S+n+S] |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1713 | Copies of FORBIDDENBYTE. Used to catch over-writes and over-reads.
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1714 | p[2*S+n+S: 2*S+n+2*S] |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1715 | A serial number, incremented by 1 on each call to _PyMem_DebugMalloc |
| 1716 | and _PyMem_DebugRealloc. |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1717 | This is a big-endian size_t. |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1718 | If "bad memory" is detected later, the serial number gives an |
| 1719 | excellent way to set a breakpoint on the next run, to capture the |
| 1720 | instant at which this block was passed out. |
| 1721 | */ |
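
/* Illustrative sketch (hypothetical helper, not part of the API): given
 * a pointer p handed out by the debug malloc below, the layout above
 * lets us recover the request size and serial number from the
 * decorations surrounding the user data. */
static void
dump_debug_decorations(const void *p)
{
    const uchar *q = (const uchar *)p;
    size_t nbytes = read_size_t(q - 2*SST);        /* p[0: S] */
    size_t serial = read_size_t(q + nbytes + SST); /* p[2*S+n+S: 2*S+n+2*S] */

    fprintf(stderr, "%" PY_FORMAT_SIZE_T "u bytes requested, "
            "serial #%" PY_FORMAT_SIZE_T "u\n", nbytes, serial);
}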
| 1722 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1723 | static void * |
| 1724 | _PyMem_DebugMalloc(void *ctx, size_t nbytes) |
Kristján Valur Jónsson | ae4cfb1 | 2009-09-28 13:45:02 +0000 | [diff] [blame] | 1725 | { |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1726 | debug_alloc_api_t *api = (debug_alloc_api_t *)ctx; |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1727 | uchar *p; /* base address of malloc'ed block */ |
| 1728 | uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */ |
| 1729 | size_t total; /* nbytes + 4*SST */ |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1730 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1731 | bumpserialno(); |
| 1732 | total = nbytes + 4*SST; |
| 1733 | if (total < nbytes) |
| 1734 | /* overflow: can't represent total as a size_t */ |
| 1735 | return NULL; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1736 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1737 | p = (uchar *)api->alloc.malloc(api->alloc.ctx, total); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1738 | if (p == NULL) |
| 1739 | return NULL; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1740 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1741 | /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */ |
| 1742 | write_size_t(p, nbytes); |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1743 | p[SST] = (uchar)api->api_id; |
| 1744 | memset(p + SST + 1, FORBIDDENBYTE, SST-1); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1745 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1746 | if (nbytes > 0) |
| 1747 | memset(p + 2*SST, CLEANBYTE, nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1748 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1749 | /* at tail, write pad (SST bytes) and serialno (SST bytes) */ |
| 1750 | tail = p + 2*SST + nbytes; |
| 1751 | memset(tail, FORBIDDENBYTE, SST); |
| 1752 | write_size_t(tail + SST, serialno); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1753 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1754 | return p + 2*SST; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1755 | } |
| 1756 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1757 | /* The debug free first checks the 2*SST bytes on each end for sanity (in |
Kristján Valur Jónsson | ae4cfb1 | 2009-09-28 13:45:02 +0000 | [diff] [blame] | 1758 | particular, that the FORBIDDENBYTEs with the api ID are still intact). |
Tim Peters | f6fb501 | 2002-04-12 07:38:53 +0000 | [diff] [blame] | 1759 | Then fills the original bytes with DEADBYTE. |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1760 | Then calls the underlying free. |
| 1761 | */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1762 | static void |
| 1763 | _PyMem_DebugFree(void *ctx, void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1764 | { |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1765 | debug_alloc_api_t *api = (debug_alloc_api_t *)ctx; |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1766 | uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */ |
| 1767 | size_t nbytes; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1768 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1769 | if (p == NULL) |
| 1770 | return; |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1771 | _PyMem_DebugCheckAddress(api->api_id, p); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1772 | nbytes = read_size_t(q); |
| 1773 | nbytes += 4*SST; |
| 1774 | if (nbytes > 0) |
| 1775 | memset(q, DEADBYTE, nbytes); |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1776 | api->alloc.free(api->alloc.ctx, q); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1777 | } |
| 1778 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1779 | static void * |
| 1780 | _PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1781 | { |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1782 | debug_alloc_api_t *api = (debug_alloc_api_t *)ctx; |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1783 | uchar *q = (uchar *)p; |
| 1784 | uchar *tail; |
| 1785 | size_t total; /* nbytes + 4*SST */ |
| 1786 | size_t original_nbytes; |
| 1787 | int i; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1788 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1789 | if (p == NULL) |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1790 | return _PyMem_DebugMalloc(ctx, nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1791 | |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1792 | _PyMem_DebugCheckAddress(api->api_id, p); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1793 | bumpserialno(); |
| 1794 | original_nbytes = read_size_t(q - 2*SST); |
| 1795 | total = nbytes + 4*SST; |
| 1796 | if (total < nbytes) |
| 1797 | /* overflow: can't represent total as a size_t */ |
| 1798 | return NULL; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1799 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1800 | if (nbytes < original_nbytes) { |
| 1801 | /* shrinking: mark old extra memory dead */ |
| 1802 | memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST); |
| 1803 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1804 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1805 | /* Resize and add decorations. We may get a new pointer here, in which |
| 1806 | * case we didn't get the chance to mark the old memory with DEADBYTE, |
| 1807 | * but we live with that. |
| 1808 | */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1809 | q = (uchar *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1810 | if (q == NULL) |
| 1811 | return NULL; |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1812 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1813 | write_size_t(q, nbytes); |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1814 | assert(q[SST] == (uchar)api->api_id); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1815 | for (i = 1; i < SST; ++i) |
| 1816 | assert(q[SST + i] == FORBIDDENBYTE); |
| 1817 | q += 2*SST; |
| 1818 | tail = q + nbytes; |
| 1819 | memset(tail, FORBIDDENBYTE, SST); |
| 1820 | write_size_t(tail + SST, serialno); |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1821 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1822 | if (nbytes > original_nbytes) { |
| 1823 | /* growing: mark new extra memory clean */ |
| 1824 | memset(q + original_nbytes, CLEANBYTE, |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 1825 | nbytes - original_nbytes); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1826 | } |
Tim Peters | 85cc1c4 | 2002-04-12 08:52:50 +0000 | [diff] [blame] | 1827 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1828 | return q; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1829 | } |
| 1830 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1831 | /* Check the forbidden bytes on both ends of the memory allocated for p. |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1832 | * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress, |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1833 | * and call Py_FatalError to kill the program. |
Kristján Valur Jónsson | ae4cfb1 | 2009-09-28 13:45:02 +0000 | [diff] [blame] | 1834 | * The API id is also checked.
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1835 | */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1836 | static void |
| 1837 | _PyMem_DebugCheckAddress(char api, const void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1838 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1839 | const uchar *q = (const uchar *)p; |
| 1840 | char msgbuf[64]; |
| 1841 | char *msg; |
| 1842 | size_t nbytes; |
| 1843 | const uchar *tail; |
| 1844 | int i; |
| 1845 | char id; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1846 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1847 | if (p == NULL) { |
| 1848 | msg = "didn't expect a NULL pointer"; |
| 1849 | goto error; |
| 1850 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1851 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1852 | /* Check the API id */ |
| 1853 | id = (char)q[-SST]; |
| 1854 | if (id != api) { |
| 1855 | msg = msgbuf; |
| 1856 | snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api); |
| 1857 | msgbuf[sizeof(msgbuf)-1] = 0; |
| 1858 | goto error; |
| 1859 | } |
Kristján Valur Jónsson | ae4cfb1 | 2009-09-28 13:45:02 +0000 | [diff] [blame] | 1860 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1861 | /* Check the stuff at the start of p first: if there's underwrite |
| 1862 | * corruption, the number-of-bytes field may be nuts, and checking |
| 1863 | * the tail could lead to a segfault then. |
| 1864 | */ |
| 1865 | for (i = SST-1; i >= 1; --i) { |
| 1866 | if (*(q-i) != FORBIDDENBYTE) { |
| 1867 | msg = "bad leading pad byte"; |
| 1868 | goto error; |
| 1869 | } |
| 1870 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1871 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1872 | nbytes = read_size_t(q - 2*SST); |
| 1873 | tail = q + nbytes; |
| 1874 | for (i = 0; i < SST; ++i) { |
| 1875 | if (tail[i] != FORBIDDENBYTE) { |
| 1876 | msg = "bad trailing pad byte"; |
| 1877 | goto error; |
| 1878 | } |
| 1879 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1880 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1881 | return; |
Tim Peters | d1139e0 | 2002-03-28 07:32:11 +0000 | [diff] [blame] | 1882 | |
| 1883 | error: |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1884 | _PyObject_DebugDumpAddress(p); |
| 1885 | Py_FatalError(msg); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1886 | } |
| 1887 | |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 1888 | /* Display info to stderr about the memory block at p. */ |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 1889 | static void |
Neil Schemenauer | d2560cd | 2002-04-12 03:10:20 +0000 | [diff] [blame] | 1890 | _PyObject_DebugDumpAddress(const void *p) |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1891 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1892 | const uchar *q = (const uchar *)p; |
| 1893 | const uchar *tail; |
| 1894 | size_t nbytes, serial; |
| 1895 | int i; |
| 1896 | int ok; |
| 1897 | char id; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1898 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1899 | fprintf(stderr, "Debug memory block at address p=%p:", p); |
| 1900 | if (p == NULL) { |
| 1901 | fprintf(stderr, "\n"); |
| 1902 | return; |
| 1903 | } |
| 1904 | id = (char)q[-SST]; |
| 1905 | fprintf(stderr, " API '%c'\n", id); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1906 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1907 | nbytes = read_size_t(q - 2*SST); |
| 1908 | fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally " |
| 1909 | "requested\n", nbytes); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1910 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1911 | /* In case this is nuts, check the leading pad bytes first. */ |
| 1912 | fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1); |
| 1913 | ok = 1; |
| 1914 | for (i = 1; i <= SST-1; ++i) { |
| 1915 | if (*(q-i) != FORBIDDENBYTE) { |
| 1916 | ok = 0; |
| 1917 | break; |
| 1918 | } |
| 1919 | } |
| 1920 | if (ok) |
| 1921 | fputs("FORBIDDENBYTE, as expected.\n", stderr); |
| 1922 | else { |
| 1923 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n", |
| 1924 | FORBIDDENBYTE); |
| 1925 | for (i = SST-1; i >= 1; --i) { |
| 1926 | const uchar byte = *(q-i); |
| 1927 | fprintf(stderr, " at p-%d: 0x%02x", i, byte); |
| 1928 | if (byte != FORBIDDENBYTE) |
| 1929 | fputs(" *** OUCH", stderr); |
| 1930 | fputc('\n', stderr); |
| 1931 | } |
Tim Peters | 449b5a8 | 2002-04-28 06:14:45 +0000 | [diff] [blame] | 1932 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1933 | fputs(" Because memory is corrupted at the start, the " |
| 1934 | "count of bytes requested\n" |
| 1935 | " may be bogus, and checking the trailing pad " |
| 1936 | "bytes may segfault.\n", stderr); |
| 1937 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1938 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1939 | tail = q + nbytes; |
| 1940 | fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail); |
| 1941 | ok = 1; |
| 1942 | for (i = 0; i < SST; ++i) { |
| 1943 | if (tail[i] != FORBIDDENBYTE) { |
| 1944 | ok = 0; |
| 1945 | break; |
| 1946 | } |
| 1947 | } |
| 1948 | if (ok) |
| 1949 | fputs("FORBIDDENBYTE, as expected.\n", stderr); |
| 1950 | else { |
| 1951 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n", |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 1952 | FORBIDDENBYTE); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1953 | for (i = 0; i < SST; ++i) { |
| 1954 | const uchar byte = tail[i]; |
| 1955 | fprintf(stderr, " at tail+%d: 0x%02x", |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 1956 | i, byte); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1957 | if (byte != FORBIDDENBYTE) |
| 1958 | fputs(" *** OUCH", stderr); |
| 1959 | fputc('\n', stderr); |
| 1960 | } |
| 1961 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1962 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1963 | serial = read_size_t(tail + SST); |
| 1964 | fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T |
| 1965 | "u to debug malloc/realloc.\n", serial); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1966 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1967 | if (nbytes > 0) { |
| 1968 | i = 0; |
| 1969 | fputs(" Data at p:", stderr); |
| 1970 | /* print up to 8 bytes at the start */ |
| 1971 | while (q < tail && i < 8) { |
| 1972 | fprintf(stderr, " %02x", *q); |
| 1973 | ++i; |
| 1974 | ++q; |
| 1975 | } |
| 1976 | /* and up to 8 at the end */ |
| 1977 | if (q < tail) { |
| 1978 | if (tail - q > 8) { |
| 1979 | fputs(" ...", stderr); |
| 1980 | q = tail - 8; |
| 1981 | } |
| 1982 | while (q < tail) { |
| 1983 | fprintf(stderr, " %02x", *q); |
| 1984 | ++q; |
| 1985 | } |
| 1986 | } |
| 1987 | fputc('\n', stderr); |
| 1988 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 1989 | } |
| 1990 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 1991 | #endif /* PYMALLOC_DEBUG */ |
| 1992 | |
Thomas Wouters | 73e5a5b | 2006-06-08 15:35:45 +0000 | [diff] [blame] | 1993 | static size_t |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 1994 | printone(FILE *out, const char* msg, size_t value) |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1995 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 1996 | int i, k; |
| 1997 | char buf[100]; |
| 1998 | size_t origvalue = value; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 1999 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2000 | fputs(msg, out); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2001 | for (i = (int)strlen(msg); i < 35; ++i) |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2002 | fputc(' ', out); |
| 2003 | fputc('=', out); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 2004 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2005 | /* Write the value with commas. */ |
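/* The field is built right to left: '\0' and '\n' go in at the end of
 * buf first, then digits are peeled off the value with a comma after
 * every third one, and the rest of the field is blank-padded.  E.g.
 * 1234567 renders as "1,234,567", right-aligned.
 */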
| 2006 | i = 22; |
| 2007 | buf[i--] = '\0'; |
| 2008 | buf[i--] = '\n'; |
| 2009 | k = 3; |
| 2010 | do { |
| 2011 | size_t nextvalue = value / 10; |
Benjamin Peterson | 2dba1ee | 2013-02-20 16:54:30 -0500 | [diff] [blame] | 2012 | unsigned int digit = (unsigned int)(value - nextvalue * 10); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2013 | value = nextvalue; |
| 2014 | buf[i--] = (char)(digit + '0'); |
| 2015 | --k; |
| 2016 | if (k == 0 && value && i >= 0) { |
| 2017 | k = 3; |
| 2018 | buf[i--] = ','; |
| 2019 | } |
| 2020 | } while (value && i >= 0); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 2021 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2022 | while (i >= 0) |
| 2023 | buf[i--] = ' '; |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2024 | fputs(buf, out); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 2025 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2026 | return origvalue; |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 2027 | } |
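/* Usage sketch (hypothetical values; alignment approximate).  This is
 * how the stats code below uses it:
 *
 *     total  = printone(out, "# bytes in allocated blocks", 442368);
 *     total += printone(out, "# bytes in available blocks", 24576);
 *
 * prints something like
 *
 *     # bytes in allocated blocks        =        442,368
 *     # bytes in available blocks       =         24,576
 *
 * and, because printone() returns its argument, leaves a running
 * total behind for the summary line.
 */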
| 2028 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2029 | void |
| 2030 | _PyDebugAllocatorStats(FILE *out, |
| 2031 | const char *block_name, int num_blocks, size_t sizeof_block) |
| 2032 | { |
| 2033 | char buf1[128]; |
| 2034 | char buf2[128]; |
| 2035 | PyOS_snprintf(buf1, sizeof(buf1), |
| 2036 |                   "%d %ss * %zu bytes each",
| 2037 | num_blocks, block_name, sizeof_block); |
| 2038 | PyOS_snprintf(buf2, sizeof(buf2), |
| 2039 | "%48s ", buf1); |
| 2040 | (void)printone(out, buf2, num_blocks * sizeof_block); |
| 2041 | } |
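/* A sketch of how a type's free list can report itself through
 * _PyDebugAllocatorStats(); PyExampleObject and numfree are
 * hypothetical, hence the #if 0:
 */
#if 0
static void
_PyExample_DebugMallocStats(FILE *out)
{
    _PyDebugAllocatorStats(out, "free PyExampleObject",
                           numfree, sizeof(PyExampleObject));
    /* -> "NN free PyExampleObjects * MM bytes each = ..." */
}
#endif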
| 2042 | |
| 2043 | #ifdef WITH_PYMALLOC |
| 2044 | |
| 2045 | /* Print summary info to "out" about the state of pymalloc's structures. |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 2046 | * In Py_DEBUG mode, also perform some expensive internal consistency |
| 2047 | * checks. |
| 2048 | */ |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2049 | void |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2050 | _PyObject_DebugMallocStats(FILE *out) |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2051 | { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2052 | uint i; |
| 2053 | const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT; |
| 2054 | /* # of pools, allocated blocks, and free blocks per class index */ |
| 2055 | size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
| 2056 | size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
| 2057 | size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT]; |
| 2058 | /* total # of allocated bytes in used and full pools */ |
| 2059 | size_t allocated_bytes = 0; |
| 2060 | /* total # of available bytes in used pools */ |
| 2061 | size_t available_bytes = 0; |
| 2062 | /* # of free pools + pools not yet carved out of current arena */ |
| 2063 | uint numfreepools = 0; |
| 2064 | /* # of bytes for arena alignment padding */ |
| 2065 | size_t arena_alignment = 0; |
| 2066 | /* # of bytes in used and full pools used for pool_headers */ |
| 2067 | size_t pool_header_bytes = 0; |
| 2068 | /* # of bytes in used and full pools wasted due to quantization, |
| 2069 |      * i.e. the unavoidable leftover space at the end of each used
| 2070 |      * and full pool.
| 2071 | */ |
| 2072 | size_t quantization = 0; |
| 2073 | /* # of arenas actually allocated. */ |
| 2074 | size_t narenas = 0; |
| 2075 | /* running total -- should equal narenas * ARENA_SIZE */ |
| 2076 | size_t total; |
| 2077 | char buf[128]; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2078 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2079 | fprintf(out, "Small block threshold = %d, in %u size classes.\n", |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 2080 | SMALL_REQUEST_THRESHOLD, numclasses); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2081 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2082 | for (i = 0; i < numclasses; ++i) |
| 2083 | numpools[i] = numblocks[i] = numfreeblocks[i] = 0; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2084 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2085 | /* Because full pools aren't linked to from anything, it's easiest |
| 2086 | * to march over all the arenas. If we're lucky, most of the memory |
| 2087 | * will be living in full pools -- would be a shame to miss them. |
| 2088 | */ |
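/* Within each allocated arena, pools with ref.count == 0 sit on the
 * arena's freepools list; their szidx is stale, so they are skipped
 * below rather than charged to a size class.
 */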
| 2089 | for (i = 0; i < maxarenas; ++i) { |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2090 | uint j; |
| 2091 | uptr base = arenas[i].address; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 2092 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2093 | /* Skip arenas which are not allocated. */ |
| 2094 | if (arenas[i].address == (uptr)NULL) |
| 2095 | continue; |
| 2096 | narenas += 1; |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 2097 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2098 | numfreepools += arenas[i].nfreepools; |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2099 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2100 | /* round up to pool alignment */ |
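/* A misaligned arena loses exactly one pool's worth of space in
 * total: the fragment before the first pool boundary plus the
 * fragment after the last one add up to POOL_SIZE, which is what
 * gets charged to arena_alignment here.
 */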
| 2101 | if (base & (uptr)POOL_SIZE_MASK) { |
| 2102 | arena_alignment += POOL_SIZE; |
| 2103 | base &= ~(uptr)POOL_SIZE_MASK; |
| 2104 | base += POOL_SIZE; |
| 2105 | } |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2106 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2107 | /* visit every pool in the arena */ |
| 2108 | assert(base <= (uptr) arenas[i].pool_address); |
| 2109 | for (j = 0; |
| 2110 | base < (uptr) arenas[i].pool_address; |
| 2111 | ++j, base += POOL_SIZE) { |
| 2112 | poolp p = (poolp)base; |
| 2113 | const uint sz = p->szidx; |
| 2114 | uint freeblocks; |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 2115 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2116 | if (p->ref.count == 0) { |
| 2117 | /* currently unused */ |
| 2118 | assert(pool_is_in_list(p, arenas[i].freepools)); |
| 2119 | continue; |
| 2120 | } |
| 2121 | ++numpools[sz]; |
| 2122 | numblocks[sz] += p->ref.count; |
| 2123 | freeblocks = NUMBLOCKS(sz) - p->ref.count; |
| 2124 | numfreeblocks[sz] += freeblocks; |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 2125 | #ifdef Py_DEBUG |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2126 | if (freeblocks > 0) |
| 2127 | assert(pool_is_in_list(p, usedpools[sz + sz])); |
Tim Peters | 08d8215 | 2002-04-18 22:25:03 +0000 | [diff] [blame] | 2128 | #endif |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2129 | } |
| 2130 | } |
| 2131 | assert(narenas == narenas_currently_allocated); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2132 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2133 | fputc('\n', out); |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2134 | fputs("class size num pools blocks in use avail blocks\n" |
| 2135 | "----- ---- --------- ------------- ------------\n", |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2136 | out); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2137 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2138 | for (i = 0; i < numclasses; ++i) { |
| 2139 | size_t p = numpools[i]; |
| 2140 | size_t b = numblocks[i]; |
| 2141 | size_t f = numfreeblocks[i]; |
| 2142 | uint size = INDEX2SIZE(i); |
| 2143 | if (p == 0) { |
| 2144 | assert(b == 0 && f == 0); |
| 2145 | continue; |
| 2146 | } |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2147 | fprintf(out, "%5u %6u " |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2148 | "%11" PY_FORMAT_SIZE_T "u " |
| 2149 | "%15" PY_FORMAT_SIZE_T "u " |
| 2150 | "%13" PY_FORMAT_SIZE_T "u\n", |
Stefan Krah | 735bb12 | 2010-11-26 10:54:09 +0000 | [diff] [blame] | 2151 | i, size, p, b, f); |
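/* Accumulate per-class byte totals.  "Quantization" is the tail of
 * each pool too small to hold one more block: e.g. with 4096-byte
 * pools, a 48-byte pool header and 40-byte blocks, (4096 - 48) % 40
 * leaves 8 unusable bytes per pool.  (Illustrative numbers; the real
 * ones depend on the build.)
 */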
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2152 | allocated_bytes += b * size; |
| 2153 | available_bytes += f * size; |
| 2154 | pool_header_bytes += p * POOL_OVERHEAD; |
| 2155 | quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size); |
| 2156 | } |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2157 | fputc('\n', out); |
| 2158 | #ifdef PYMALLOC_DEBUG |
| 2159 | (void)printone(out, "# times object malloc called", serialno); |
| 2160 | #endif |
| 2161 | (void)printone(out, "# arenas allocated total", ntimes_arena_allocated); |
| 2162 | (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas); |
| 2163 | (void)printone(out, "# arenas highwater mark", narenas_highwater); |
| 2164 | (void)printone(out, "# arenas allocated current", narenas); |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 2165 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2166 | PyOS_snprintf(buf, sizeof(buf), |
| 2167 | "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena", |
| 2168 | narenas, ARENA_SIZE); |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2169 | (void)printone(out, buf, narenas * ARENA_SIZE); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 2170 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2171 | fputc('\n', out); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 2172 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2173 | total = printone(out, "# bytes in allocated blocks", allocated_bytes); |
| 2174 | total += printone(out, "# bytes in available blocks", available_bytes); |
Tim Peters | 49f2681 | 2002-04-06 01:45:35 +0000 | [diff] [blame] | 2175 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2176 | PyOS_snprintf(buf, sizeof(buf), |
| 2177 | "%u unused pools * %d bytes", numfreepools, POOL_SIZE); |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2178 | total += printone(out, buf, (size_t)numfreepools * POOL_SIZE); |
Tim Peters | 16bcb6b | 2002-04-05 05:45:31 +0000 | [diff] [blame] | 2179 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2180 | total += printone(out, "# bytes lost to pool headers", pool_header_bytes); |
| 2181 | total += printone(out, "# bytes lost to quantization", quantization); |
| 2182 | total += printone(out, "# bytes lost to arena alignment", arena_alignment); |
| 2183 | (void)printone(out, "Total", total); |
Tim Peters | 7ccfadf | 2002-04-01 06:04:21 +0000 | [diff] [blame] | 2184 | } |
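/* One way to get this report from Python code (the sys hook landed in
 * the same change that threaded the "out" argument through):
 *
 *     $ python -c "import sys; sys._debugmallocstats()"
 *
 * Setting the PYTHONMALLOCSTATS environment variable also makes the
 * allocator dump these statistics on its own (e.g. when a new arena
 * is allocated).
 */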
| 2185 | |
David Malcolm | 49526f4 | 2012-06-22 14:55:41 -0400 | [diff] [blame] | 2186 | #endif /* #ifdef WITH_PYMALLOC */ |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 2187 | |
| 2188 | #ifdef Py_USING_MEMORY_DEBUGGER |
Thomas Wouters | a977329 | 2006-04-21 09:43:23 +0000 | [diff] [blame] | 2189 | /* Keep this function last so gcc won't inline it: its definition
| 2190 |  * comes after every call site.
| 2191 |  */
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 2192 | int |
| 2193 | Py_ADDRESS_IN_RANGE(void *P, poolp pool) |
| 2194 | { |
Antoine Pitrou | b7fb2e2 | 2011-01-07 21:43:59 +0000 | [diff] [blame] | 2195 | uint arenaindex_temp = pool->arenaindex; |
| 2196 | |
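/* One unsigned comparison checks both bounds: if P sits below the
 * arena base, the subtraction wraps to a huge value and the
 * "< ARENA_SIZE" test fails.  The last clause rejects arenas that
 * have been deallocated (address reset to 0).
 */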
| 2197 | return arenaindex_temp < maxarenas && |
| 2198 | (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && |
| 2199 | arenas[arenaindex_temp].address != 0; |
Neal Norwitz | 7eb3c91 | 2004-06-06 19:20:22 +0000 | [diff] [blame] | 2200 | } |
| 2201 | #endif |