#include "Python.h"

/* Python's malloc wrappers (see pymem.h) */

#ifdef PYMALLOC_DEBUG   /* WITH_PYMALLOC && PYMALLOC_DEBUG */
/* Forward declaration */
static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);
#endif

#if defined(__has_feature)  /* Clang */
 #if __has_feature(address_sanitizer)  /* is ASAN enabled? */
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis)) \
        __attribute__ ((noinline))
 #else
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#else
 #if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis)) \
        __attribute__ ((noinline))
 #else
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_Malloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}
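
/* Illustrative aside (grounded in the comment above): with the size
 * adjustment, a zero-byte request behaves like malloc(1), so every
 * successful allocation, including _PyMem_RawMalloc(0), yields a unique,
 * freeable, non-NULL pointer. */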

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif


#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawRealloc, _PyMem_RawFree
#ifdef WITH_PYMALLOC
#  define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Realloc, _PyObject_Free
#else
#  define PYOBJ_FUNCS PYRAW_FUNCS
#endif
#define PYMEM_FUNCS PYRAW_FUNCS

#ifdef PYMALLOC_DEBUG
typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocator alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', {NULL, PYRAW_FUNCS}},
    {'m', {NULL, PYMEM_FUNCS}},
    {'o', {NULL, PYOBJ_FUNCS}}
    };

#define PYDBG_FUNCS _PyMem_DebugMalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
#endif

static PyMemAllocator _PyMem_Raw = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.raw, PYDBG_FUNCS
#else
    NULL, PYRAW_FUNCS
#endif
    };

static PyMemAllocator _PyMem = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.mem, PYDBG_FUNCS
#else
    NULL, PYMEM_FUNCS
#endif
    };

static PyMemAllocator _PyObject = {
#ifdef PYMALLOC_DEBUG
    &_PyMem_Debug.obj, PYDBG_FUNCS
#else
    NULL, PYOBJ_FUNCS
#endif
    };

#undef PYRAW_FUNCS
#undef PYMEM_FUNCS
#undef PYOBJ_FUNCS
#undef PYDBG_FUNCS

static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };

void
PyMem_SetupDebugHooks(void)
{
#ifdef PYMALLOC_DEBUG
    PyMemAllocator alloc;

    alloc.malloc = _PyMem_DebugMalloc;
    alloc.realloc = _PyMem_DebugRealloc;
    alloc.free = _PyMem_DebugFree;

    if (_PyMem_Raw.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.raw;
        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }

    if (_PyMem.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.mem;
        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }

    if (_PyObject.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.obj;
        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
#endif
}

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocator *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocator *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
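
/* Illustrative sketch of using the hooks above: wrap one domain with a
 * counting allocator. PyMemAllocator, PyMem_GetAllocator and
 * PyMem_SetAllocator are the API defined in this file; the count_* names
 * are hypothetical and the block is disabled, for exposition only. */
#if 0
static PyMemAllocator count_orig;   /* previously installed allocator */
static size_t count_nallocs;        /* number of allocation calls seen */

static void *
count_malloc(void *ctx, size_t size)
{
    count_nallocs++;
    return count_orig.malloc(count_orig.ctx, size);
}

static void *
count_realloc(void *ctx, void *ptr, size_t size)
{
    count_nallocs++;
    return count_orig.realloc(count_orig.ctx, ptr, size);
}

static void
count_free(void *ctx, void *ptr)
{
    count_orig.free(count_orig.ctx, ptr);
}

static void
install_counting_allocator(void)
{
    /* ctx first, then malloc/realloc/free, matching PyMemAllocator */
    PyMemAllocator alloc = {NULL, count_malloc, count_realloc, count_free};
    PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &count_orig);
    PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
}
#endif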

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;

    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}

void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}
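
/* Illustrative sketch: blocks must be released through the same family
 * that allocated them, e.g. PyMem_Malloc pairs with PyMem_Free, never
 * with free(). The function name is hypothetical and the block is
 * disabled, for exposition only. */
#if 0
static void
pymem_pairing_example(void)
{
    char *buf = (char *)PyMem_Malloc(64);
    if (buf != NULL) {
        char *tmp = (char *)PyMem_Realloc(buf, 128);
        if (tmp != NULL)
            buf = tmp;          /* realloc may move the block */
        PyMem_Free(buf);        /* same family as PyMem_Malloc */
    }
}
#endif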

char *
_PyMem_RawStrdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_RawMalloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_Malloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists. This technique is quite
 * efficient for memory-intensive programs which allocate mainly small-sized
 * blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

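/* Worked example: a request of 21 bytes falls in the 17-24 row of the
 * table above. Its size class index is (21 - 1) >> ALIGNMENT_SHIFT == 2,
 * and INDEX2SIZE(2) == (2 + 1) << 3 == 24, so the caller is handed a
 * 24-byte block. */
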
/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory"; it's only an
 * addressable-range wastage.
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/*
 * Basic types
 * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.
 */
#undef  uchar
#define uchar   unsigned char   /* assuming == 8 bits  */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

#undef  ulong
#define ulong   unsigned long   /* assuming >= 32 bits */

#undef  uptr
#define uptr    Py_uintptr_t

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uchar block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uptr address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
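
/* Worked example, assuming a 64-bit build where sizeof(struct pool_header)
 * is 48 bytes (so POOL_OVERHEAD == 48): a 4K pool carved for size class 0
 * holds NUMBLOCKS(0) == (4096 - 48) / 8 == 506 eight-byte blocks. */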

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member. The prevpool member has no meaning in
    this case. Empty pools have no inherent size class: the next time a
    malloc finds an empty list in usedpools[], it takes the first pool off
    of freepools. If the size class needed happens to be the same as the
    size class the pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with the
fact that pymalloc strives at all levels (arena, pool, and block) never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
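
/* Worked illustration of the fudge above: PTA(0) points 2*sizeof(block *)
 * bytes *before* &usedpools[0], so viewed as a poolp its nextpool field
 * lands exactly on usedpools[0] (which stores PTA(0)) and its prevpool
 * field on usedpools[1] (likewise PTA(0)). Hence, initially,
 * usedpools[i+i]->nextpool == usedpools[i+i]->prevpool == usedpools[i+i]
 * for every size class i: each headed circular list starts out empty. */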

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;

#ifdef PYMALLOC_DEBUG
    if (Py_GETENV("PYTHONMALLOCSTATS"))
        _PyObject_DebugMallocStats(stderr);
#endif
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > PY_SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uptr)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
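
/* Worked example for the alignment fixup above: ARENA_SIZE / POOL_SIZE ==
 * (256 << 10) / 4096 == 64 pools per arena. If the underlying allocator
 * returns a base address sitting, say, 1024 bytes past a pool boundary
 * (excess == 1024), the first usable pool starts POOL_SIZE - 1024 bytes
 * into the arena and only 63 whole pools fit. */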

/*
Py_ADDRESS_IN_RANGE(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated. `arenas` is still NULL in that
case. We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address. In addition, AO.address is not 0 (no arena can start at address 0
(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash. If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5. Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers. The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls. Since this test is needed at every entry point, it's
extremely desirable that it be this fast.

Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated
by Python, it is important that (POOL)->arenaindex is read only once, as
another thread may be concurrently modifying the value without holding the
GIL. To accomplish this, the arenaindex_temp variable is used to store
(POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's
execution. The caller of the macro is responsible for declaring this
variable.
*/
#define Py_ADDRESS_IN_RANGE(P, POOL)                                    \
    ((arenaindex_temp = (POOL)->arenaindex) < maxarenas &&              \
     (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&  \
     arenas[arenaindex_temp].address != 0)
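
/* Worked example of the unsigned trick above: let B =
 * arenas[arenaindex_temp].address. If P < B, the unsigned subtraction
 * (uptr)(P) - B wraps to a huge value that cannot be < ARENA_SIZE, so the
 * single comparison covers both bounds of B <= P < B + ARENA_SIZE. */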
Thomas Woutersa9773292006-04-21 09:43:23 +00001077
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001078
1079/* This is only useful when running memory debuggers such as
1080 * Purify or Valgrind. Uncomment to use.
1081 *
Martin v. Löwis9f2e3462007-07-21 17:22:18 +00001082#define Py_USING_MEMORY_DEBUGGER
Martin v. Löwis6fea2332008-09-25 04:15:27 +00001083 */
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001084
1085#ifdef Py_USING_MEMORY_DEBUGGER
1086
1087/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design
1088 * This leads to thousands of spurious warnings when using
1089 * Purify or Valgrind. By making a function, we can easily
1090 * suppress the uninitialized memory reads in this one function.
1091 * So we won't ignore real errors elsewhere.
1092 *
1093 * Disable the macro and use a function.
1094 */
1095
1096#undef Py_ADDRESS_IN_RANGE
1097
Thomas Wouters89f507f2006-12-13 04:49:30 +00001098#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \
Stefan Krah735bb122010-11-26 10:54:09 +00001099 (__GNUC__ >= 4))
Neal Norwitze5e5aa42005-11-13 18:55:39 +00001100#define Py_NO_INLINE __attribute__((__noinline__))
1101#else
1102#define Py_NO_INLINE
1103#endif
1104
1105/* Don't make static, to try to ensure this isn't inlined. */
1106int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
1107#undef Py_NO_INLINE
Neal Norwitz7eb3c912004-06-06 19:20:22 +00001108#endif
Tim Peters338e0102002-04-01 19:23:44 +00001109
Neil Schemenauera35c6882001-02-27 04:45:05 +00001110/*==========================================================================*/
1111
Tim Peters84c1b972002-04-04 04:44:32 +00001112/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
1113 * from all other currently live pointers. This may not be possible.
1114 */
Neil Schemenauera35c6882001-02-27 04:45:05 +00001115
1116/*
1117 * The basic blocks are ordered by decreasing execution frequency,
1118 * which minimizes the number of jumps in the most common cases,
1119 * improves branching prediction and instruction scheduling (small
1120 * block allocations typically result in a couple of instructions).
1121 * Unless the optimizer reorders everything, being too smart...
1122 */
1123
Victor Stinner0507bf52013-07-07 02:05:46 +02001124static void *
1125_PyObject_Malloc(void *ctx, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001126{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001127 block *bp;
1128 poolp pool;
1129 poolp next;
1130 uint size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001131
Antoine Pitrou0aaaa622013-04-06 01:15:30 +02001132 _Py_AllocatedBlocks++;
1133
Benjamin Peterson05159c42009-12-03 03:01:27 +00001134#ifdef WITH_VALGRIND
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001135 if (UNLIKELY(running_on_valgrind == -1))
1136 running_on_valgrind = RUNNING_ON_VALGRIND;
1137 if (UNLIKELY(running_on_valgrind))
1138 goto redirect;
Benjamin Peterson05159c42009-12-03 03:01:27 +00001139#endif
1140
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001141 /*
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001142 * This implicitly redirects malloc(0).
1143 */
1144 if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
1145 LOCK();
1146 /*
1147 * Most frequent paths first
1148 */
1149 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
1150 pool = usedpools[size + size];
1151 if (pool != pool->nextpool) {
1152 /*
1153 * There is a used pool for this size class.
1154 * Pick up the head block of its free list.
1155 */
1156 ++pool->ref.count;
1157 bp = pool->freeblock;
1158 assert(bp != NULL);
1159 if ((pool->freeblock = *(block **)bp) != NULL) {
1160 UNLOCK();
1161 return (void *)bp;
1162 }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (block*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(block **)(pool->freeblock) = NULL;
                UNLOCK();
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available:  use a free pool.
         */
        if (usable_arenas == NULL) {
            /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            usable_arenas = new_arena();
            if (usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            usable_arenas->nextarena =
                usable_arenas->prevarena = NULL;
        }
        assert(usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list.  However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --usable_arenas->nfreepools;
            if (usable_arenas->nfreepools == 0) {
                /* Wholly allocated:  remove. */
                assert(usable_arenas->freepools == NULL);
                assert(usable_arenas->nextarena == NULL ||
                       usable_arenas->nextarena->prevarena ==
                       usable_arenas);

                usable_arenas = usable_arenas->nextarena;
                if (usable_arenas != NULL) {
                    usable_arenas->prevarena = NULL;
                    assert(usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0:  it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(usable_arenas->freepools != NULL ||
                       usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                assert(bp != NULL);
                pool->freeblock = *(block **)bp;
                UNLOCK();
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (block *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(block **)(pool->freeblock) = NULL;
            UNLOCK();
            return (void *)bp;
        }
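        /*
         * Worked example for the header setup above, assuming
         * POOL_SIZE == 4096 and POOL_OVERHEAD == 48 (typical 64-bit
         * values; the real constants depend on the build): for the
         * 32-byte size class, the first block is handed out at offset 48,
         * freeblock points at offset 80, nextoffset becomes 112, and
         * maxnextoffset is 4064 -- the last offset at which a whole
         * 32-byte block still fits.
         */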

        /* Carve off a new pool. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                               ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = usable_arenas - arenas;
        assert(&arenas[pool->arenaindex] == usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request), or when the max memory limit
     * has been reached.
     */
    {
        void *result = PyMem_RawMalloc(nbytes);
        if (!result)
            _Py_AllocatedBlocks--;
        return result;
    }
}

/* free */

ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
static void
_PyObject_Free(void *ctx, void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;
#ifndef Py_USING_MEMORY_DEBUGGER
    uint arenaindex_temp;
#endif

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    _Py_AllocatedBlocks--;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);            /* else it was empty */
        *(block **)p = lastfree = pool->freeblock;
        pool->freeblock = (block *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools.  This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management.  We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore the invariant that usable_arenas is sorted
             *    in order of nfreepools.
             * 4. Else there's nothing more to do.
             */
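            /*
             * Illustration of the sort invariant (made-up counts): if
             * usable_arenas holds arenas with nfreepools 1 -> 2 -> 6 and
             * the middle arena frees a pool (nf == 3), nothing moves,
             * since 3 <= 6 (case 4).  Only once that arena reached
             * nf == 7 > 6 would case 3 slide it past the arena with 6
             * free pools, keeping the most-full arenas at the front.
             */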
            if (nf == ao->ntotalpools) {
                /* Case 1.  First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    usable_arenas = ao->nextarena;
                    assert(usable_arenas == NULL ||
                           usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = unused_arena_objects;
                unused_arena_objects = ao;

                /* Free the entire arena. */
                _PyObject_Arena.free(_PyObject_Arena.ctx,
                                     (void *)ao->address, ARENA_SIZE);
                ao->address = 0;                        /* mark unassociated */
                --narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas.  Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = usable_arenas;
                ao->prevarena = NULL;
                if (usable_arenas)
                    usable_arenas->prevarena = ao;
                usable_arenas = ao;
                assert(usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted.  The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed.  In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4.  Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3:  We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(usable_arenas == ao);
                usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);            /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

#ifdef WITH_VALGRIND
redirect:
#endif
    /* We didn't allocate this address. */
    PyMem_RawFree(p);
}

/* realloc.  If p is NULL, this acts like malloc(nbytes).  Else if nbytes==0,
 * then as the Python docs promise, we do not treat this like free(p), and
 * return a non-NULL result.
 */

ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
static void *
_PyObject_Realloc(void *ctx, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;
#ifndef Py_USING_MEMORY_DEBUGGER
    uint arenaindex_temp;
#endif

    if (p == NULL)
        return _PyObject_Malloc(ctx, nbytes);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    pool = POOL_ADDR(p);
    if (Py_ADDRESS_IN_RANGE(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff:  it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
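        /*
         * Worked example of the shrink rule: shrinking a 64-byte block
         * to 50 bytes keeps it in place (4*50 = 200 > 192 = 3*64), while
         * shrinking it to 40 bytes (4*40 = 160 <= 192) copies it into a
         * smaller size class, since at least 25% of the 64 bytes is
         * shaved off.
         */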
        bp = _PyObject_Malloc(ctx, nbytes);
        if (bp != NULL) {
            memcpy(bp, p, size);
            _PyObject_Free(ctx, p);
        }
        return bp;
    }
#ifdef WITH_VALGRIND
 redirect:
#endif
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt:  let C continue to manage this block.
     */
    if (nbytes)
        return PyMem_RawRealloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = PyMem_RawRealloc(p, 1);
    return bp ? bp : p;
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}

#endif /* WITH_PYMALLOC */

#ifdef PYMALLOC_DEBUG
/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uchar *q = (const uchar *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uchar *q = (uchar *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uchar)(n & 0xff);
        n >>= 8;
    }
}
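
/* Example (with SST == 8): write_size_t(p, 0x1234) stores the bytes
 * 00 00 00 00 00 00 12 34 at p, and read_size_t(p) folds them back into
 * 0x1234 -- the same on every host, regardless of native byte order,
 * which is what makes these fields easy to read in a memory dump.
 */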

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}

#else
#define pool_is_in_list(X, Y) 1

#endif  /* Py_DEBUG */

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S]
    API ID.  See PEP 445.  This is a character, but seems undocumented.
p[S+1: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
    and _PyMem_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
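
/* Worked example of the layout above (assuming S == 8 and, illustratively,
 * an API ID of 'o'): for a 3-byte request, the wrapped allocator hands back
 * p and 3 + 4*8 = 35 bytes in total.  p[0:8] holds 3 big-endian, p[8] holds
 * 'o', p[9:16] holds FORBIDDENBYTEs, p[16:19] is the caller's memory
 * (initially CLEANBYTE), p[19:27] holds FORBIDDENBYTEs again, and p[27:35]
 * holds the big-endian serial number.  The caller sees only &p[16].
 */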

static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uchar *p;           /* base address of malloc'ed block */
    uchar *tail;        /* p + 2*SST + nbytes == pointer to tail pad bytes */
    size_t total;       /* nbytes + 4*SST */

    bumpserialno();
    if (nbytes > PY_SSIZE_T_MAX - 4*SST)
        /* overflow:  can't represent total as a Py_ssize_t */
        return NULL;
    total = nbytes + 4*SST;

    p = (uchar *)api->alloc.malloc(api->alloc.ctx, total);
    if (p == NULL)
        return NULL;

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uchar)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    if (nbytes > 0)
        memset(p + 2*SST, CLEANBYTE, nbytes);

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = p + 2*SST + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    return p + 2*SST;
}

/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs with the api ID are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
static void
_PyMem_DebugFree(void *ctx, void *p)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uchar *q = (uchar *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    if (p == NULL)
        return;
    _PyMem_DebugCheckAddress(api->api_id, p);
    nbytes = read_size_t(q);
    nbytes += 4*SST;
    if (nbytes > 0)
        memset(q, DEADBYTE, nbytes);
    api->alloc.free(api->alloc.ctx, q);
}

/* Largest number of erased bytes we can undo if the underlying realloc()
 * fails while shrinking.
 */
#define ERASED_SIZE 64

static void *
_PyMem_DebugRealloc(void *ctx, void *p, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uchar *q = (uchar *)p;
    uchar *tail;
    uchar save[ERASED_SIZE];    /* erased bytes, kept for undo on failure */
    size_t total;               /* nbytes + 4*SST */
    size_t original_nbytes;
    size_t erased = 0;          /* # of user bytes overwritten below */
    int i;

    if (p == NULL)
        return _PyMem_DebugMalloc(ctx, nbytes);

    _PyMem_DebugCheckAddress(api->api_id, p);
    bumpserialno();
    original_nbytes = read_size_t(q - 2*SST);
    if (nbytes > PY_SSIZE_T_MAX - 4*SST)
        /* overflow:  can't represent total as a Py_ssize_t */
        return NULL;
    total = nbytes + 4*SST;

    if (nbytes < original_nbytes) {
        /* shrinking:  mark the soon-to-be-dropped tail of the user region
         * dead before the underlying realloc() can recycle it.  Erase no
         * more than we saved, so the bytes can be restored if realloc()
         * fails and the caller keeps using the old block.
         */
        erased = original_nbytes - nbytes;
        if (erased > ERASED_SIZE)
            erased = ERASED_SIZE;
        memcpy(save, q + nbytes, erased);
        memset(q + nbytes, DEADBYTE, erased);
    }

    /* Resize and add decorations.  We may get a new pointer here, in which
     * case we didn't get the chance to mark all of the old memory with
     * DEADBYTE, but we live with that.
     */
    q = (uchar *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total);
    if (q == NULL) {
        if (erased)
            memcpy((uchar *)p + nbytes, save, erased);
        return NULL;
    }

    write_size_t(q, nbytes);
    assert(q[SST] == (uchar)api->api_id);
    for (i = 1; i < SST; ++i)
        assert(q[SST + i] == FORBIDDENBYTE);
    q += 2*SST;

    tail = q + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(q + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return q;
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uchar *q = (const uchar *)p;
    char msgbuf[64];
    char *msg;
    size_t nbytes;
    const uchar *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msg, sizeof(msgbuf),
                 "bad ID: Allocated using API '%c', verified using API '%c'",
                 id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}

/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uchar *q = (const uchar *)p;
    const uchar *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uchar byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "       may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uchar byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
}

#endif  /* PYMALLOC_DEBUG */

static size_t
printone(FILE *out, const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, out);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', out);
    fputc('=', out);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        unsigned int digit = (unsigned int)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, out);

    return origvalue;
}
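
/* Example: printone(out, "# arenas allocated current", 1234567) pads the
 * message with spaces out to column 35, emits '=', then prints the value
 * right-aligned in a 21-character field with thousands separators, giving
 * a line ending in "=            1,234,567".  (The field width follows
 * from the 22-byte buffer above, filled backwards from the newline.)
 */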

void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char buf1[128];
    char buf2[128];
    PyOS_snprintf(buf1, sizeof(buf1),
                  "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(buf2, sizeof(buf2),
                  "%48s ", buf1);
    (void)printone(out, buf2, num_blocks * sizeof_block);
}

#ifdef WITH_PYMALLOC

/* Print summary info to "out" about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 */
void
_PyObject_DebugMallocStats(FILE *out)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint j;
        uptr base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uptr)NULL)
            continue;
        narenas += 1;

        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uptr)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uptr)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uptr) arenas[i].pool_address);
        for (j = 0;
             base < (uptr) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
                assert(pool_is_in_list(p, arenas[i].freepools));
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
#ifdef PYMALLOC_DEBUG
    (void)printone(out, "# times object malloc called", serialno);
#endif
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
}

#endif /* #ifdef WITH_PYMALLOC */

#ifdef Py_USING_MEMORY_DEBUGGER
/* Make this function last so gcc won't inline it since the definition is
 * after the reference.
 */
int
Py_ADDRESS_IN_RANGE(void *P, poolp pool)
{
    uint arenaindex_temp = pool->arenaindex;

    return arenaindex_temp < maxarenas &&
           (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
           arenas[arenaindex_temp].address != 0;
}
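
/* The middle test relies on unsigned wraparound: if P lies below the
 * arena's base address, (uptr)P - address wraps to a huge value and fails
 * the "< ARENA_SIZE" comparison, so one compare covers both bounds.
 * Illustrative numbers: with address == 0x10000 and ARENA_SIZE == 256 KiB,
 * P == 0x10100 yields 0x100 (in range), while P == 0xFF00 wraps far past
 * ARENA_SIZE (out of range).
 */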
#endif