blob: f2651d7574b20f7b36665cfae05e84af404ef363 [file] [log] [blame]
Tim Peters1221c0a2002-03-23 00:20:15 +00001#include "Python.h"
Eric Snow2ebc5ce2017-09-07 23:51:28 -06002#include "internal/mem.h"
3#include "internal/pystate.h"
Tim Peters1221c0a2002-03-23 00:20:15 +00004
Benjamin Peterson3924f932016-09-18 19:12:48 -07005#include <stdbool.h>
6
Victor Stinner0611c262016-03-15 22:22:13 +01007
8/* Defined in tracemalloc.c */
9extern void _PyMem_DumpTraceback(int fd, const void *ptr);
10
11
Victor Stinner0507bf52013-07-07 02:05:46 +020012/* Python's malloc wrappers (see pymem.h) */
13
Victor Stinner34be8072016-03-14 12:04:26 +010014#undef uint
15#define uint unsigned int /* assuming >= 16 bits */
16
Victor Stinner0507bf52013-07-07 02:05:46 +020017/* Forward declaration */
Victor Stinnerc4aec362016-03-14 22:26:53 +010018static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
19static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
20static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
21static void _PyMem_DebugRawFree(void *ctx, void *p);
22
Victor Stinner0507bf52013-07-07 02:05:46 +020023static void* _PyMem_DebugMalloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020024static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020025static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
Victor Stinnerc4aec362016-03-14 22:26:53 +010026static void _PyMem_DebugFree(void *ctx, void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020027
28static void _PyObject_DebugDumpAddress(const void *p);
29static void _PyMem_DebugCheckAddress(char api_id, const void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020030
Nick Coghlan6ba64f42013-09-29 00:28:55 +100031#if defined(__has_feature) /* Clang */
32 #if __has_feature(address_sanitizer) /* is ASAN enabled? */
33 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070034 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100035 #else
36 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
37 #endif
38#else
39 #if defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x, is ASAN enabled? */
40 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070041 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100042 #else
43 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
44 #endif
45#endif
46
Tim Peters1221c0a2002-03-23 00:20:15 +000047#ifdef WITH_PYMALLOC
48
Victor Stinner0507bf52013-07-07 02:05:46 +020049#ifdef MS_WINDOWS
50# include <windows.h>
51#elif defined(HAVE_MMAP)
52# include <sys/mman.h>
53# ifdef MAP_ANONYMOUS
54# define ARENAS_USE_MMAP
55# endif
Antoine Pitrou6f26be02011-05-03 18:18:59 +020056#endif
57
Victor Stinner0507bf52013-07-07 02:05:46 +020058/* Forward declaration */
59static void* _PyObject_Malloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020060static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020061static void _PyObject_Free(void *ctx, void *p);
62static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
Martin v. Löwiscd83fa82013-06-27 12:23:29 +020063#endif
64
Victor Stinner0507bf52013-07-07 02:05:46 +020065
/* Raw-domain malloc backend: a thin wrapper around the platform malloc().
   The 'ctx' cookie is unused by this backend.

   A zero-byte request is bumped up to one byte so the caller always gets
   a distinct, non-NULL pointer: malloc(0) may legally return NULL (which
   callers would misread as an error) or a zero-sized region (which would
   break pymalloc). */
static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    size_t request = (size == 0) ? 1 : size;
    return malloc(request);
}
77
/* Raw-domain calloc backend: zeroing allocation via the platform calloc().
   The 'ctx' cookie is unused by this backend.

   A request where either count is zero is turned into calloc(1, 1) so a
   distinct, non-NULL pointer is returned on every platform; calloc(0, 0)
   may return NULL or a zero-sized region, either of which would confuse
   callers or break pymalloc. */
static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    if (nelem == 0 || elsize == 0) {
        return calloc(1, 1);
    }
    return calloc(nelem, elsize);
}
91
/* Raw-domain realloc backend: forwards to the platform realloc().
   The 'ctx' cookie is unused.  As with _PyMem_RawMalloc(), a zero-byte
   request is rounded up to one byte so the result is never a valid-looking
   NULL or zero-sized region. */
static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    return realloc(ptr, size == 0 ? 1 : size);
}
99
/* Raw-domain free backend: forwards to the platform free().
   The 'ctx' cookie is unused.  free(NULL) is a no-op, so a NULL 'ptr'
   is accepted here as well. */
static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}
105
106
/* Platform backends used to allocate/release whole pymalloc arenas.
   Exactly one pair is compiled in: VirtualAlloc/VirtualFree on Windows,
   anonymous mmap/munmap where available, plain malloc/free otherwise.
   The 'ctx' cookie is unused by all of these backends. */
#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    /* Reserve and commit a read/write region in one call. */
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    /* MEM_RELEASE requires size 0: the whole reservation is released. */
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    /* mmap() signals failure with MAP_FAILED, not NULL; normalize to NULL
       for the arena allocator interface. */
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif
153
154
Victor Stinnerdb067af2014-05-02 22:31:14 +0200155#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
Victor Stinner0507bf52013-07-07 02:05:46 +0200156#ifdef WITH_PYMALLOC
Victor Stinnerdb067af2014-05-02 22:31:14 +0200157# define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
Victor Stinner0507bf52013-07-07 02:05:46 +0200158#else
Victor Stinner6cf185d2013-10-10 15:58:42 +0200159# define PYOBJ_FUNCS PYRAW_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200160#endif
Victor Stinner15932592016-04-22 18:52:22 +0200161#define PYMEM_FUNCS PYOBJ_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200162
/* Per-domain state for the debug hooks: the wrapped ("inner") allocator
   plus a one-character API tag written into each block's header so that
   cross-API misuse (e.g. PyObject_Free on a PyMem_Malloc block) can be
   detected. */
typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;

/* Debug-hook state for the three domains: raw ('r'), mem ('m'), obj ('o').
   The inner allocators start as the defaults; PyMem_SetupDebugHooks()
   re-captures whatever allocator is installed at hook time. */
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', {NULL, PYRAW_FUNCS}},
    {'m', {NULL, PYMEM_FUNCS}},
    {'o', {NULL, PYOBJ_FUNCS}}
    };
177
Victor Stinnerc4aec362016-03-14 22:26:53 +0100178#define PYRAWDBG_FUNCS \
179 _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree
180#define PYDBG_FUNCS \
181 _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
Victor Stinner0507bf52013-07-07 02:05:46 +0200182
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600183
/* Default allocator tables for the three domains.  The live tables reside
   in _PyRuntime (shortened here via the _PyMem_Raw/_PyMem/_PyObject macros);
   these const templates are copied into the runtime by _PyMem_Initialize().
   In Py_DEBUG builds the debug hooks are installed from the start. */

#define _PyMem_Raw _PyRuntime.mem.allocators.raw
static const PyMemAllocatorEx _pymem_raw = {
#ifdef Py_DEBUG
    &_PyMem_Debug.raw, PYRAWDBG_FUNCS
#else
    NULL, PYRAW_FUNCS
#endif
    };

#define _PyMem _PyRuntime.mem.allocators.mem
static const PyMemAllocatorEx _pymem = {
#ifdef Py_DEBUG
    &_PyMem_Debug.mem, PYDBG_FUNCS
#else
    NULL, PYMEM_FUNCS
#endif
    };

#define _PyObject _PyRuntime.mem.allocators.obj
static const PyMemAllocatorEx _pyobject = {
#ifdef Py_DEBUG
    &_PyMem_Debug.obj, PYDBG_FUNCS
#else
    NULL, PYOBJ_FUNCS
#endif
    };
210
/* Configure the memory allocators from an option string (the value of
   PYTHONMALLOC).  Recognized values: "malloc", "malloc_debug", "debug",
   and — when compiled with pymalloc — "pymalloc", "pymalloc_debug".
   NULL or "" selects the build's default.
   Returns 0 on success, -1 if 'opt' names an unknown allocator. */
int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line
           options): use default allocators */
#ifdef Py_DEBUG
#  ifdef WITH_PYMALLOC
        opt = "pymalloc_debug";
#  else
        opt = "malloc_debug";
#  endif
#else
   /* !Py_DEBUG */
#  ifdef WITH_PYMALLOC
        opt = "pymalloc";
#  else
        opt = "malloc";
#  endif
#endif
    }

    if (strcmp(opt, "debug") == 0) {
        /* keep the current allocators, just wrap them in the debug hooks */
        PyMem_SetupDebugHooks();
    }
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0)
    {
        /* system malloc for all three domains */
        PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};

        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);

        if (strcmp(opt, "malloc_debug") == 0)
            PyMem_SetupDebugHooks();
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0
             || strcmp(opt, "pymalloc_debug") == 0)
    {
        /* raw -> system malloc; mem and obj -> pymalloc */
        PyMemAllocatorEx raw_alloc = {NULL, PYRAW_FUNCS};
        PyMemAllocatorEx mem_alloc = {NULL, PYMEM_FUNCS};
        PyMemAllocatorEx obj_alloc = {NULL, PYOBJ_FUNCS};

        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &raw_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &mem_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &obj_alloc);

        if (strcmp(opt, "pymalloc_debug") == 0)
            PyMem_SetupDebugHooks();
    }
#endif
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}
269
Victor Stinner0507bf52013-07-07 02:05:46 +0200270#undef PYRAW_FUNCS
Victor Stinner6cf185d2013-10-10 15:58:42 +0200271#undef PYMEM_FUNCS
272#undef PYOBJ_FUNCS
Victor Stinnerc4aec362016-03-14 22:26:53 +0100273#undef PYRAWDBG_FUNCS
Victor Stinner6cf185d2013-10-10 15:58:42 +0200274#undef PYDBG_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200275
/* Default arena allocator: the platform-specific backend pair selected
   above (VirtualAlloc / mmap / malloc), with a NULL ctx.  Copied into
   _PyRuntime.obj.allocator_arenas by _PyObject_Initialize(). */
static const PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };
285
/* Install the default arena allocator into the per-runtime object state.
   Called during runtime initialization. */
void
_PyObject_Initialize(struct _pyobj_runtime_state *state)
{
    state->allocator_arenas = _PyObject_Arena;
}
291
/* Initialize the per-runtime memory state: install the default allocator
   tables for the three domains and, for pymalloc, set up the usedpools
   table of dummy pool headers. */
void
_PyMem_Initialize(struct _pymem_runtime_state *state)
{
    state->allocators.raw = _pymem_raw;
    state->allocators.mem = _pymem;
    state->allocators.obj = _pyobject;

#ifdef WITH_PYMALLOC
    Py_BUILD_ASSERT(NB_SMALL_SIZE_CLASSES == 64);

    /* For each of the 64 size classes, make usedpools[2*x] and
       usedpools[2*x + 1] point just *before* the slot pair, offset by the
       size of two block pointers.  That way each slot pair aliases the
       nextpool/prevpool fields of a fake pool_header, so an empty size
       class looks like a circular list containing only its header
       (NOTE(review): this mirrors the long-standing obmalloc usedpools
       trick; only nextpool/prevpool of the fake header are ever read). */
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int x = i * 8 + j;
            poolp *addr = &(state->usedpools[2*(x)]);
            poolp val = (poolp)((uint8_t *)addr - 2*sizeof(pyblock *));
            state->usedpools[x * 2] = val;
            state->usedpools[x * 2 + 1] = val;
        };
    };
#endif /* WITH_PYMALLOC */
}
313
#ifdef WITH_PYMALLOC
/* Return non-zero if the debug hooks are installed on the object domain
   (detected by comparing the installed malloc pointer). */
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

/* Return non-zero if pymalloc is the active object allocator, looking
   through the debug hooks if they are installed. */
int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        /* debug hooks installed: inspect the wrapped allocator */
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif
332
/* Install the debug hooks on the three allocator domains.  For each domain
   not already hooked (checked via its malloc pointer), the current
   allocator is saved into _PyMem_Debug.<domain>.alloc and replaced by the
   debug wrappers.  Idempotent: calling it twice is a no-op.
   The raw domain gets the "Raw" variants, which must remain usable
   without the GIL held. */
void
PyMem_SetupDebugHooks(void)
{
    PyMemAllocatorEx alloc;

    alloc.malloc = _PyMem_DebugRawMalloc;
    alloc.calloc = _PyMem_DebugRawCalloc;
    alloc.realloc = _PyMem_DebugRawRealloc;
    alloc.free = _PyMem_DebugRawFree;

    if (_PyMem_Raw.malloc != _PyMem_DebugRawMalloc) {
        alloc.ctx = &_PyMem_Debug.raw;
        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }

    /* reuse 'alloc' for the GIL-protected mem and obj domains */
    alloc.malloc = _PyMem_DebugMalloc;
    alloc.calloc = _PyMem_DebugCalloc;
    alloc.realloc = _PyMem_DebugRealloc;
    alloc.free = _PyMem_DebugFree;

    if (_PyMem.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.mem;
        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }

    if (_PyObject.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.obj;
        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}
366
367void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200368PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200369{
370 switch(domain)
371 {
372 case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
373 case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
374 case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
375 default:
Victor Stinnerdb067af2014-05-02 22:31:14 +0200376 /* unknown domain: set all attributes to NULL */
Victor Stinner0507bf52013-07-07 02:05:46 +0200377 allocator->ctx = NULL;
378 allocator->malloc = NULL;
Victor Stinnerdb067af2014-05-02 22:31:14 +0200379 allocator->calloc = NULL;
Victor Stinner0507bf52013-07-07 02:05:46 +0200380 allocator->realloc = NULL;
381 allocator->free = NULL;
382 }
383}
384
385void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200386PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200387{
388 switch(domain)
389 {
390 case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
391 case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
392 case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
393 /* ignore unknown domain */
394 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200395}
396
/* Copy the current arena allocator (used by pymalloc to obtain whole
   arenas) into *allocator. */
void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyRuntime.obj.allocator_arenas;
}

/* Install *allocator as the arena allocator. */
void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyRuntime.obj.allocator_arenas = *allocator;
}
408
/* Public raw-domain API: dispatches through the installed raw allocator.
   These functions are safe to call without holding the GIL. */

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc(); the division form also rejects
       nelem * elsize multiplication overflow */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

/* Release a block obtained from PyMem_RawMalloc/Calloc/Realloc. */
void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}
446
/* Public mem-domain API: dispatches through the installed mem allocator.
   Callers must hold the GIL (unlike the PyMem_Raw* functions). */

void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

/* Release a block obtained from PyMem_Malloc/Calloc/Realloc. */
void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}
479
/* Duplicate 'str' into a buffer obtained from PyMem_RawMalloc().
   Returns NULL if the allocation fails.  The caller owns the result and
   must release it with PyMem_RawFree(). */
char *
_PyMem_RawStrdup(const char *str)
{
    size_t nbytes = strlen(str) + 1;    /* include the terminating NUL */
    char *dup = PyMem_RawMalloc(nbytes);
    if (dup != NULL) {
        memcpy(dup, str, nbytes);
    }
    return dup;
}
493
/* Duplicate 'str' into a buffer obtained from PyMem_Malloc().
   Returns NULL if the allocation fails.  The caller owns the result and
   must release it with PyMem_Free(). */
char *
_PyMem_Strdup(const char *str)
{
    size_t nbytes = strlen(str) + 1;    /* include the terminating NUL */
    char *dup = PyMem_Malloc(nbytes);
    if (dup != NULL) {
        memcpy(dup, str, nbytes);
    }
    return dup;
}
507
/* Public object-domain API: dispatches through the installed object
   allocator (pymalloc by default). */

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

/* Release a block obtained from PyObject_Malloc/Calloc/Realloc. */
void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}
540
541
542#ifdef WITH_PYMALLOC
543
Benjamin Peterson05159c42009-12-03 03:01:27 +0000544#ifdef WITH_VALGRIND
545#include <valgrind/valgrind.h>
546
547/* If we're using GCC, use __builtin_expect() to reduce overhead of
548 the valgrind checks */
549#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
550# define UNLIKELY(value) __builtin_expect((value), 0)
551#else
552# define UNLIKELY(value) (value)
553#endif
554
555/* -1 indicates that we haven't checked that we're running on valgrind yet. */
556static int running_on_valgrind = -1;
557#endif
558
/* Return the number of currently allocated pymalloc blocks (the counter
   maintained in the runtime state by the object allocator). */
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _PyRuntime.mem.num_allocated_blocks;
}
564
565
Thomas Woutersa9773292006-04-21 09:43:23 +0000566/* Allocate a new arena. If we run out of memory, return NULL. Else
567 * allocate a new arena, and return the address of an arena_object
568 * describing the new arena. It's expected that the caller will set
569 * `usable_arenas` to the return value.
570 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;   /* lazily cached PYTHONMALLOCSTATS flag */

    if (debug_stats == -1) {
        char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    /* No spare arena_object: grow the arenas array first. */
    if (_PyRuntime.mem.unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = _PyRuntime.mem.maxarenas ? _PyRuntime.mem.maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= _PyRuntime.mem.maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*_PyRuntime.mem.arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*_PyRuntime.mem.arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(_PyRuntime.mem.arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        _PyRuntime.mem.arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(_PyRuntime.mem.usable_arenas == NULL);
        assert(_PyRuntime.mem.unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = _PyRuntime.mem.maxarenas; i < numarenas; ++i) {
            _PyRuntime.mem.arenas[i].address = 0;              /* mark as unassociated */
            _PyRuntime.mem.arenas[i].nextarena = i < numarenas - 1 ?
                &_PyRuntime.mem.arenas[i+1] : NULL;
        }

        /* Update globals. */
        _PyRuntime.mem.unused_arena_objects = &_PyRuntime.mem.arenas[_PyRuntime.mem.maxarenas];
        _PyRuntime.mem.maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(_PyRuntime.mem.unused_arena_objects != NULL);
    arenaobj = _PyRuntime.mem.unused_arena_objects;
    _PyRuntime.mem.unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyRuntime.obj.allocator_arenas.alloc(_PyRuntime.obj.allocator_arenas.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = _PyRuntime.mem.unused_arena_objects;
        _PyRuntime.mem.unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    /* Bookkeeping: live-arena count, total allocations, high-water mark. */
    ++_PyRuntime.mem.narenas_currently_allocated;
    ++_PyRuntime.mem.ntimes_arena_allocated;
    if (_PyRuntime.mem.narenas_currently_allocated > _PyRuntime.mem.narenas_highwater)
        _PyRuntime.mem.narenas_highwater = _PyRuntime.mem.narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (pyblock*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        /* arena start is not pool-aligned: skip the partial leading pool */
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
663
Thomas Woutersa9773292006-04-21 09:43:23 +0000664/*
Benjamin Peterson3924f932016-09-18 19:12:48 -0700665address_in_range(P, POOL)
Thomas Woutersa9773292006-04-21 09:43:23 +0000666
667Return true if and only if P is an address that was allocated by pymalloc.
668POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
669(the caller is asked to compute this because the macro expands POOL more than
670once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
Benjamin Peterson3924f932016-09-18 19:12:48 -0700671variable and pass the latter to the macro; because address_in_range is
Thomas Woutersa9773292006-04-21 09:43:23 +0000672called on every alloc/realloc/free, micro-efficiency is important here).
673
674Tricky: Let B be the arena base address associated with the pool, B =
675arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
676
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000677 B <= P < B + ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000678
679Subtracting B throughout, this is true iff
680
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000681 0 <= P-B < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000682
683By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
684
685Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
686before the first arena has been allocated. `arenas` is still NULL in that
687case. We're relying on that maxarenas is also 0 in that case, so that
688(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
689into a NULL arenas.
690
691Details: given P and POOL, the arena_object corresponding to P is AO =
692arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
693stores, etc), POOL is the correct address of P's pool, AO.address is the
694correct base address of the pool's arena, and P must be within ARENA_SIZE of
695AO.address. In addition, AO.address is not 0 (no arena can start at address 0
Benjamin Peterson3924f932016-09-18 19:12:48 -0700696(NULL)). Therefore address_in_range correctly reports that obmalloc
Thomas Woutersa9773292006-04-21 09:43:23 +0000697controls P.
698
699Now suppose obmalloc does not control P (e.g., P was obtained via a direct
700call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
701in this case -- it may even be uninitialized trash. If the trash arenaindex
702is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
703control P.
704
705Else arenaindex is < maxarena, and AO is read up. If AO corresponds to an
706allocated arena, obmalloc controls all the memory in slice AO.address :
707AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
708so P doesn't lie in that slice, so the macro correctly reports that P is not
709controlled by obmalloc.
710
711Finally, if P is not controlled by obmalloc and AO corresponds to an unused
712arena_object (one not currently associated with an allocated arena),
713AO.address is 0, and the second test in the macro reduces to:
714
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000715 P < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000716
717If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
718that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
719of the test still passes, and the third clause (AO.address != 0) is necessary
720to get the correct result: AO.address is 0 in this case, so the macro
721correctly reports that P is not controlled by obmalloc (despite that P lies in
722slice AO.address : AO.address + ARENA_SIZE).
723
724Note: The third (AO.address != 0) clause was added in Python 2.5. Before
7252.5, arenas were never free()'ed, and an arenaindex < maxarena always
726corresponded to a currently-allocated arena, so the "P is not controlled by
727obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
728was impossible.
729
730Note that the logic is excruciating, and reading up possibly uninitialized
731memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
732creates problems for some memory debuggers. The overwhelming advantage is
733that this test determines whether an arbitrary address is controlled by
734obmalloc in a small constant time, independent of the number of arenas
735obmalloc controls. Since this test is needed at every entry point, it's
736extremely desirable that it be this fast.
737*/
Thomas Woutersa9773292006-04-21 09:43:23 +0000738
/* Return true iff 'p' is an address allocated by pymalloc; 'pool' must be
   POOL_ADDR(p).  See the large comment above for the full correctness
   argument.  The unsigned subtraction makes the single comparison cover
   both "p >= base" and "p < base + ARENA_SIZE". */
static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL. The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    return arenaindex < _PyRuntime.mem.maxarenas &&
        (uintptr_t)p - _PyRuntime.mem.arenas[arenaindex].address < ARENA_SIZE &&
        _PyRuntime.mem.arenas[arenaindex].address != 0;
}
Tim Peters338e0102002-04-01 19:23:44 +0000752
Neil Schemenauera35c6882001-02-27 04:45:05 +0000753/*==========================================================================*/
754
Tim Peters84c1b972002-04-04 04:44:32 +0000755/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct
756 * from all other currently live pointers. This may not be possible.
757 */
Neil Schemenauera35c6882001-02-27 04:45:05 +0000758
759/*
760 * The basic blocks are ordered by decreasing execution frequency,
761 * which minimizes the number of jumps in the most common cases,
762 * improves branching prediction and instruction scheduling (small
763 * block allocations typically result in a couple of instructions).
764 * Unless the optimizer reorders everything, being too smart...
765 */
766
/* Core entry point of the small-object allocator ("pymalloc").
 *
 * ctx        -- PEP 445 allocator context (unused here).
 * use_calloc -- non-zero: zero-fill the returned memory (calloc semantics).
 * nelem, elsize -- the request is nelem * elsize bytes; the caller must
 *                  guarantee the product cannot overflow (asserted below).
 *
 * Returns a new block or NULL.  Zero-byte requests, requests larger than
 * SMALL_REQUEST_THRESHOLD, and failures of the small-object path are all
 * forwarded to the raw (libc-backed) allocator at `redirect`.
 */
static void *
_PyObject_Alloc(int use_calloc, void *ctx, size_t nelem, size_t elsize)
{
    size_t nbytes;
    pyblock *bp;                /* address of the block handed out */
    poolp pool;
    poolp next;
    uint size;                  /* size class index, later the block size */

    _PyRuntime.mem.num_allocated_blocks++;

    assert(elsize == 0 || nelem <= PY_SSIZE_T_MAX / elsize);
    nbytes = nelem * elsize;

#ifdef WITH_VALGRIND
    /* -1 means "not determined yet": probe once, then cache the answer. */
    if (UNLIKELY(running_on_valgrind == -1))
        running_on_valgrind = RUNNING_ON_VALGRIND;
    if (UNLIKELY(running_on_valgrind))
        goto redirect;
#endif

    if (nelem == 0 || elsize == 0)
        goto redirect;

    /* nbytes > 0 here (zero requests were redirected above), so the
     * unsigned (nbytes - 1) below cannot wrap. */
    if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {
        LOCK();
        /*
         * Most frequent paths first
         */
        size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
        pool = _PyRuntime.mem.usedpools[size + size];
        if (pool != pool->nextpool) {
            /*
             * There is a used pool for this size class.
             * Pick up the head block of its free list.
             */
            ++pool->ref.count;
            bp = pool->freeblock;
            assert(bp != NULL);
            if ((pool->freeblock = *(pyblock **)bp) != NULL) {
                UNLOCK();
                if (use_calloc)
                    memset(bp, 0, nbytes);
                return (void *)bp;
            }
            /*
             * Reached the end of the free list, try to extend it.
             */
            if (pool->nextoffset <= pool->maxnextoffset) {
                /* There is room for another block. */
                pool->freeblock = (pyblock*)pool +
                                  pool->nextoffset;
                pool->nextoffset += INDEX2SIZE(size);
                *(pyblock **)(pool->freeblock) = NULL;
                UNLOCK();
                if (use_calloc)
                    memset(bp, 0, nbytes);
                return (void *)bp;
            }
            /* Pool is full, unlink from used pools. */
            next = pool->nextpool;
            pool = pool->prevpool;
            next->prevpool = pool;
            pool->nextpool = next;
            UNLOCK();
            if (use_calloc)
                memset(bp, 0, nbytes);
            return (void *)bp;
        }

        /* There isn't a pool of the right size class immediately
         * available:  use a free pool.
         */
        if (_PyRuntime.mem.usable_arenas == NULL) {
            /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
            if (_PyRuntime.mem.narenas_currently_allocated >= MAX_ARENAS) {
                UNLOCK();
                goto redirect;
            }
#endif
            _PyRuntime.mem.usable_arenas = new_arena();
            if (_PyRuntime.mem.usable_arenas == NULL) {
                UNLOCK();
                goto redirect;
            }
            _PyRuntime.mem.usable_arenas->nextarena =
                _PyRuntime.mem.usable_arenas->prevarena = NULL;
        }
        assert(_PyRuntime.mem.usable_arenas->address != 0);

        /* Try to get a cached free pool. */
        pool = _PyRuntime.mem.usable_arenas->freepools;
        if (pool != NULL) {
            /* Unlink from cached pools. */
            _PyRuntime.mem.usable_arenas->freepools = pool->nextpool;

            /* This arena already had the smallest nfreepools
             * value, so decreasing nfreepools doesn't change
             * that, and we don't need to rearrange the
             * usable_arenas list.  However, if the arena has
             * become wholly allocated, we need to remove its
             * arena_object from usable_arenas.
             */
            --_PyRuntime.mem.usable_arenas->nfreepools;
            if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
                /* Wholly allocated:  remove. */
                assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
                assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
                       _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
                       _PyRuntime.mem.usable_arenas);

                _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
                if (_PyRuntime.mem.usable_arenas != NULL) {
                    _PyRuntime.mem.usable_arenas->prevarena = NULL;
                    assert(_PyRuntime.mem.usable_arenas->address != 0);
                }
            }
            else {
                /* nfreepools > 0:  it must be that freepools
                 * isn't NULL, or that we haven't yet carved
                 * off all the arena's pools for the first
                 * time.
                 */
                assert(_PyRuntime.mem.usable_arenas->freepools != NULL ||
                       _PyRuntime.mem.usable_arenas->pool_address <=
                       (pyblock*)_PyRuntime.mem.usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
            }
        init_pool:
            /* Frontlink to used pools. */
            next = _PyRuntime.mem.usedpools[size + size]; /* == prev */
            pool->nextpool = next;
            pool->prevpool = next;
            next->nextpool = pool;
            next->prevpool = pool;
            pool->ref.count = 1;
            if (pool->szidx == size) {
                /* Luckily, this pool last contained blocks
                 * of the same size class, so its header
                 * and free list are already initialized.
                 */
                bp = pool->freeblock;
                assert(bp != NULL);
                pool->freeblock = *(pyblock **)bp;
                UNLOCK();
                if (use_calloc)
                    memset(bp, 0, nbytes);
                return (void *)bp;
            }
            /*
             * Initialize the pool header, set up the free list to
             * contain just the second block, and return the first
             * block.
             */
            pool->szidx = size;
            size = INDEX2SIZE(size);
            bp = (pyblock *)pool + POOL_OVERHEAD;
            pool->nextoffset = POOL_OVERHEAD + (size << 1);
            pool->maxnextoffset = POOL_SIZE - size;
            pool->freeblock = bp + size;
            *(pyblock **)(pool->freeblock) = NULL;
            UNLOCK();
            if (use_calloc)
                memset(bp, 0, nbytes);
            return (void *)bp;
        }

        /* Carve off a new pool. */
        assert(_PyRuntime.mem.usable_arenas->nfreepools > 0);
        assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
        pool = (poolp)_PyRuntime.mem.usable_arenas->pool_address;
        assert((pyblock*)pool <= (pyblock*)_PyRuntime.mem.usable_arenas->address +
                                 ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = (uint)(_PyRuntime.mem.usable_arenas - _PyRuntime.mem.arenas);
        assert(&_PyRuntime.mem.arenas[pool->arenaindex] == _PyRuntime.mem.usable_arenas);
        pool->szidx = DUMMY_SIZE_IDX;
        _PyRuntime.mem.usable_arenas->pool_address += POOL_SIZE;
        --_PyRuntime.mem.usable_arenas->nfreepools;

        if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
            assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
                   _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
                   _PyRuntime.mem.usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
            if (_PyRuntime.mem.usable_arenas != NULL) {
                _PyRuntime.mem.usable_arenas->prevarena = NULL;
                assert(_PyRuntime.mem.usable_arenas->address != 0);
            }
        }

        goto init_pool;
    }

    /* The small block allocator ends here. */

redirect:
    /* Redirect the original request to the underlying (libc) allocator.
     * We jump here on bigger requests, on error in the code above (as a
     * last chance to serve the request) or when the max memory limit
     * has been reached.
     */
    {
        void *result;
        if (use_calloc)
            result = PyMem_RawCalloc(nelem, elsize);
        else
            result = PyMem_RawMalloc(nbytes);
        if (!result)
            /* keep the live-block counter balanced on failure */
            _PyRuntime.mem.num_allocated_blocks--;
        return result;
    }
}
981
static void *
_PyObject_Malloc(void *ctx, size_t nbytes)
{
    /* PEP 445 malloc hook: a single request of nbytes bytes, without
     * zero-filling (use_calloc == 0). */
    return _PyObject_Alloc(0, ctx, 1, nbytes);
}
987
static void *
_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PEP 445 calloc hook: nelem * elsize zero-filled bytes
     * (use_calloc == 1); overflow of the product is asserted inside
     * _PyObject_Alloc. */
    return _PyObject_Alloc(1, ctx, nelem, elsize);
}
993
Neil Schemenauera35c6882001-02-27 04:45:05 +0000994/* free */
995
/* Free a block.  p == NULL is a no-op.  If pymalloc owns the address
 * (address_in_range), the block is pushed onto its pool's free list and
 * the pool/arena lists are rebalanced (four cases, documented inline);
 * otherwise the pointer is handed to PyMem_RawFree.  ctx (PEP 445
 * plumbing) is unused.
 */
static void
_PyObject_Free(void *ctx, void *p)
{
    poolp pool;
    pyblock *lastfree;          /* previous head of the pool free list */
    poolp next, prev;
    uint size;

    if (p == NULL)      /* free(NULL) has no effect */
        return;

    _PyRuntime.mem.num_allocated_blocks--;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    pool = POOL_ADDR(p);
    if (address_in_range(p, pool)) {
        /* We allocated this address. */
        LOCK();
        /* Link p to the start of the pool's freeblock list.  Since
         * the pool had at least the p block outstanding, the pool
         * wasn't empty (so it's already in a usedpools[] list, or
         * was full and is in no list -- it's not in the freeblocks
         * list in any case).
         */
        assert(pool->ref.count > 0);            /* else it was empty */
        *(pyblock **)p = lastfree = pool->freeblock;
        pool->freeblock = (pyblock *)p;
        if (lastfree) {
            struct arena_object* ao;
            uint nf;  /* ao->nfreepools */

            /* freeblock wasn't NULL, so the pool wasn't full,
             * and the pool is in a usedpools[] list.
             */
            if (--pool->ref.count != 0) {
                /* pool isn't empty:  leave it in usedpools */
                UNLOCK();
                return;
            }
            /* Pool is now empty:  unlink from usedpools, and
             * link to the front of freepools.  This ensures that
             * previously freed pools will be allocated later
             * (being not referenced, they are perhaps paged out).
             */
            next = pool->nextpool;
            prev = pool->prevpool;
            next->prevpool = prev;
            prev->nextpool = next;

            /* Link the pool to freepools.  This is a singly-linked
             * list, and pool->prevpool isn't used there.
             */
            ao = &_PyRuntime.mem.arenas[pool->arenaindex];
            pool->nextpool = ao->freepools;
            ao->freepools = pool;
            nf = ++ao->nfreepools;

            /* All the rest is arena management.  We just freed
             * a pool, and there are 4 cases for arena mgmt:
             * 1. If all the pools are free, return the arena to
             *    the system free().
             * 2. If this is the only free pool in the arena,
             *    add the arena back to the `usable_arenas` list.
             * 3. If the "next" arena has a smaller count of free
             *    pools, we have to "slide this arena right" to
             *    restore that usable_arenas is sorted in order of
             *    nfreepools.
             * 4. Else there's nothing more to do.
             */
            if (nf == ao->ntotalpools) {
                /* Case 1.  First unlink ao from usable_arenas.
                 */
                assert(ao->prevarena == NULL ||
                       ao->prevarena->address != 0);
                assert(ao ->nextarena == NULL ||
                       ao->nextarena->address != 0);

                /* Fix the pointer in the prevarena, or the
                 * usable_arenas pointer.
                 */
                if (ao->prevarena == NULL) {
                    _PyRuntime.mem.usable_arenas = ao->nextarena;
                    assert(_PyRuntime.mem.usable_arenas == NULL ||
                           _PyRuntime.mem.usable_arenas->address != 0);
                }
                else {
                    assert(ao->prevarena->nextarena == ao);
                    ao->prevarena->nextarena =
                        ao->nextarena;
                }
                /* Fix the pointer in the nextarena. */
                if (ao->nextarena != NULL) {
                    assert(ao->nextarena->prevarena == ao);
                    ao->nextarena->prevarena =
                        ao->prevarena;
                }
                /* Record that this arena_object slot is
                 * available to be reused.
                 */
                ao->nextarena = _PyRuntime.mem.unused_arena_objects;
                _PyRuntime.mem.unused_arena_objects = ao;

                /* Free the entire arena. */
                _PyRuntime.obj.allocator_arenas.free(_PyRuntime.obj.allocator_arenas.ctx,
                                     (void *)ao->address, ARENA_SIZE);
                ao->address = 0;                        /* mark unassociated */
                --_PyRuntime.mem.narenas_currently_allocated;

                UNLOCK();
                return;
            }
            if (nf == 1) {
                /* Case 2.  Put ao at the head of
                 * usable_arenas.  Note that because
                 * ao->nfreepools was 0 before, ao isn't
                 * currently on the usable_arenas list.
                 */
                ao->nextarena = _PyRuntime.mem.usable_arenas;
                ao->prevarena = NULL;
                if (_PyRuntime.mem.usable_arenas)
                    _PyRuntime.mem.usable_arenas->prevarena = ao;
                _PyRuntime.mem.usable_arenas = ao;
                assert(_PyRuntime.mem.usable_arenas->address != 0);

                UNLOCK();
                return;
            }
            /* If this arena is now out of order, we need to keep
             * the list sorted.  The list is kept sorted so that
             * the "most full" arenas are used first, which allows
             * the nearly empty arenas to be completely freed.  In
             * a few un-scientific tests, it seems like this
             * approach allowed a lot more memory to be freed.
             */
            if (ao->nextarena == NULL ||
                nf <= ao->nextarena->nfreepools) {
                /* Case 4.  Nothing to do. */
                UNLOCK();
                return;
            }
            /* Case 3:  We have to move the arena towards the end
             * of the list, because it has more free pools than
             * the arena to its right.
             * First unlink ao from usable_arenas.
             */
            if (ao->prevarena != NULL) {
                /* ao isn't at the head of the list */
                assert(ao->prevarena->nextarena == ao);
                ao->prevarena->nextarena = ao->nextarena;
            }
            else {
                /* ao is at the head of the list */
                assert(_PyRuntime.mem.usable_arenas == ao);
                _PyRuntime.mem.usable_arenas = ao->nextarena;
            }
            ao->nextarena->prevarena = ao->prevarena;

            /* Locate the new insertion point by iterating over
             * the list, using our nextarena pointer.
             */
            while (ao->nextarena != NULL &&
                   nf > ao->nextarena->nfreepools) {
                ao->prevarena = ao->nextarena;
                ao->nextarena = ao->nextarena->nextarena;
            }

            /* Insert ao at this point. */
            assert(ao->nextarena == NULL ||
                   ao->prevarena == ao->nextarena->prevarena);
            assert(ao->prevarena->nextarena == ao->nextarena);

            ao->prevarena->nextarena = ao;
            if (ao->nextarena != NULL)
                ao->nextarena->prevarena = ao;

            /* Verify that the swaps worked. */
            assert(ao->nextarena == NULL ||
                   nf <= ao->nextarena->nfreepools);
            assert(ao->prevarena == NULL ||
                   nf > ao->prevarena->nfreepools);
            assert(ao->nextarena == NULL ||
                   ao->nextarena->prevarena == ao);
            assert((_PyRuntime.mem.usable_arenas == ao &&
                    ao->prevarena == NULL) ||
                   ao->prevarena->nextarena == ao);

            UNLOCK();
            return;
        }
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);            /* else the pool is empty */
        size = pool->szidx;
        next = _PyRuntime.mem.usedpools[size + size];
        prev = next->prevpool;
        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        UNLOCK();
        return;
    }

#ifdef WITH_VALGRIND
redirect:
#endif
    /* We didn't allocate this address. */
    PyMem_RawFree(p);
}
1215
Tim Peters84c1b972002-04-04 04:44:32 +00001216/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,
1217 * then as the Python docs promise, we do not treat this like free(p), and
1218 * return a non-NULL result.
1219 */
Neil Schemenauera35c6882001-02-27 04:45:05 +00001220
/* Resize p to nbytes.  p == NULL behaves like malloc(nbytes).  For a
 * pymalloc-owned block the data is copied into a freshly allocated
 * block, unless the existing block is already big enough and shrinking
 * would save less than 25% (see inline comment).  Pointers we do not
 * manage are delegated to PyMem_RawRealloc; nbytes == 0 is rounded up
 * to 1 so the documented "non-NULL on success" promise can be kept.
 */
static void *
_PyObject_Realloc(void *ctx, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    if (p == NULL)
        return _PyObject_Alloc(0, ctx, 1, nbytes);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0))
        goto redirect;
#endif

    pool = POOL_ADDR(p);
    if (address_in_range(p, pool)) {
        /* We're in charge of this block */
        size = INDEX2SIZE(pool->szidx);
        if (nbytes <= size) {
            /* The block is staying the same or shrinking.  If
             * it's shrinking, there's a tradeoff: it costs
             * cycles to copy the block to a smaller size class,
             * but it wastes memory not to copy it.  The
             * compromise here is to copy on shrink only if at
             * least 25% of size can be shaved off.
             */
            if (4 * nbytes > 3 * size) {
                /* It's the same,
                 * or shrinking and new/old > 3/4.
                 */
                return p;
            }
            size = nbytes;
        }
        bp = _PyObject_Alloc(0, ctx, 1, nbytes);
        if (bp != NULL) {
            /* `size` is min(old size, nbytes): copy only the valid
             * prefix, then release the old block. */
            memcpy(bp, p, size);
            _PyObject_Free(ctx, p);
        }
        return bp;
    }
#ifdef WITH_VALGRIND
 redirect:
#endif
    /* We're not managing this block.  If nbytes <=
     * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this
     * block.  However, if we do, we need to copy the valid data from
     * the C-managed block to one of our blocks, and there's no portable
     * way to know how much of the memory space starting at p is valid.
     * As bug 1185883 pointed out the hard way, it's possible that the
     * C-managed block is "at the end" of allocated VM space, so that
     * a memory fault can occur if we try to copy nbytes bytes starting
     * at p.  Instead we punt:  let C continue to manage this block.
     */
    if (nbytes)
        return PyMem_RawRealloc(p, nbytes);
    /* C doesn't define the result of realloc(p, 0) (it may or may not
     * return NULL then), but Python's docs promise that nbytes==0 never
     * returns NULL.  We don't pass 0 to realloc(), to avoid that endcase
     * to begin with.  Even then, we can't be sure that realloc() won't
     * return NULL.
     */
    bp = PyMem_RawRealloc(p, 1);
    return bp ? bp : p;
}
1288
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001289#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001290
1291/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001292/* pymalloc not enabled: Redirect the entry points to malloc. These will
1293 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001294
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    /* pymalloc is disabled in this build, so no block accounting is
     * performed; report zero live blocks. */
    return 0;
}
1300
Tim Peters1221c0a2002-03-23 00:20:15 +00001301#endif /* WITH_PYMALLOC */
1302
Victor Stinner34be8072016-03-14 12:04:26 +01001303
Tim Petersddea2082002-03-23 10:03:50 +00001304/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001305/* A x-platform debugging allocator. This doesn't manage memory directly,
1306 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1307 */
Tim Petersddea2082002-03-23 10:03:50 +00001308
Tim Petersf6fb5012002-04-12 07:38:53 +00001309/* Special bytes broadcast into debug memory blocks at appropriate times.
1310 * Strings of these are unlikely to be valid addresses, floats, ints or
1311 * 7-bit ASCII.
1312 */
1313#undef CLEANBYTE
1314#undef DEADBYTE
1315#undef FORBIDDENBYTE
1316#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001317#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001318#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001319
Tim Peterse0850172002-03-24 00:34:21 +00001320/* serialno is always incremented via calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001321 * to supply a single place to set a breakpoint.
1322 */
static void
bumpserialno(void)
{
    /* Advance the debug allocator's global serial number.  Kept in its
     * own function so there is a single place to set a breakpoint (see
     * the comment above). */
    ++_PyRuntime.mem.serialno;
}
1328
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001329#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001330
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001331/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1332static size_t
1333read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001334{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001335 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001336 size_t result = *q++;
1337 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001338
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001339 for (i = SST; --i > 0; ++q)
1340 result = (result << 8) | *q;
1341 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001342}
1343
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001344/* Write n as a big-endian size_t, MSB at address p, LSB at
1345 * p + sizeof(size_t) - 1.
1346 */
Tim Petersddea2082002-03-23 10:03:50 +00001347static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001348write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001349{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001350 uint8_t *q = (uint8_t *)p + SST - 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001351 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001352
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001353 for (i = SST; --i >= 0; --q) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07001354 *q = (uint8_t)(n & 0xff);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001355 n >>= 8;
1356 }
Tim Petersddea2082002-03-23 10:03:50 +00001357}
1358
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001359/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1360 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001361
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001362p[0: S]
1363 Number of bytes originally asked for. This is a size_t, big-endian (easier
1364 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02001365p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05001366 API ID. See PEP 445. This is a character, but seems undocumented.
1367p[S+1: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001368 Copies of FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001369p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001370 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001371 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001372 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001373 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001374p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001375 Copies of FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001376p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02001377 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1378 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001379 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001380 If "bad memory" is detected later, the serial number gives an
1381 excellent way to set a breakpoint on the next run, to capture the
1382 instant at which this block was passed out.
1383*/
1384
/* Debug allocator core: allocate nbytes for the caller plus 4*SST bytes
 * of bookkeeping, laid out as documented in the block comment above:
 * [size][api id + pad][user bytes][pad][serial number].  Returns a
 * pointer to the user area (p + 2*SST), or NULL on overflow or
 * underlying-allocator failure.
 */
static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of malloc'ed block */
    uint8_t *tail;        /* p + 2*SST + nbytes == pointer to tail pad bytes */
    size_t total;         /* nbytes + 4*SST */

    bumpserialno();
    /* NOTE(review): total is computed before the overflow check.  This
     * is harmless -- unsigned wraparound is defined and total is unused
     * on the early return -- but checking first would read cleaner. */
    total = nbytes + 4*SST;
    if (nbytes > PY_SSIZE_T_MAX - 4*SST)
        /* overflow:  can't represent total as a Py_ssize_t */
        return NULL;

    if (use_calloc)
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    else
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    if (p == NULL)
        return NULL;

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    /* calloc already zeroed the user area; otherwise poison it so use
     * of uninitialized memory is recognizable in a dump. */
    if (nbytes > 0 && !use_calloc)
        memset(p + 2*SST, CLEANBYTE, nbytes);

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = p + 2*SST + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, _PyRuntime.mem.serialno);

    return p + 2*SST;
}
1421
/* Raw (no GIL check) debug malloc: delegate to _PyMem_DebugRawAlloc
   with use_calloc=0, so the user region is filled with CLEANBYTE. */
static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}
1427
/* Raw (no GIL check) debug calloc: delegate to _PyMem_DebugRawAlloc
   with use_calloc=1, so the user region is zeroed by the underlying
   calloc. */
static void *
_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    size_t nbytes;
    /* Overflow of nelem * elsize is only *asserted* here; presumably
       the public wrappers validate it before calling -- TODO confirm
       that release builds cannot reach this with overflowing args. */
    assert(elsize == 0 || nelem <= PY_SSIZE_T_MAX / elsize);
    nbytes = nelem * elsize;
    return _PyMem_DebugRawAlloc(1, ctx, nbytes);
}
1436
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001437/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001438 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00001439 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001440 Then calls the underlying free.
1441*/
Victor Stinner0507bf52013-07-07 02:05:46 +02001442static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01001443_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001444{
Victor Stinner0507bf52013-07-07 02:05:46 +02001445 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001446 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001447 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001448
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001449 if (p == NULL)
1450 return;
Victor Stinner0507bf52013-07-07 02:05:46 +02001451 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001452 nbytes = read_size_t(q);
1453 nbytes += 4*SST;
1454 if (nbytes > 0)
1455 memset(q, DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02001456 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00001457}
1458
Victor Stinner0507bf52013-07-07 02:05:46 +02001459static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001460_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001461{
Victor Stinner0507bf52013-07-07 02:05:46 +02001462 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001463 uint8_t *q = (uint8_t *)p, *oldq;
1464 uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001465 size_t total; /* nbytes + 4*SST */
1466 size_t original_nbytes;
1467 int i;
Tim Petersddea2082002-03-23 10:03:50 +00001468
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001469 if (p == NULL)
Victor Stinnerc4aec362016-03-14 22:26:53 +01001470 return _PyMem_DebugRawAlloc(0, ctx, nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00001471
Victor Stinner0507bf52013-07-07 02:05:46 +02001472 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001473 bumpserialno();
1474 original_nbytes = read_size_t(q - 2*SST);
1475 total = nbytes + 4*SST;
Antoine Pitroucc231542014-11-02 18:40:09 +01001476 if (nbytes > PY_SSIZE_T_MAX - 4*SST)
1477 /* overflow: can't represent total as a Py_ssize_t */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001478 return NULL;
Tim Petersddea2082002-03-23 10:03:50 +00001479
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001480 /* Resize and add decorations. We may get a new pointer here, in which
1481 * case we didn't get the chance to mark the old memory with DEADBYTE,
1482 * but we live with that.
1483 */
Victor Stinnerc4266362013-07-09 00:44:43 +02001484 oldq = q;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001485 q = (uint8_t *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001486 if (q == NULL)
1487 return NULL;
Tim Peters85cc1c42002-04-12 08:52:50 +00001488
Victor Stinnerc4266362013-07-09 00:44:43 +02001489 if (q == oldq && nbytes < original_nbytes) {
1490 /* shrinking: mark old extra memory dead */
1491 memset(q + nbytes, DEADBYTE, original_nbytes - nbytes);
1492 }
1493
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001494 write_size_t(q, nbytes);
Benjamin Peterson19517e42016-09-18 19:22:22 -07001495 assert(q[SST] == (uint8_t)api->api_id);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001496 for (i = 1; i < SST; ++i)
1497 assert(q[SST + i] == FORBIDDENBYTE);
1498 q += 2*SST;
Victor Stinnerc4266362013-07-09 00:44:43 +02001499
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001500 tail = q + nbytes;
1501 memset(tail, FORBIDDENBYTE, SST);
Eric Snow2ebc5ce2017-09-07 23:51:28 -06001502 write_size_t(tail + SST, _PyRuntime.mem.serialno);
Tim Peters85cc1c42002-04-12 08:52:50 +00001503
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001504 if (nbytes > original_nbytes) {
1505 /* growing: mark new extra memory clean */
1506 memset(q + original_nbytes, CLEANBYTE,
Stefan Krah735bb122010-11-26 10:54:09 +00001507 nbytes - original_nbytes);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001508 }
Tim Peters85cc1c42002-04-12 08:52:50 +00001509
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001510 return q;
Tim Petersddea2082002-03-23 10:03:50 +00001511}
1512
/* Abort with a fatal error if the caller does not hold the GIL. */
static void
_PyMem_DebugCheckGIL(void)
{
    if (PyGILState_Check()) {
        return;
    }
    Py_FatalError("Python memory allocator called "
                  "without holding the GIL");
}
1520
/* GIL-checked entry point: fatal-error unless the GIL is held, then
   delegate to the raw debug malloc. */
static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawMalloc(ctx, nbytes);
}
1527
/* GIL-checked entry point: fatal-error unless the GIL is held, then
   delegate to the raw debug calloc. */
static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
}
1534
/* GIL-checked entry point: fatal-error unless the GIL is held, then
   delegate to the raw debug free. */
static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}
1541
/* GIL-checked entry point: fatal-error unless the GIL is held, then
   delegate to the raw debug realloc. */
static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
}
1548
Tim Peters7ccfadf2002-04-01 06:04:21 +00001549/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001550 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001551 * and call Py_FatalError to kill the program.
 * The API id is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00001553 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    char msgbuf[64];
    char *msg;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id: the byte at q[-SST] was stamped with the
       allocator's api_id when the block was handed out. */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;  /* ensure NUL termination on truncation */
        goto error;
    }

    /* Check the stuff at the start of p first: if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        /* leading pad = the SST-1 FORBIDDENBYTEs just after the id byte */
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    /* Only now is it (reasonably) safe to trust the stored size and
       look at the trailing pad. */
    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    /* Dump diagnostics for the bad block, then abort the process. */
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
1605
Tim Peters7ccfadf2002-04-01 06:04:21 +00001606/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    /* The byte just before the leading pad holds the API id stamped at
       allocation time. */
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    /* Stored request size; may be garbage if the header is corrupted
       (see the warning printed below in that case). */
    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        /* Print each leading pad byte, flagging corrupted ones. */
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
            FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "    may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    /* Trailing pad: SST FORBIDDENBYTEs right after the user data. */
    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    /* Serial number recorded after the trailing pad: identifies which
       debug malloc/realloc call produced this block. */
    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    /* Flush before the traceback so the two outputs interleave sanely. */
    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}
1712
David Malcolm49526f42012-06-22 14:55:41 -04001713
/* Print "msg" padded to 35 columns, then "=", then value right-aligned
 * with thousands separators and a trailing newline.  Returns value
 * unchanged so callers can accumulate running totals. */
static size_t
printone(FILE *out, const char* msg, size_t value)
{
    const size_t origvalue = value;
    char buf[100];
    int pos, group;

    /* label, space-padded to 35 columns minimum, then '=' */
    fprintf(out, "%-35s=", msg);

    /* Build the number right-to-left into buf, inserting a comma after
     * every third digit. */
    pos = 22;
    buf[pos--] = '\0';
    buf[pos--] = '\n';
    group = 3;
    do {
        size_t rest = value / 10;
        unsigned int digit = (unsigned int)(value - rest * 10);
        value = rest;
        buf[pos--] = (char)(digit + '0');
        if (--group == 0 && value && pos >= 0) {
            group = 3;
            buf[pos--] = ',';
        }
    } while (value && pos >= 0);

    /* left-fill with blanks so the number is right-aligned */
    while (pos >= 0)
        buf[pos--] = ' ';
    fputs(buf, out);

    return origvalue;
}
1749
/* Print a one-line summary "<N> <block_name>s * <size> bytes each"
   (right-aligned in a 48-column field) followed by the total byte
   count, via printone. */
void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char buf1[128];
    char buf2[128];
    PyOS_snprintf(buf1, sizeof(buf1),
                  "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(buf2, sizeof(buf2),
                  "%48s ", buf1);
    (void)printone(out, buf2, num_blocks * sizeof_block);
}
1763
Victor Stinner34be8072016-03-14 12:04:26 +01001764
David Malcolm49526f42012-06-22 14:55:41 -04001765#ifdef WITH_PYMALLOC
1766
Victor Stinner34be8072016-03-14 12:04:26 +01001767#ifdef Py_DEBUG
1768/* Is target in the list? The list is traversed via the nextpool pointers.
1769 * The list may be NULL-terminated, or circular. Return 1 if target is in
1770 * list, else 0.
1771 */
1772static int
1773pool_is_in_list(const poolp target, poolp list)
1774{
1775 poolp origlist = list;
1776 assert(target != NULL);
1777 if (list == NULL)
1778 return 0;
1779 do {
1780 if (target == list)
1781 return 1;
1782 list = list->nextpool;
1783 } while (list != NULL && list != origlist);
1784 return 0;
1785}
1786#endif
1787
David Malcolm49526f42012-06-22 14:55:41 -04001788/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00001789 * In Py_DEBUG mode, also perform some expensive internal consistency
1790 * checks.
1791 */
void
_PyObject_DebugMallocStats(FILE *out)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    /* zero the per-size-class counters */
    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < _PyRuntime.mem.maxarenas; ++i) {
        uint j;
        uintptr_t base = _PyRuntime.mem.arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (_PyRuntime.mem.arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += _PyRuntime.mem.arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) _PyRuntime.mem.arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) _PyRuntime.mem.arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, _PyRuntime.mem.arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            /* usedpools appears to be indexed by 2*szidx here -- same
               convention as elsewhere in this file; verify if changing. */
            if (freeblocks > 0)
                assert(pool_is_in_list(p, _PyRuntime.mem.usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == _PyRuntime.mem.narenas_currently_allocated);

    /* Per-size-class table. */
    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }

    /* Arena-level counters; serialno only means "calls made" when the
       debug hooks are installed. */
    fputc('\n', out);
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", _PyRuntime.mem.serialno);
    (void)printone(out, "# arenas allocated total", _PyRuntime.mem.ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", _PyRuntime.mem.ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", _PyRuntime.mem.narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
        "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
        narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    /* Byte-level accounting; `total` should add up to narenas * ARENA_SIZE. */
    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
        "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
}
1928
David Malcolm49526f42012-06-22 14:55:41 -04001929#endif /* #ifdef WITH_PYMALLOC */