blob: b92116cd554d189a070e1898112a418b267d1929 [file] [log] [blame]
Tim Peters1221c0a2002-03-23 00:20:15 +00001#include "Python.h"
Eric Snow2ebc5ce2017-09-07 23:51:28 -06002#include "internal/mem.h"
3#include "internal/pystate.h"
Tim Peters1221c0a2002-03-23 00:20:15 +00004
Benjamin Peterson3924f932016-09-18 19:12:48 -07005#include <stdbool.h>
6
Victor Stinner0611c262016-03-15 22:22:13 +01007
8/* Defined in tracemalloc.c */
9extern void _PyMem_DumpTraceback(int fd, const void *ptr);
10
11
Victor Stinner0507bf52013-07-07 02:05:46 +020012/* Python's malloc wrappers (see pymem.h) */
13
Victor Stinner34be8072016-03-14 12:04:26 +010014#undef uint
15#define uint unsigned int /* assuming >= 16 bits */
16
Victor Stinner0507bf52013-07-07 02:05:46 +020017/* Forward declaration */
Victor Stinnerc4aec362016-03-14 22:26:53 +010018static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
19static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
20static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
Victor Stinner9ed83c42017-10-31 12:18:10 -070021static void _PyMem_DebugRawFree(void *ctx, void *ptr);
Victor Stinnerc4aec362016-03-14 22:26:53 +010022
Victor Stinner0507bf52013-07-07 02:05:46 +020023static void* _PyMem_DebugMalloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020024static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020025static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
Victor Stinnerc4aec362016-03-14 22:26:53 +010026static void _PyMem_DebugFree(void *ctx, void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020027
28static void _PyObject_DebugDumpAddress(const void *p);
29static void _PyMem_DebugCheckAddress(char api_id, const void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020030
Nick Coghlan6ba64f42013-09-29 00:28:55 +100031#if defined(__has_feature) /* Clang */
32 #if __has_feature(address_sanitizer) /* is ASAN enabled? */
33 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070034 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100035 #else
36 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
37 #endif
38#else
39 #if defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x, is ASAN enabled? */
40 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070041 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100042 #else
43 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
44 #endif
45#endif
46
Tim Peters1221c0a2002-03-23 00:20:15 +000047#ifdef WITH_PYMALLOC
48
Victor Stinner0507bf52013-07-07 02:05:46 +020049#ifdef MS_WINDOWS
50# include <windows.h>
51#elif defined(HAVE_MMAP)
52# include <sys/mman.h>
53# ifdef MAP_ANONYMOUS
54# define ARENAS_USE_MMAP
55# endif
Antoine Pitrou6f26be02011-05-03 18:18:59 +020056#endif
57
Victor Stinner0507bf52013-07-07 02:05:46 +020058/* Forward declaration */
59static void* _PyObject_Malloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020060static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020061static void _PyObject_Free(void *ctx, void *p);
62static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
Martin v. Löwiscd83fa82013-06-27 12:23:29 +020063#endif
64
Victor Stinner0507bf52013-07-07 02:05:46 +020065
static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* Raw-domain malloc slot; ctx is unused.  A zero-byte request is
       promoted to one byte: malloc(0) may legally return NULL (which
       callers would misread as an allocation failure) or a zero-sized
       block with no memory behind it, which would break pymalloc. */
    size_t request = (size == 0) ? 1 : size;
    return malloc(request);
}
77
static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* Raw-domain calloc slot; ctx is unused.  An empty request is promoted
       to a single zeroed byte, mirroring _PyMem_RawMalloc: calloc(0, 0)
       may return NULL or a pointer with no memory behind it on some
       platforms, and both would confuse our callers. */
    if (nelem == 0 || elsize == 0) {
        nelem = elsize = 1;
    }
    return calloc(nelem, elsize);
}
91
static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    /* Raw-domain realloc slot; ctx is unused.  As with the malloc slot,
       never pass a zero size down to the C library. */
    return realloc(ptr, size ? size : 1);
}
99
static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    /* Raw-domain free slot: thin adapter over free(); ctx is unused. */
    free(ptr);
}
105
106
107#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    /* Arena allocator for Windows: reserve and commit `size` bytes of
       read/write pages in a single call.  Returns NULL on failure;
       ctx is unused. */
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}
114
static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    /* Release the whole reservation.  VirtualFree with MEM_RELEASE requires
       a size of 0 and the exact base address returned by VirtualAlloc,
       so `size` is deliberately ignored. */
    VirtualFree(ptr, 0, MEM_RELEASE);
}
120
121#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    /* Arena allocator for POSIX systems: map `size` bytes of fresh,
       private anonymous memory.  Returns NULL on failure; ctx unused. */
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED) {
        return NULL;
    }
    assert(mem != NULL);
    return mem;
}
133
static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    /* Return an arena's pages to the OS.  The result is deliberately
       ignored: there is no way to recover from a failed munmap here. */
    (void)munmap(ptr, size);
}
139
140#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    /* Portable fallback arena allocator: plain heap allocation. */
    return malloc(size);
}
146
static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    /* Portable fallback: `size` is unused, the heap tracks it for us. */
    free(ptr);
}
152#endif
153
154
Victor Stinnerdb067af2014-05-02 22:31:14 +0200155#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
Victor Stinner0507bf52013-07-07 02:05:46 +0200156#ifdef WITH_PYMALLOC
Victor Stinnerdb067af2014-05-02 22:31:14 +0200157# define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
Victor Stinner0507bf52013-07-07 02:05:46 +0200158#else
Victor Stinner6cf185d2013-10-10 15:58:42 +0200159# define PYOBJ_FUNCS PYRAW_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200160#endif
Victor Stinner15932592016-04-22 18:52:22 +0200161#define PYMEM_FUNCS PYOBJ_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200162
Victor Stinner0507bf52013-07-07 02:05:46 +0200163typedef struct {
164 /* We tag each block with an API ID in order to tag API violations */
165 char api_id;
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200166 PyMemAllocatorEx alloc;
Victor Stinner0507bf52013-07-07 02:05:46 +0200167} debug_alloc_api_t;
168static struct {
169 debug_alloc_api_t raw;
170 debug_alloc_api_t mem;
171 debug_alloc_api_t obj;
172} _PyMem_Debug = {
173 {'r', {NULL, PYRAW_FUNCS}},
Victor Stinner6cf185d2013-10-10 15:58:42 +0200174 {'m', {NULL, PYMEM_FUNCS}},
175 {'o', {NULL, PYOBJ_FUNCS}}
Victor Stinner0507bf52013-07-07 02:05:46 +0200176 };
177
Victor Stinnerc4aec362016-03-14 22:26:53 +0100178#define PYRAWDBG_FUNCS \
179 _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree
180#define PYDBG_FUNCS \
181 _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree
Victor Stinner0507bf52013-07-07 02:05:46 +0200182
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600183
184#define _PyMem_Raw _PyRuntime.mem.allocators.raw
185static const PyMemAllocatorEx _pymem_raw = {
Victor Stinner34be8072016-03-14 12:04:26 +0100186#ifdef Py_DEBUG
Victor Stinnerc4aec362016-03-14 22:26:53 +0100187 &_PyMem_Debug.raw, PYRAWDBG_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200188#else
189 NULL, PYRAW_FUNCS
190#endif
191 };
192
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600193#define _PyMem _PyRuntime.mem.allocators.mem
194static const PyMemAllocatorEx _pymem = {
Victor Stinner34be8072016-03-14 12:04:26 +0100195#ifdef Py_DEBUG
Victor Stinner15932592016-04-22 18:52:22 +0200196 &_PyMem_Debug.mem, PYDBG_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200197#else
Victor Stinner15932592016-04-22 18:52:22 +0200198 NULL, PYMEM_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200199#endif
200 };
201
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600202#define _PyObject _PyRuntime.mem.allocators.obj
203static const PyMemAllocatorEx _pyobject = {
Victor Stinner34be8072016-03-14 12:04:26 +0100204#ifdef Py_DEBUG
Victor Stinner6cf185d2013-10-10 15:58:42 +0200205 &_PyMem_Debug.obj, PYDBG_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200206#else
Victor Stinner6cf185d2013-10-10 15:58:42 +0200207 NULL, PYOBJ_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200208#endif
209 };
210
Victor Stinner34be8072016-03-14 12:04:26 +0100211int
212_PyMem_SetupAllocators(const char *opt)
213{
214 if (opt == NULL || *opt == '\0') {
215 /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line
216 options): use default allocators */
217#ifdef Py_DEBUG
218# ifdef WITH_PYMALLOC
219 opt = "pymalloc_debug";
220# else
221 opt = "malloc_debug";
222# endif
223#else
224 /* !Py_DEBUG */
225# ifdef WITH_PYMALLOC
226 opt = "pymalloc";
227# else
228 opt = "malloc";
229# endif
230#endif
231 }
232
233 if (strcmp(opt, "debug") == 0) {
234 PyMem_SetupDebugHooks();
235 }
236 else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0)
237 {
238 PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};
239
240 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
241 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
242 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
243
244 if (strcmp(opt, "malloc_debug") == 0)
245 PyMem_SetupDebugHooks();
246 }
247#ifdef WITH_PYMALLOC
248 else if (strcmp(opt, "pymalloc") == 0
249 || strcmp(opt, "pymalloc_debug") == 0)
250 {
Victor Stinner15932592016-04-22 18:52:22 +0200251 PyMemAllocatorEx raw_alloc = {NULL, PYRAW_FUNCS};
252 PyMemAllocatorEx mem_alloc = {NULL, PYMEM_FUNCS};
Victor Stinner34be8072016-03-14 12:04:26 +0100253 PyMemAllocatorEx obj_alloc = {NULL, PYOBJ_FUNCS};
254
Victor Stinner15932592016-04-22 18:52:22 +0200255 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &raw_alloc);
256 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &mem_alloc);
Victor Stinner34be8072016-03-14 12:04:26 +0100257 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &obj_alloc);
258
259 if (strcmp(opt, "pymalloc_debug") == 0)
260 PyMem_SetupDebugHooks();
261 }
262#endif
263 else {
264 /* unknown allocator */
265 return -1;
266 }
267 return 0;
268}
269
Victor Stinner0507bf52013-07-07 02:05:46 +0200270#undef PYRAW_FUNCS
Victor Stinner6cf185d2013-10-10 15:58:42 +0200271#undef PYMEM_FUNCS
272#undef PYOBJ_FUNCS
Victor Stinnerc4aec362016-03-14 22:26:53 +0100273#undef PYRAWDBG_FUNCS
Victor Stinner6cf185d2013-10-10 15:58:42 +0200274#undef PYDBG_FUNCS
Victor Stinner0507bf52013-07-07 02:05:46 +0200275
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600276static const PyObjectArenaAllocator _PyObject_Arena = {NULL,
Victor Stinner0507bf52013-07-07 02:05:46 +0200277#ifdef MS_WINDOWS
278 _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
279#elif defined(ARENAS_USE_MMAP)
280 _PyObject_ArenaMmap, _PyObject_ArenaMunmap
281#else
282 _PyObject_ArenaMalloc, _PyObject_ArenaFree
283#endif
284 };
285
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600286void
287_PyObject_Initialize(struct _pyobj_runtime_state *state)
288{
289 state->allocator_arenas = _PyObject_Arena;
290}
291
void
_PyMem_Initialize(struct _pymem_runtime_state *state)
{
    /* Seed the per-runtime allocator state with the compile-time defaults
       for all three domains, then initialize pymalloc's usedpools table. */
    state->allocators.raw = _pymem_raw;
    state->allocators.mem = _pymem;
    state->allocators.obj = _pyobject;

#ifdef WITH_PYMALLOC
    Py_BUILD_ASSERT(NB_SMALL_SIZE_CLASSES == 64);

    /* Each pair of usedpools entries is made to point back into the array
       itself, offset so that the entry pair can be treated as the
       nextpool/prevpool links of a dummy pool header.  An empty size class
       is then a circular list containing only that dummy header. */
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int x = i * 8 + j;
            poolp *addr = &(state->usedpools[2*(x)]);
            poolp val = (poolp)((uint8_t *)addr - 2*sizeof(pyblock *));
            state->usedpools[x * 2] = val;
            state->usedpools[x * 2 + 1] = val;
        };
    };
#endif /* WITH_PYMALLOC */
}
313
Victor Stinner0621e0e2016-04-19 17:02:55 +0200314#ifdef WITH_PYMALLOC
Victor Stinner34be8072016-03-14 12:04:26 +0100315static int
316_PyMem_DebugEnabled(void)
317{
318 return (_PyObject.malloc == _PyMem_DebugMalloc);
319}
320
Victor Stinner34be8072016-03-14 12:04:26 +0100321int
322_PyMem_PymallocEnabled(void)
323{
324 if (_PyMem_DebugEnabled()) {
325 return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
326 }
327 else {
328 return (_PyObject.malloc == _PyObject_Malloc);
329 }
330}
331#endif
332
Victor Stinner0507bf52013-07-07 02:05:46 +0200333void
334PyMem_SetupDebugHooks(void)
335{
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200336 PyMemAllocatorEx alloc;
Victor Stinner0507bf52013-07-07 02:05:46 +0200337
Victor Stinnerc4aec362016-03-14 22:26:53 +0100338 alloc.malloc = _PyMem_DebugRawMalloc;
339 alloc.calloc = _PyMem_DebugRawCalloc;
340 alloc.realloc = _PyMem_DebugRawRealloc;
341 alloc.free = _PyMem_DebugRawFree;
Victor Stinner34be8072016-03-14 12:04:26 +0100342
Victor Stinnerc4aec362016-03-14 22:26:53 +0100343 if (_PyMem_Raw.malloc != _PyMem_DebugRawMalloc) {
Victor Stinner0507bf52013-07-07 02:05:46 +0200344 alloc.ctx = &_PyMem_Debug.raw;
345 PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
346 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
347 }
348
Victor Stinnerc4aec362016-03-14 22:26:53 +0100349 alloc.malloc = _PyMem_DebugMalloc;
350 alloc.calloc = _PyMem_DebugCalloc;
351 alloc.realloc = _PyMem_DebugRealloc;
352 alloc.free = _PyMem_DebugFree;
353
Victor Stinnerad524372016-03-16 12:12:53 +0100354 if (_PyMem.malloc != _PyMem_DebugMalloc) {
355 alloc.ctx = &_PyMem_Debug.mem;
356 PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
357 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
358 }
359
Victor Stinner0507bf52013-07-07 02:05:46 +0200360 if (_PyObject.malloc != _PyMem_DebugMalloc) {
361 alloc.ctx = &_PyMem_Debug.obj;
362 PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
363 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
364 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200365}
366
void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    /* Copy the current allocator of `domain` into *allocator. */
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}
384
void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    /* Install *allocator as the allocator of `domain`.  The struct is
       copied by value, so the caller keeps ownership of *allocator. */
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
396
void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    /* Copy the current arena allocator into *allocator. */
    *allocator = _PyRuntime.obj.allocator_arenas;
}
402
void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    /* Install *allocator (copied by value) as the arena allocator. */
    _PyRuntime.obj.allocator_arenas = *allocator;
}
408
void *
PyMem_RawMalloc(size_t size)
{
    /* Public entry point of the RAW allocation domain. */
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}
422
Victor Stinnerdb067af2014-05-02 22:31:14 +0200423void *
424PyMem_RawCalloc(size_t nelem, size_t elsize)
425{
426 /* see PyMem_RawMalloc() */
427 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
428 return NULL;
429 return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
430}
431
Victor Stinner0507bf52013-07-07 02:05:46 +0200432void*
433PyMem_RawRealloc(void *ptr, size_t new_size)
434{
435 /* see PyMem_RawMalloc() */
436 if (new_size > (size_t)PY_SSIZE_T_MAX)
437 return NULL;
438 return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
439}
440
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600441void
442PyMem_RawFree(void *ptr)
Victor Stinner0507bf52013-07-07 02:05:46 +0200443{
444 _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
445}
446
Victor Stinner9ed83c42017-10-31 12:18:10 -0700447
Victor Stinner0507bf52013-07-07 02:05:46 +0200448void *
449PyMem_Malloc(size_t size)
450{
451 /* see PyMem_RawMalloc() */
452 if (size > (size_t)PY_SSIZE_T_MAX)
453 return NULL;
454 return _PyMem.malloc(_PyMem.ctx, size);
455}
456
void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* Zeroed allocation in the MEM domain; overflow-safe product check.
       see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}
465
void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* Resize a MEM-domain block; NULL on failure, `ptr` stays valid.
       see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}
474
void
PyMem_Free(void *ptr)
{
    /* Free a block obtained from the MEM domain (PyMem_Malloc & co). */
    _PyMem.free(_PyMem.ctx, ptr);
}
480
Victor Stinner9ed83c42017-10-31 12:18:10 -0700481
Victor Stinner49fc8ec2013-07-07 23:30:24 +0200482char *
483_PyMem_RawStrdup(const char *str)
484{
485 size_t size;
486 char *copy;
487
488 size = strlen(str) + 1;
489 copy = PyMem_RawMalloc(size);
490 if (copy == NULL)
491 return NULL;
492 memcpy(copy, str, size);
493 return copy;
494}
495
char *
_PyMem_Strdup(const char *str)
{
    /* strdup() clone backed by the MEM domain allocator.  The caller owns
       the result and must release it with PyMem_Free().  Returns NULL on
       allocation failure. */
    size_t nbytes = strlen(str) + 1;    /* include the terminating NUL */
    char *dup = PyMem_Malloc(nbytes);
    if (dup == NULL) {
        return NULL;
    }
    memcpy(dup, str, nbytes);
    return dup;
}
509
Victor Stinner0507bf52013-07-07 02:05:46 +0200510void *
511PyObject_Malloc(size_t size)
512{
513 /* see PyMem_RawMalloc() */
514 if (size > (size_t)PY_SSIZE_T_MAX)
515 return NULL;
516 return _PyObject.malloc(_PyObject.ctx, size);
517}
518
void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* Zeroed allocation in the OBJ domain; overflow-safe product check.
       see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}
527
void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* Resize an OBJ-domain block; NULL on failure, `ptr` stays valid.
       see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}
536
void
PyObject_Free(void *ptr)
{
    /* Free a block obtained from the OBJ domain (PyObject_Malloc & co). */
    _PyObject.free(_PyObject.ctx, ptr);
}
542
543
544#ifdef WITH_PYMALLOC
545
Benjamin Peterson05159c42009-12-03 03:01:27 +0000546#ifdef WITH_VALGRIND
547#include <valgrind/valgrind.h>
548
549/* If we're using GCC, use __builtin_expect() to reduce overhead of
550 the valgrind checks */
551#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
552# define UNLIKELY(value) __builtin_expect((value), 0)
553#else
554# define UNLIKELY(value) (value)
555#endif
556
557/* -1 indicates that we haven't checked that we're running on valgrind yet. */
558static int running_on_valgrind = -1;
559#endif
560
Victor Stinner9ed83c42017-10-31 12:18:10 -0700561
Antoine Pitrouf9d0b122012-12-09 14:28:26 +0100562Py_ssize_t
563_Py_GetAllocatedBlocks(void)
564{
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600565 return _PyRuntime.mem.num_allocated_blocks;
Antoine Pitrouf9d0b122012-12-09 14:28:26 +0100566}
567
568
Thomas Woutersa9773292006-04-21 09:43:23 +0000569/* Allocate a new arena. If we run out of memory, return NULL. Else
570 * allocate a new arena, and return the address of an arena_object
571 * describing the new arena. It's expected that the caller will set
572 * `usable_arenas` to the return value.
573 */
static struct arena_object*
new_arena(void)
{
    /* Allocate a new arena.  If we run out of memory, return NULL.  Else
       allocate a new arena, and return the address of an arena_object
       describing the new arena.  It's expected that the caller will set
       `usable_arenas` to the return value. */
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;    /* lazily read PYTHONMALLOCSTATS once */

    if (debug_stats == -1) {
        char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (_PyRuntime.mem.unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = _PyRuntime.mem.maxarenas ? _PyRuntime.mem.maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= _PyRuntime.mem.maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*_PyRuntime.mem.arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*_PyRuntime.mem.arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(_PyRuntime.mem.arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        _PyRuntime.mem.arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(_PyRuntime.mem.usable_arenas == NULL);
        assert(_PyRuntime.mem.unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = _PyRuntime.mem.maxarenas; i < numarenas; ++i) {
            _PyRuntime.mem.arenas[i].address = 0;   /* mark as unassociated */
            _PyRuntime.mem.arenas[i].nextarena = i < numarenas - 1 ?
                &_PyRuntime.mem.arenas[i+1] : NULL;
        }

        /* Update globals. */
        _PyRuntime.mem.unused_arena_objects = &_PyRuntime.mem.arenas[_PyRuntime.mem.maxarenas];
        _PyRuntime.mem.maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(_PyRuntime.mem.unused_arena_objects != NULL);
    arenaobj = _PyRuntime.mem.unused_arena_objects;
    _PyRuntime.mem.unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyRuntime.obj.allocator_arenas.alloc(_PyRuntime.obj.allocator_arenas.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = _PyRuntime.mem.unused_arena_objects;
        _PyRuntime.mem.unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    /* Bookkeeping for _PyObject_DebugMallocStats(). */
    ++_PyRuntime.mem.narenas_currently_allocated;
    ++_PyRuntime.mem.ntimes_arena_allocated;
    if (_PyRuntime.mem.narenas_currently_allocated > _PyRuntime.mem.narenas_highwater)
        _PyRuntime.mem.narenas_highwater = _PyRuntime.mem.narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (pyblock*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        /* The arena base is not pool-aligned: sacrifice the partial pool
           at the front and round pool_address up to the next boundary. */
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
666
Victor Stinner9ed83c42017-10-31 12:18:10 -0700667
Thomas Woutersa9773292006-04-21 09:43:23 +0000668/*
Benjamin Peterson3924f932016-09-18 19:12:48 -0700669address_in_range(P, POOL)
Thomas Woutersa9773292006-04-21 09:43:23 +0000670
671Return true if and only if P is an address that was allocated by pymalloc.
672POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
673(the caller is asked to compute this because the macro expands POOL more than
674once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
Benjamin Peterson3924f932016-09-18 19:12:48 -0700675variable and pass the latter to the macro; because address_in_range is
Thomas Woutersa9773292006-04-21 09:43:23 +0000676called on every alloc/realloc/free, micro-efficiency is important here).
677
678Tricky: Let B be the arena base address associated with the pool, B =
679arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
680
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000681 B <= P < B + ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000682
683Subtracting B throughout, this is true iff
684
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000685 0 <= P-B < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000686
687By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
688
689Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
690before the first arena has been allocated. `arenas` is still NULL in that
691case. We're relying on that maxarenas is also 0 in that case, so that
692(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
693into a NULL arenas.
694
695Details: given P and POOL, the arena_object corresponding to P is AO =
696arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
697stores, etc), POOL is the correct address of P's pool, AO.address is the
698correct base address of the pool's arena, and P must be within ARENA_SIZE of
699AO.address. In addition, AO.address is not 0 (no arena can start at address 0
Benjamin Peterson3924f932016-09-18 19:12:48 -0700700(NULL)). Therefore address_in_range correctly reports that obmalloc
Thomas Woutersa9773292006-04-21 09:43:23 +0000701controls P.
702
703Now suppose obmalloc does not control P (e.g., P was obtained via a direct
704call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
705in this case -- it may even be uninitialized trash. If the trash arenaindex
706is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
707control P.
708
709Else arenaindex is < maxarena, and AO is read up. If AO corresponds to an
710allocated arena, obmalloc controls all the memory in slice AO.address :
711AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
712so P doesn't lie in that slice, so the macro correctly reports that P is not
713controlled by obmalloc.
714
715Finally, if P is not controlled by obmalloc and AO corresponds to an unused
716arena_object (one not currently associated with an allocated arena),
717AO.address is 0, and the second test in the macro reduces to:
718
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000719 P < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000720
721If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
722that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
723of the test still passes, and the third clause (AO.address != 0) is necessary
724to get the correct result: AO.address is 0 in this case, so the macro
725correctly reports that P is not controlled by obmalloc (despite that P lies in
726slice AO.address : AO.address + ARENA_SIZE).
727
728Note: The third (AO.address != 0) clause was added in Python 2.5. Before
7292.5, arenas were never free()'ed, and an arenaindex < maxarena always
730corresponded to a currently-allocated arena, so the "P is not controlled by
731obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
732was impossible.
733
734Note that the logic is excruciating, and reading up possibly uninitialized
735memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
736creates problems for some memory debuggers. The overwhelming advantage is
737that this test determines whether an arbitrary address is controlled by
738obmalloc in a small constant time, independent of the number of arenas
739obmalloc controls. Since this test is needed at every entry point, it's
740extremely desirable that it be this fast.
741*/
Thomas Woutersa9773292006-04-21 09:43:23 +0000742
Benjamin Peterson3924f932016-09-18 19:12:48 -0700743static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
744address_in_range(void *p, poolp pool)
745{
746 // Since address_in_range may be reading from memory which was not allocated
747 // by Python, it is important that pool->arenaindex is read only once, as
748 // another thread may be concurrently modifying the value without holding
749 // the GIL. The following dance forces the compiler to read pool->arenaindex
750 // only once.
751 uint arenaindex = *((volatile uint *)&pool->arenaindex);
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600752 return arenaindex < _PyRuntime.mem.maxarenas &&
753 (uintptr_t)p - _PyRuntime.mem.arenas[arenaindex].address < ARENA_SIZE &&
754 _PyRuntime.mem.arenas[arenaindex].address != 0;
Benjamin Peterson3924f932016-09-18 19:12:48 -0700755}
Tim Peters338e0102002-04-01 19:23:44 +0000756
Victor Stinner9ed83c42017-10-31 12:18:10 -0700757
Neil Schemenauera35c6882001-02-27 04:45:05 +0000758/*==========================================================================*/
759
/* pymalloc allocator

   The basic blocks are ordered by decreasing execution frequency,
   which minimizes the number of jumps in the most common cases,
   improves branching prediction and instruction scheduling (small
   block allocations typically result in a couple of instructions).
   Unless the optimizer reorders everything, being too smart...

   Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.

   Return 0 if pymalloc failed to allocate the memory block: on bigger
   requests, on error in the code below (as a last chance to serve the request)
   or when the max memory limit has been reached.

   Must be called with ctx unused (kept for allocator-API symmetry); takes
   the pymalloc LOCK() internally and releases it on every exit path. */
static int
pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
{
    pyblock *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    /* Lazily probe for valgrind on the first call; when running under
       valgrind, bypass pymalloc entirely so valgrind can track blocks. */
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return 0;
    }
#endif

    /* Zero-byte and over-threshold requests are delegated to the caller's
       fallback allocator (PyMem_Raw*). */
    if (nbytes == 0) {
        return 0;
    }
    if (nbytes > SMALL_REQUEST_THRESHOLD) {
        return 0;
    }

    LOCK();
    /*
     * Most frequent paths first
     */
    /* Map the request size to a size-class index; usedpools is laid out in
       (nextpool, prevpool) pairs, hence the `size + size` indexing. */
    size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    pool = _PyRuntime.mem.usedpools[size + size];
    if (pool != pool->nextpool) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);
        if ((pool->freeblock = *(pyblock **)bp) != NULL) {
            goto success;
        }

        /*
         * Reached the end of the free list, try to extend it.
         */
        if (pool->nextoffset <= pool->maxnextoffset) {
            /* There is room for another block. */
            pool->freeblock = (pyblock*)pool +
                              pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(pyblock **)(pool->freeblock) = NULL;
            goto success;
        }

        /* Pool is full, unlink from used pools. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        goto success;
    }

    /* There isn't a pool of the right size class immediately
     * available:  use a free pool.
     */
    if (_PyRuntime.mem.usable_arenas == NULL) {
        /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (_PyRuntime.mem.narenas_currently_allocated >= MAX_ARENAS) {
            goto failed;
        }
#endif
        _PyRuntime.mem.usable_arenas = new_arena();
        if (_PyRuntime.mem.usable_arenas == NULL) {
            goto failed;
        }
        _PyRuntime.mem.usable_arenas->nextarena =
            _PyRuntime.mem.usable_arenas->prevarena = NULL;
    }
    assert(_PyRuntime.mem.usable_arenas->address != 0);

    /* Try to get a cached free pool. */
    pool = _PyRuntime.mem.usable_arenas->freepools;
    if (pool != NULL) {
        /* Unlink from cached pools. */
        _PyRuntime.mem.usable_arenas->freepools = pool->nextpool;

        /* This arena already had the smallest nfreepools
         * value, so decreasing nfreepools doesn't change
         * that, and we don't need to rearrange the
         * usable_arenas list.  However, if the arena has
         * become wholly allocated, we need to remove its
         * arena_object from usable_arenas.
         */
        --_PyRuntime.mem.usable_arenas->nfreepools;
        if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
            /* Wholly allocated:  remove. */
            assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
            assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
                   _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
                   _PyRuntime.mem.usable_arenas);

            _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
            if (_PyRuntime.mem.usable_arenas != NULL) {
                _PyRuntime.mem.usable_arenas->prevarena = NULL;
                assert(_PyRuntime.mem.usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(_PyRuntime.mem.usable_arenas->freepools != NULL ||
                   _PyRuntime.mem.usable_arenas->pool_address <=
                   (pyblock*)_PyRuntime.mem.usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
        }

    init_pool:
        /* Frontlink to used pools. */
        next = _PyRuntime.mem.usedpools[size + size]; /* == prev */
        pool->nextpool = next;
        pool->prevpool = next;
        next->nextpool = pool;
        next->prevpool = pool;
        pool->ref.count = 1;
        if (pool->szidx == size) {
            /* Luckily, this pool last contained blocks
             * of the same size class, so its header
             * and free list are already initialized.
             */
            bp = pool->freeblock;
            assert(bp != NULL);
            pool->freeblock = *(pyblock **)bp;
            goto success;
        }
        /*
         * Initialize the pool header, set up the free list to
         * contain just the second block, and return the first
         * block.
         */
        pool->szidx = size;
        size = INDEX2SIZE(size);
        bp = (pyblock *)pool + POOL_OVERHEAD;
        pool->nextoffset = POOL_OVERHEAD + (size << 1);
        pool->maxnextoffset = POOL_SIZE - size;
        pool->freeblock = bp + size;
        *(pyblock **)(pool->freeblock) = NULL;
        goto success;
    }

    /* Carve off a new pool. */
    assert(_PyRuntime.mem.usable_arenas->nfreepools > 0);
    assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
    pool = (poolp)_PyRuntime.mem.usable_arenas->pool_address;
    assert((pyblock*)pool <= (pyblock*)_PyRuntime.mem.usable_arenas->address +
                             ARENA_SIZE - POOL_SIZE);
    pool->arenaindex = (uint)(_PyRuntime.mem.usable_arenas - _PyRuntime.mem.arenas);
    assert(&_PyRuntime.mem.arenas[pool->arenaindex] == _PyRuntime.mem.usable_arenas);
    pool->szidx = DUMMY_SIZE_IDX;
    _PyRuntime.mem.usable_arenas->pool_address += POOL_SIZE;
    --_PyRuntime.mem.usable_arenas->nfreepools;

    if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
        assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
               _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
               _PyRuntime.mem.usable_arenas);
        /* Unlink the arena:  it is completely allocated. */
        _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
        if (_PyRuntime.mem.usable_arenas != NULL) {
            _PyRuntime.mem.usable_arenas->prevarena = NULL;
            assert(_PyRuntime.mem.usable_arenas->address != 0);
        }
    }

    goto init_pool;

success:
    UNLOCK();
    assert(bp != NULL);
    *ptr_p = (void *)bp;
    return 1;

failed:
    UNLOCK();
    return 0;
}
962
Victor Stinner9ed83c42017-10-31 12:18:10 -0700963
Victor Stinnerdb067af2014-05-02 22:31:14 +0200964static void *
965_PyObject_Malloc(void *ctx, size_t nbytes)
966{
Victor Stinner9ed83c42017-10-31 12:18:10 -0700967 void* ptr;
968 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
969 _PyRuntime.mem.num_allocated_blocks++;
970 return ptr;
971 }
972
973 ptr = PyMem_RawMalloc(nbytes);
974 if (ptr != NULL) {
975 _PyRuntime.mem.num_allocated_blocks++;
976 }
977 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +0200978}
979
Victor Stinner9ed83c42017-10-31 12:18:10 -0700980
Victor Stinnerdb067af2014-05-02 22:31:14 +0200981static void *
982_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
983{
Victor Stinner9ed83c42017-10-31 12:18:10 -0700984 void* ptr;
985
986 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
987 size_t nbytes = nelem * elsize;
988
989 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
990 memset(ptr, 0, nbytes);
991 _PyRuntime.mem.num_allocated_blocks++;
992 return ptr;
993 }
994
995 ptr = PyMem_RawCalloc(nelem, elsize);
996 if (ptr != NULL) {
997 _PyRuntime.mem.num_allocated_blocks++;
998 }
999 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001000}
1001
Neil Schemenauera35c6882001-02-27 04:45:05 +00001002
/* Free a memory block allocated by pymalloc_alloc().
   Return 1 if it was freed.
   Return 0 if the block was not allocated by pymalloc_alloc().

   On the 1-return, also performs all arena bookkeeping: keeps the
   usable_arenas list sorted by ascending nfreepools and returns wholly
   free arenas to the system.  Takes LOCK() internally. */
static int
pymalloc_free(void *ctx, void *p)
{
    poolp pool;
    pyblock *lastfree;
    poolp next, prev;
    uint size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Under valgrind, pymalloc_alloc never serves blocks, so nothing to do. */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        return 0;
    }
    /* We allocated this address. */

    LOCK();

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    *(pyblock **)p = lastfree = pool->freeblock;
    pool->freeblock = (pyblock *)p;
    if (!lastfree) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);        /* else the pool is empty */
        size = pool->szidx;
        next = _PyRuntime.mem.usedpools[size + size];
        prev = next->prevpool;

        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        goto success;
    }

    struct arena_object* ao;
    uint nf;  /* ao->nfreepools */

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (--pool->ref.count != 0) {
        /* pool isn't empty:  leave it in usedpools */
        goto success;
    }
    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (being not referenced, they are perhaps paged out).
     */
    next = pool->nextpool;
    prev = pool->prevpool;
    next->prevpool = prev;
    prev->nextpool = next;

    /* Link the pool to freepools.  This is a singly-linked
     * list, and pool->prevpool isn't used there.
     */
    ao = &_PyRuntime.mem.arenas[pool->arenaindex];
    pool->nextpool = ao->freepools;
    ao->freepools = pool;
    nf = ++ao->nfreepools;

    /* All the rest is arena management.  We just freed
     * a pool, and there are 4 cases for arena mgmt:
     * 1. If all the pools are free, return the arena to
     *    the system free().
     * 2. If this is the only free pool in the arena,
     *    add the arena back to the `usable_arenas` list.
     * 3. If the "next" arena has a smaller count of free
     *    pools, we have to "slide this arena right" to
     *    restore that usable_arenas is sorted in order of
     *    nfreepools.
     * 4. Else there's nothing more to do.
     */
    if (nf == ao->ntotalpools) {
        /* Case 1.  First unlink ao from usable_arenas.
         */
        assert(ao->prevarena == NULL ||
               ao->prevarena->address != 0);
        assert(ao ->nextarena == NULL ||
               ao->nextarena->address != 0);

        /* Fix the pointer in the prevarena, or the
         * usable_arenas pointer.
         */
        if (ao->prevarena == NULL) {
            _PyRuntime.mem.usable_arenas = ao->nextarena;
            assert(_PyRuntime.mem.usable_arenas == NULL ||
                   _PyRuntime.mem.usable_arenas->address != 0);
        }
        else {
            assert(ao->prevarena->nextarena == ao);
            ao->prevarena->nextarena =
                ao->nextarena;
        }
        /* Fix the pointer in the nextarena. */
        if (ao->nextarena != NULL) {
            assert(ao->nextarena->prevarena == ao);
            ao->nextarena->prevarena =
                ao->prevarena;
        }
        /* Record that this arena_object slot is
         * available to be reused.
         */
        ao->nextarena = _PyRuntime.mem.unused_arena_objects;
        _PyRuntime.mem.unused_arena_objects = ao;

        /* Free the entire arena. */
        _PyRuntime.obj.allocator_arenas.free(_PyRuntime.obj.allocator_arenas.ctx,
                                             (void *)ao->address, ARENA_SIZE);
        ao->address = 0;                    /* mark unassociated */
        --_PyRuntime.mem.narenas_currently_allocated;

        goto success;
    }

    if (nf == 1) {
        /* Case 2.  Put ao at the head of
         * usable_arenas.  Note that because
         * ao->nfreepools was 0 before, ao isn't
         * currently on the usable_arenas list.
         */
        ao->nextarena = _PyRuntime.mem.usable_arenas;
        ao->prevarena = NULL;
        if (_PyRuntime.mem.usable_arenas)
            _PyRuntime.mem.usable_arenas->prevarena = ao;
        _PyRuntime.mem.usable_arenas = ao;
        assert(_PyRuntime.mem.usable_arenas->address != 0);

        goto success;
    }

    /* If this arena is now out of order, we need to keep
     * the list sorted.  The list is kept sorted so that
     * the "most full" arenas are used first, which allows
     * the nearly empty arenas to be completely freed.  In
     * a few un-scientific tests, it seems like this
     * approach allowed a lot more memory to be freed.
     */
    if (ao->nextarena == NULL ||
             nf <= ao->nextarena->nfreepools) {
        /* Case 4.  Nothing to do. */
        goto success;
    }
    /* Case 3:  We have to move the arena towards the end
     * of the list, because it has more free pools than
     * the arena to its right.
     * First unlink ao from usable_arenas.
     */
    if (ao->prevarena != NULL) {
        /* ao isn't at the head of the list */
        assert(ao->prevarena->nextarena == ao);
        ao->prevarena->nextarena = ao->nextarena;
    }
    else {
        /* ao is at the head of the list */
        assert(_PyRuntime.mem.usable_arenas == ao);
        _PyRuntime.mem.usable_arenas = ao->nextarena;
    }
    ao->nextarena->prevarena = ao->prevarena;

    /* Locate the new insertion point by iterating over
     * the list, using our nextarena pointer.
     */
    while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
        ao->prevarena = ao->nextarena;
        ao->nextarena = ao->nextarena->nextarena;
    }

    /* Insert ao at this point. */
    assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
    assert(ao->prevarena->nextarena == ao->nextarena);

    ao->prevarena->nextarena = ao;
    if (ao->nextarena != NULL) {
        ao->nextarena->prevarena = ao;
    }

    /* Verify that the swaps worked. */
    assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
    assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
    assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
    assert((_PyRuntime.mem.usable_arenas == ao && ao->prevarena == NULL)
           || ao->prevarena->nextarena == ao);

    goto success;

success:
    UNLOCK();
    return 1;
}
1217
1218
1219static void
1220_PyObject_Free(void *ctx, void *p)
1221{
1222 /* PyObject_Free(NULL) has no effect */
1223 if (p == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001224 return;
1225 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001226
Victor Stinner9ed83c42017-10-31 12:18:10 -07001227 _PyRuntime.mem.num_allocated_blocks--;
1228 if (!pymalloc_free(ctx, p)) {
1229 /* pymalloc didn't allocate this address */
1230 PyMem_RawFree(p);
1231 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001232}
1233
Neil Schemenauera35c6882001-02-27 04:45:05 +00001234
/* pymalloc realloc.

   If nbytes==0, then as the Python docs promise, we do not treat this like
   free(p), and return a non-NULL result.

   Return 1 if pymalloc reallocated memory and wrote the new pointer into
   newptr_p.

   Return 0 if pymalloc didn't allocated p. */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt:  let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff: it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        /* Shrink: copy only the nbytes the caller still wants. */
        size = nbytes;
    }

    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        /* `size` is min(old block size, nbytes) at this point. */
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    /* On allocation failure bp is NULL; report it via *newptr_p, but still
       return 1: pymalloc owned p, so the caller must not retry elsewhere. */
    *newptr_p = bp;
    return 1;
}
1303
1304
1305static void *
1306_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
1307{
1308 void *ptr2;
1309
1310 if (ptr == NULL) {
1311 return _PyObject_Malloc(ctx, nbytes);
1312 }
1313
1314 if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
1315 return ptr2;
1316 }
1317
1318 return PyMem_RawRealloc(ptr, nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001319}
1320
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001321#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001322
1323/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001324/* pymalloc not enabled: Redirect the entry points to malloc. These will
1325 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001326
/* Without pymalloc there is no block accounting, so report zero
 * outstanding blocks (see the WITH_PYMALLOC implementation above,
 * which tracks num_allocated_blocks). */
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}
1332
Tim Peters1221c0a2002-03-23 00:20:15 +00001333#endif /* WITH_PYMALLOC */
1334
Victor Stinner34be8072016-03-14 12:04:26 +01001335
Tim Petersddea2082002-03-23 10:03:50 +00001336/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001337/* A x-platform debugging allocator. This doesn't manage memory directly,
1338 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1339 */
Tim Petersddea2082002-03-23 10:03:50 +00001340
Tim Petersf6fb5012002-04-12 07:38:53 +00001341/* Special bytes broadcast into debug memory blocks at appropriate times.
1342 * Strings of these are unlikely to be valid addresses, floats, ints or
1343 * 7-bit ASCII.
1344 */
1345#undef CLEANBYTE
1346#undef DEADBYTE
1347#undef FORBIDDENBYTE
1348#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001349#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001350#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001351
/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++_PyRuntime.mem.serialno;
}
1360
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001361#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001362
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001363/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1364static size_t
1365read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001366{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001367 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001368 size_t result = *q++;
1369 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001370
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001371 for (i = SST; --i > 0; ++q)
1372 result = (result << 8) | *q;
1373 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001374}
1375
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001376/* Write n as a big-endian size_t, MSB at address p, LSB at
1377 * p + sizeof(size_t) - 1.
1378 */
Tim Petersddea2082002-03-23 10:03:50 +00001379static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001380write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001381{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001382 uint8_t *q = (uint8_t *)p + SST - 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001383 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001384
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001385 for (i = SST; --i >= 0; --q) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07001386 *q = (uint8_t)(n & 0xff);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001387 n >>= 8;
1388 }
Tim Petersddea2082002-03-23 10:03:50 +00001389}
1390
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001391/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1392 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001393
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001394p[0: S]
1395 Number of bytes originally asked for. This is a size_t, big-endian (easier
1396 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02001397p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05001398 API ID. See PEP 445. This is a character, but seems undocumented.
1399p[S+1: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001400 Copies of FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001401p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001402 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001403 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001404 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001405 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001406p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001407 Copies of FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001408p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02001409 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1410 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001411 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001412 If "bad memory" is detected later, the serial number gives an
1413 excellent way to set a breakpoint on the next run, to capture the
1414 instant at which this block was passed out.
1415*/
1416
/* Common implementation of the debug malloc/calloc hooks: allocate
 * nbytes + 4*SST bytes from the wrapped allocator and surround the user
 * data with the header/trailer described in the big comment above.
 * Returns a pointer to the user-data region, or NULL on overflow/OOM. */
static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of the malloc'ed pad block */
    uint8_t *data;        /* p + 2*SST == pointer to data bytes */
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */

    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4 * SST;

    /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
     *          ^--- p    ^--- data   ^--- tail
       S: nbytes stored as size_t
       I: API identifier (1 byte)
       F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
       C: Clean bytes used later to store actual data
       N: Serial number stored as size_t */

    if (use_calloc) {
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    }
    else {
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    }
    if (p == NULL) {
        return NULL;
    }
    data = p + 2*SST;

    bumpserialno();

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    /* calloc already zeroed the data region, so only the malloc path
       paints it with CLEANBYTE. */
    if (nbytes > 0 && !use_calloc) {
        memset(data, CLEANBYTE, nbytes);
    }

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, _PyRuntime.mem.serialno);

    return data;
}
1469
/* Debug malloc hook: use_calloc=0, so the data region is filled with
 * CLEANBYTE rather than zeroed. */
static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}
1475
1476static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001477_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
Victor Stinnerdb067af2014-05-02 22:31:14 +02001478{
1479 size_t nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001480 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001481 nbytes = nelem * elsize;
Victor Stinnerc4aec362016-03-14 22:26:53 +01001482 return _PyMem_DebugRawAlloc(1, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001483}
1484
Victor Stinner9ed83c42017-10-31 12:18:10 -07001485
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001486/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001487 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00001488 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001489 Then calls the underlying free.
1490*/
Victor Stinner0507bf52013-07-07 02:05:46 +02001491static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01001492_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001493{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001494 /* PyMem_Free(NULL) has no effect */
1495 if (p == NULL) {
1496 return;
1497 }
1498
Victor Stinner0507bf52013-07-07 02:05:46 +02001499 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001500 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001501 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001502
Victor Stinner0507bf52013-07-07 02:05:46 +02001503 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001504 nbytes = read_size_t(q);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001505 nbytes += 4 * SST;
1506 memset(q, DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02001507 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00001508}
1509
Victor Stinner9ed83c42017-10-31 12:18:10 -07001510
/* Debug realloc: validate the old block's pad bytes, resize it through the
 * underlying allocator, then rewrite the decorations (size field, tail
 * FORBIDDENBYTEs, fresh serial number) for the new size.  Returns the new
 * data pointer, or NULL on integer overflow or if the underlying realloc
 * fails (this wrapper does not free the old block in that case).
 */
static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    /* realloc(NULL, n) behaves like malloc(n). */
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *q;      /* base address of the malloc'ed block (p - 2*SST) */
    uint8_t *data;   /* q + 2*SST == pointer to data bytes */
    uint8_t *tail;   /* data + nbytes == pointer to tail pad bytes */
    size_t total;    /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
    int i;

    /* Check pad bytes and API id before touching the block. */
    _PyMem_DebugCheckAddress(api->api_id, p);

    q = (uint8_t *)p;
    /* Requested size of the old block, stored 2*SST bytes below the data. */
    original_nbytes = read_size_t(q - 2*SST);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4*SST;

    /* Resize and add decorations. */
    q = (uint8_t *)api->alloc.realloc(api->alloc.ctx, q - 2*SST, total);
    if (q == NULL) {
        return NULL;
    }

    /* Each (re)allocation gets a new serial number (written at the tail). */
    bumpserialno();
    /* Rewrite the header: new requested size, then verify that realloc
       preserved the API-id byte and the leading FORBIDDENBYTE pad. */
    write_size_t(q, nbytes);
    assert(q[SST] == (uint8_t)api->api_id);
    for (i = 1; i < SST; ++i) {
        assert(q[SST + i] == FORBIDDENBYTE);
    }
    data = q + 2*SST;

    /* Rebuild the trailer: SST forbidden bytes, then the serial number. */
    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, _PyRuntime.mem.serialno);

    if (nbytes > original_nbytes) {
        /* growing: mark new extra memory clean */
        memset(data + original_nbytes, CLEANBYTE,
               nbytes - original_nbytes);
    }

    return data;
}
1562
/* The debug allocators must only run while the GIL is held; abort with a
   fatal error otherwise. */
static void
_PyMem_DebugCheckGIL(void)
{
    if (!PyGILState_Check()) {
        Py_FatalError("Python memory allocator called "
                      "without holding the GIL");
    }
}
1570
static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    /* GIL-checked front end for the raw debug malloc. */
    _PyMem_DebugCheckGIL();
    void *res = _PyMem_DebugRawMalloc(ctx, nbytes);
    return res;
}
1577
static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* GIL-checked front end for the raw debug calloc. */
    _PyMem_DebugCheckGIL();
    void *res = _PyMem_DebugRawCalloc(ctx, nelem, elsize);
    return res;
}
1584
Victor Stinner9ed83c42017-10-31 12:18:10 -07001585
static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    /* Verify the GIL is held, then defer to the raw debug free. */
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}
1592
Victor Stinner9ed83c42017-10-31 12:18:10 -07001593
static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    /* GIL-checked front end for the raw debug realloc. */
    _PyMem_DebugCheckGIL();
    void *res = _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
    return res;
}
1600
Tim Peters7ccfadf2002-04-01 06:04:21 +00001601/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001602 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001603 * and call Py_FatalError to kill the program.
 * The API id is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00001605 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    char msgbuf[64];
    char *msg;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id: the byte at p[-SST] records which allocator API
       the block was handed out by; it must match the API now verifying. */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first: if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    /* The requested size lives 2*SST bytes before the data; the tail pad
       (SST more FORBIDDENBYTEs) starts immediately after the data. */
    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    /* Dump diagnostics about the bad block, then abort the process. */
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
1657
Tim Peters7ccfadf2002-04-01 06:04:21 +00001658/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    /* API id byte sits just below the data region. */
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    /* Requested size is stored 2*SST bytes below the data. */
    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, " at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs(" Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              " may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    /* Now the trailing pad bytes (may segfault if nbytes is corrupt,
       per the warning printed above). */
    tail = q + nbytes;
    fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, " at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    /* The serial number is stored right after the tail pad bytes. */
    serial = read_size_t(tail + SST);
    fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    /* Hex-dump up to the first 8 and last 8 data bytes. */
    if (nbytes > 0) {
        i = 0;
        fputs(" Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    /* Flush before the traceback dump, which writes to the raw fd. */
    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}
1764
David Malcolm49526f42012-06-22 14:55:41 -04001765
/* Write "msg" padded to column 35, then '=', then "value" right-aligned
 * in a 21-character field with ',' thousands separators, then a newline.
 * Returns value unchanged so callers can accumulate running totals.
 */
static size_t
printone(FILE *out, const char* msg, size_t value)
{
    const size_t origvalue = value;

    /* Label, space-padded out to column 35, then the '=' sign. */
    fputs(msg, out);
    for (size_t col = strlen(msg); col < 35; ++col) {
        fputc(' ', out);
    }
    fputc('=', out);

    /* Build the number right-to-left into a fixed-width field:
       indices 0..20 hold the digits/commas/spaces, 21 the newline. */
    char field[23];
    int pos = 22;
    field[pos--] = '\0';
    field[pos--] = '\n';
    int group = 3;
    do {
        size_t rest = value / 10;
        unsigned int digit = (unsigned int)(value - rest * 10);
        value = rest;
        field[pos--] = (char)('0' + digit);
        if (--group == 0 && value && pos >= 0) {
            /* Comma after every third digit, while digits remain. */
            group = 3;
            field[pos--] = ',';
        }
    } while (value && pos >= 0);
    while (pos >= 0) {
        field[pos--] = ' ';
    }
    fputs(field, out);

    return origvalue;
}
1801
David Malcolm49526f42012-06-22 14:55:41 -04001802void
1803_PyDebugAllocatorStats(FILE *out,
1804 const char *block_name, int num_blocks, size_t sizeof_block)
1805{
1806 char buf1[128];
1807 char buf2[128];
1808 PyOS_snprintf(buf1, sizeof(buf1),
Tim Peterseaa3bcc2013-09-05 22:57:04 -05001809 "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
David Malcolm49526f42012-06-22 14:55:41 -04001810 num_blocks, block_name, sizeof_block);
1811 PyOS_snprintf(buf2, sizeof(buf2),
1812 "%48s ", buf1);
1813 (void)printone(out, buf2, num_blocks * sizeof_block);
1814}
1815
Victor Stinner34be8072016-03-14 12:04:26 +01001816
David Malcolm49526f42012-06-22 14:55:41 -04001817#ifdef WITH_PYMALLOC
1818
Victor Stinner34be8072016-03-14 12:04:26 +01001819#ifdef Py_DEBUG
1820/* Is target in the list? The list is traversed via the nextpool pointers.
1821 * The list may be NULL-terminated, or circular. Return 1 if target is in
1822 * list, else 0.
1823 */
1824static int
1825pool_is_in_list(const poolp target, poolp list)
1826{
1827 poolp origlist = list;
1828 assert(target != NULL);
1829 if (list == NULL)
1830 return 0;
1831 do {
1832 if (target == list)
1833 return 1;
1834 list = list->nextpool;
1835 } while (list != NULL && list != origlist);
1836 return 0;
1837}
1838#endif
1839
David Malcolm49526f42012-06-22 14:55:41 -04001840/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00001841 * In Py_DEBUG mode, also perform some expensive internal consistency
1842 * checks.
1843 */
void
_PyObject_DebugMallocStats(FILE *out)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    /* Zero the per-size-class counters before the arena walk. */
    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas. If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < _PyRuntime.mem.maxarenas; ++i) {
        uint j;
        uintptr_t base = _PyRuntime.mem.arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (_PyRuntime.mem.arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += _PyRuntime.mem.arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) _PyRuntime.mem.arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) _PyRuntime.mem.arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            /* size class index of this pool (indexes the count arrays) */
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, _PyRuntime.mem.arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            /* usedpools takes a doubled index (sz + sz) */
            if (freeblocks > 0)
                assert(pool_is_in_list(p, _PyRuntime.mem.usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == _PyRuntime.mem.narenas_currently_allocated);

    /* Per-size-class table. */
    fputc('\n', out);
    fputs("class size num pools blocks in use avail blocks\n"
          "----- ---- --------- ------------- ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            /* no pools in this class: no blocks should be counted either */
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                "%11" PY_FORMAT_SIZE_T "u "
                "%15" PY_FORMAT_SIZE_T "u "
                "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }

    /* Arena-level summary lines. */
    fputc('\n', out);
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", _PyRuntime.mem.serialno);
    (void)printone(out, "# arenas allocated total", _PyRuntime.mem.ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", _PyRuntime.mem.ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", _PyRuntime.mem.narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    /* Byte accounting: the accumulated "total" should equal
       narenas * ARENA_SIZE (see declaration comment above). */
    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
}
1980
David Malcolm49526f42012-06-22 14:55:41 -04001981#endif /* #ifdef WITH_PYMALLOC */