blob: 7c6973ec035f10b61bd3dd13f94d4d3aa44a21e6 [file] [log] [blame]
Tim Peters1221c0a2002-03-23 00:20:15 +00001#include "Python.h"
Eric Snow2ebc5ce2017-09-07 23:51:28 -06002#include "internal/mem.h"
3#include "internal/pystate.h"
Tim Peters1221c0a2002-03-23 00:20:15 +00004
Benjamin Peterson3924f932016-09-18 19:12:48 -07005#include <stdbool.h>
6
Victor Stinner0611c262016-03-15 22:22:13 +01007
8/* Defined in tracemalloc.c */
9extern void _PyMem_DumpTraceback(int fd, const void *ptr);
10
11
Victor Stinner0507bf52013-07-07 02:05:46 +020012/* Python's malloc wrappers (see pymem.h) */
13
Victor Stinner34be807c2016-03-14 12:04:26 +010014#undef uint
15#define uint unsigned int /* assuming >= 16 bits */
16
Victor Stinner0507bf52013-07-07 02:05:46 +020017/* Forward declaration */
Victor Stinnerc4aec362016-03-14 22:26:53 +010018static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
19static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
20static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
Victor Stinner9ed83c42017-10-31 12:18:10 -070021static void _PyMem_DebugRawFree(void *ctx, void *ptr);
Victor Stinnerc4aec362016-03-14 22:26:53 +010022
Victor Stinner0507bf52013-07-07 02:05:46 +020023static void* _PyMem_DebugMalloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020024static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020025static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
Victor Stinnerc4aec362016-03-14 22:26:53 +010026static void _PyMem_DebugFree(void *ctx, void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020027
28static void _PyObject_DebugDumpAddress(const void *p);
29static void _PyMem_DebugCheckAddress(char api_id, const void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020030
Nick Coghlan6ba64f42013-09-29 00:28:55 +100031#if defined(__has_feature) /* Clang */
32 #if __has_feature(address_sanitizer) /* is ASAN enabled? */
33 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070034 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100035 #else
36 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
37 #endif
38#else
39 #if defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x, is ASAN enabled? */
40 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
Benjamin Peterson3924f932016-09-18 19:12:48 -070041 __attribute__((no_address_safety_analysis))
Nick Coghlan6ba64f42013-09-29 00:28:55 +100042 #else
43 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
44 #endif
45#endif
46
Tim Peters1221c0a2002-03-23 00:20:15 +000047#ifdef WITH_PYMALLOC
48
Victor Stinner0507bf52013-07-07 02:05:46 +020049#ifdef MS_WINDOWS
50# include <windows.h>
51#elif defined(HAVE_MMAP)
52# include <sys/mman.h>
53# ifdef MAP_ANONYMOUS
54# define ARENAS_USE_MMAP
55# endif
Antoine Pitrou6f26be02011-05-03 18:18:59 +020056#endif
57
Victor Stinner0507bf52013-07-07 02:05:46 +020058/* Forward declaration */
59static void* _PyObject_Malloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020060static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020061static void _PyObject_Free(void *ctx, void *p);
62static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
Martin v. Löwiscd83fa82013-06-27 12:23:29 +020063#endif
64
Victor Stinner0507bf52013-07-07 02:05:46 +020065
static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* Default "raw" malloc: delegate straight to the C library.
       A zero-byte request is rounded up to one byte, because malloc(0)
       may legally return NULL (which callers would misread as an
       out-of-memory error) or a valid pointer with no usable memory
       behind it (which would break pymalloc).  `ctx` is required by the
       PyMemAllocatorEx signature and is unused here. */
    return malloc(size != 0 ? size : 1);
}
77
static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* Default "raw" calloc: delegate to the C library.  An empty request
       (either dimension zero) is turned into a single 1-byte element,
       because calloc(0, 0) may legally return NULL (mistaken for failure)
       or a zero-sized allocation (breaks pymalloc).  `ctx` is unused. */
    if (nelem == 0 || elsize == 0) {
        nelem = elsize = 1;
    }
    return calloc(nelem, elsize);
}
91
static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    /* Default "raw" realloc.  As in _PyMem_RawMalloc(), never forward a
       zero-byte request to the C library.  `ctx` is unused. */
    const size_t request = (size != 0) ? size : 1;
    return realloc(ptr, request);
}
99
static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    /* Default "raw" free: delegate to the C library.  free(NULL) is a
       no-op, so no guard is needed.  `ctx` is the allocator-context slot
       required by the PyMemAllocatorEx signature; unused here. */
    free(ptr);
}
105
106
#ifdef MS_WINDOWS
/* Arena allocator backend: Windows virtual memory.  Arenas (the large
   chunks that pymalloc carves into pools) come straight from the OS. */
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    /* With MEM_RELEASE the size argument must be 0: the entire
       reservation made by VirtualAlloc() is released at once. */
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
/* Arena allocator backend: anonymous mmap(). */
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;   /* normalize mmap's failure sentinel to NULL */
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
/* Arena allocator backend: plain malloc()/free() fallback for platforms
   with neither VirtualAlloc nor anonymous mmap. */
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif
153
154
/* Function "tables" for the three allocator domains.  Each macro expands
   to the four function entries (malloc, calloc, realloc, free) of a
   PyMemAllocatorEx initializer. */
#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
#ifdef WITH_PYMALLOC
#  define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
#else
#  define PYOBJ_FUNCS PYRAW_FUNCS
#endif
#define PYMEM_FUNCS PYOBJ_FUNCS

/* Per-domain state for the debug hooks: the API tag written into each
   debug-wrapped block, plus the wrapped (original) allocator to which the
   hooks delegate. */
typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;   /* PYMEM_DOMAIN_RAW, tag 'r' */
    debug_alloc_api_t mem;   /* PYMEM_DOMAIN_MEM, tag 'm' */
    debug_alloc_api_t obj;   /* PYMEM_DOMAIN_OBJ, tag 'o' */
} _PyMem_Debug = {
    {'r', {NULL, PYRAW_FUNCS}},
    {'m', {NULL, PYMEM_FUNCS}},
    {'o', {NULL, PYOBJ_FUNCS}}
    };

/* Debug-hook function tables (see PyMem_SetupDebugHooks). */
#define PYRAWDBG_FUNCS \
    _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree
#define PYDBG_FUNCS \
    _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree


/* The live allocators are stored in the global runtime state; these
   aliases keep the rest of the file readable. */
#define _PyMem_Raw _PyRuntime.mem.allocators.raw

#define _PyMem _PyRuntime.mem.allocators.mem

#define _PyObject _PyRuntime.mem.allocators.obj
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800189
190void
191_PyMem_GetDefaultRawAllocator(PyMemAllocatorEx *alloc_p)
192{
Victor Stinnerccb04422017-11-16 03:20:31 -0800193 PyMemAllocatorEx pymem_raw = {
194#ifdef Py_DEBUG
195 &_PyMem_Debug.raw, PYRAWDBG_FUNCS
196#else
197 NULL, PYRAW_FUNCS
198#endif
199 };
200 *alloc_p = pymem_raw;
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800201}
Victor Stinner0507bf52013-07-07 02:05:46 +0200202
Victor Stinner34be807c2016-03-14 12:04:26 +0100203int
204_PyMem_SetupAllocators(const char *opt)
205{
206 if (opt == NULL || *opt == '\0') {
207 /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line
208 options): use default allocators */
209#ifdef Py_DEBUG
210# ifdef WITH_PYMALLOC
211 opt = "pymalloc_debug";
212# else
213 opt = "malloc_debug";
214# endif
215#else
216 /* !Py_DEBUG */
217# ifdef WITH_PYMALLOC
218 opt = "pymalloc";
219# else
220 opt = "malloc";
221# endif
222#endif
223 }
224
225 if (strcmp(opt, "debug") == 0) {
226 PyMem_SetupDebugHooks();
227 }
228 else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0)
229 {
230 PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};
231
232 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
233 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
234 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
235
236 if (strcmp(opt, "malloc_debug") == 0)
237 PyMem_SetupDebugHooks();
238 }
239#ifdef WITH_PYMALLOC
240 else if (strcmp(opt, "pymalloc") == 0
241 || strcmp(opt, "pymalloc_debug") == 0)
242 {
Victor Stinner15932592016-04-22 18:52:22 +0200243 PyMemAllocatorEx raw_alloc = {NULL, PYRAW_FUNCS};
244 PyMemAllocatorEx mem_alloc = {NULL, PYMEM_FUNCS};
Victor Stinner34be807c2016-03-14 12:04:26 +0100245 PyMemAllocatorEx obj_alloc = {NULL, PYOBJ_FUNCS};
246
Victor Stinner15932592016-04-22 18:52:22 +0200247 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &raw_alloc);
248 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &mem_alloc);
Victor Stinner34be807c2016-03-14 12:04:26 +0100249 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &obj_alloc);
250
251 if (strcmp(opt, "pymalloc_debug") == 0)
252 PyMem_SetupDebugHooks();
253 }
254#endif
255 else {
256 /* unknown allocator */
257 return -1;
258 }
259 return 0;
260}
261
Victor Stinner0507bf52013-07-07 02:05:46 +0200262
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600263void
264_PyObject_Initialize(struct _pyobj_runtime_state *state)
265{
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800266 PyObjectArenaAllocator _PyObject_Arena = {NULL,
267#ifdef MS_WINDOWS
268 _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
269#elif defined(ARENAS_USE_MMAP)
270 _PyObject_ArenaMmap, _PyObject_ArenaMunmap
271#else
272 _PyObject_ArenaMalloc, _PyObject_ArenaFree
273#endif
274 };
275
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600276 state->allocator_arenas = _PyObject_Arena;
277}
278
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800279
void
_PyMem_Initialize(struct _pymem_runtime_state *state)
{
    /* Install the build's default allocators for all three domains and,
       for pymalloc builds, initialize the usedpools table. */
    PyMemAllocatorEx pymem = {
#ifdef Py_DEBUG
        &_PyMem_Debug.mem, PYDBG_FUNCS
#else
        NULL, PYMEM_FUNCS
#endif
        };
    PyMemAllocatorEx pyobject = {
#ifdef Py_DEBUG
        &_PyMem_Debug.obj, PYDBG_FUNCS
#else
        NULL, PYOBJ_FUNCS
#endif
        };

    _PyMem_GetDefaultRawAllocator(&state->allocators.raw);
    state->allocators.mem = pymem;
    state->allocators.obj = pyobject;

#ifdef WITH_PYMALLOC
    Py_BUILD_ASSERT(NB_SMALL_SIZE_CLASSES == 64);

    /* For each of the 64 small size classes, point both usedpools slots
       at an address 2 pointers *before* the slot pair itself.  NOTE
       (review): this appears to make each slot pair act as the
       nextpool/prevpool fields of a fake pool header whose list is
       initially "empty" (header points at itself) -- confirm against the
       pool_header layout, which is declared elsewhere. */
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            int x = i * 8 + j;
            poolp *addr = &(state->usedpools[2*(x)]);
            poolp val = (poolp)((uint8_t *)addr - 2*sizeof(pyblock *));
            state->usedpools[x * 2] = val;
            state->usedpools[x * 2 + 1] = val;
        };
    };
#endif /* WITH_PYMALLOC */
}
316
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800317
#ifdef WITH_PYMALLOC
/* Return non-zero if the object domain currently routes through the
   debug allocator hooks. */
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

/* Return non-zero if pymalloc is the active object allocator, looking
   through the debug wrapper when it is installed. */
int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        /* Debug hooks installed: inspect the wrapped allocator. */
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif
336
Victor Stinner0507bf52013-07-07 02:05:46 +0200337void
338PyMem_SetupDebugHooks(void)
339{
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200340 PyMemAllocatorEx alloc;
Victor Stinner0507bf52013-07-07 02:05:46 +0200341
Victor Stinnerc4aec362016-03-14 22:26:53 +0100342 alloc.malloc = _PyMem_DebugRawMalloc;
343 alloc.calloc = _PyMem_DebugRawCalloc;
344 alloc.realloc = _PyMem_DebugRawRealloc;
345 alloc.free = _PyMem_DebugRawFree;
Victor Stinner34be807c2016-03-14 12:04:26 +0100346
Victor Stinnerc4aec362016-03-14 22:26:53 +0100347 if (_PyMem_Raw.malloc != _PyMem_DebugRawMalloc) {
Victor Stinner0507bf52013-07-07 02:05:46 +0200348 alloc.ctx = &_PyMem_Debug.raw;
349 PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
350 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
351 }
352
Victor Stinnerc4aec362016-03-14 22:26:53 +0100353 alloc.malloc = _PyMem_DebugMalloc;
354 alloc.calloc = _PyMem_DebugCalloc;
355 alloc.realloc = _PyMem_DebugRealloc;
356 alloc.free = _PyMem_DebugFree;
357
Victor Stinnerad524372016-03-16 12:12:53 +0100358 if (_PyMem.malloc != _PyMem_DebugMalloc) {
359 alloc.ctx = &_PyMem_Debug.mem;
360 PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
361 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
362 }
363
Victor Stinner0507bf52013-07-07 02:05:46 +0200364 if (_PyObject.malloc != _PyMem_DebugMalloc) {
365 alloc.ctx = &_PyMem_Debug.obj;
366 PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
367 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
368 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200369}
370
371void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200372PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200373{
374 switch(domain)
375 {
376 case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
377 case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
378 case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
379 default:
Victor Stinnerdb067af2014-05-02 22:31:14 +0200380 /* unknown domain: set all attributes to NULL */
Victor Stinner0507bf52013-07-07 02:05:46 +0200381 allocator->ctx = NULL;
382 allocator->malloc = NULL;
Victor Stinnerdb067af2014-05-02 22:31:14 +0200383 allocator->calloc = NULL;
Victor Stinner0507bf52013-07-07 02:05:46 +0200384 allocator->realloc = NULL;
385 allocator->free = NULL;
386 }
387}
388
389void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200390PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200391{
392 switch(domain)
393 {
394 case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
395 case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
396 case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
397 /* ignore unknown domain */
398 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200399}
400
/* Copy the current arena allocator into *allocator. */
void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyRuntime.obj.allocator_arenas;
}

/* Replace the arena allocator with *allocator (copied by value). */
void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyRuntime.obj.allocator_arenas = *allocator;
}
412
413void *
414PyMem_RawMalloc(size_t size)
415{
416 /*
417 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
418 * Most python internals blindly use a signed Py_ssize_t to track
419 * things without checking for overflows or negatives.
420 * As size_t is unsigned, checking for size < 0 is not required.
421 */
422 if (size > (size_t)PY_SSIZE_T_MAX)
423 return NULL;
Victor Stinner0507bf52013-07-07 02:05:46 +0200424 return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
425}
426
Victor Stinnerdb067af2014-05-02 22:31:14 +0200427void *
428PyMem_RawCalloc(size_t nelem, size_t elsize)
429{
430 /* see PyMem_RawMalloc() */
431 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
432 return NULL;
433 return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
434}
435
Victor Stinner0507bf52013-07-07 02:05:46 +0200436void*
437PyMem_RawRealloc(void *ptr, size_t new_size)
438{
439 /* see PyMem_RawMalloc() */
440 if (new_size > (size_t)PY_SSIZE_T_MAX)
441 return NULL;
442 return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
443}
444
/* Release a block obtained from the PyMem_Raw* functions through the
   currently installed raw-domain allocator. */
void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}
450
Victor Stinner9ed83c42017-10-31 12:18:10 -0700451
Victor Stinner0507bf52013-07-07 02:05:46 +0200452void *
453PyMem_Malloc(size_t size)
454{
455 /* see PyMem_RawMalloc() */
456 if (size > (size_t)PY_SSIZE_T_MAX)
457 return NULL;
458 return _PyMem.malloc(_PyMem.ctx, size);
459}
460
461void *
Victor Stinnerdb067af2014-05-02 22:31:14 +0200462PyMem_Calloc(size_t nelem, size_t elsize)
463{
464 /* see PyMem_RawMalloc() */
465 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
466 return NULL;
467 return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
468}
469
470void *
Victor Stinner0507bf52013-07-07 02:05:46 +0200471PyMem_Realloc(void *ptr, size_t new_size)
472{
473 /* see PyMem_RawMalloc() */
474 if (new_size > (size_t)PY_SSIZE_T_MAX)
475 return NULL;
476 return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
477}
478
/* Release a block obtained from the PyMem_* functions through the
   currently installed mem-domain allocator. */
void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}
484
Victor Stinner9ed83c42017-10-31 12:18:10 -0700485
char *
_PyMem_RawStrdup(const char *str)
{
    /* Duplicate `str` into memory obtained from PyMem_RawMalloc().
       Returns NULL on allocation failure.  The caller owns the copy and
       must release it with PyMem_RawFree(). */
    size_t nbytes = strlen(str) + 1;    /* include the NUL terminator */
    char *dup = PyMem_RawMalloc(nbytes);
    if (dup == NULL) {
        return NULL;
    }
    return memcpy(dup, str, nbytes);    /* memcpy returns dup */
}
499
char *
_PyMem_Strdup(const char *str)
{
    /* Duplicate `str` into memory obtained from PyMem_Malloc().
       Returns NULL on allocation failure.  The caller owns the copy and
       must release it with PyMem_Free(). */
    size_t nbytes = strlen(str) + 1;    /* include the NUL terminator */
    char *dup = PyMem_Malloc(nbytes);
    if (dup == NULL) {
        return NULL;
    }
    return memcpy(dup, str, nbytes);    /* memcpy returns dup */
}
513
Victor Stinner0507bf52013-07-07 02:05:46 +0200514void *
515PyObject_Malloc(size_t size)
516{
517 /* see PyMem_RawMalloc() */
518 if (size > (size_t)PY_SSIZE_T_MAX)
519 return NULL;
520 return _PyObject.malloc(_PyObject.ctx, size);
521}
522
523void *
Victor Stinnerdb067af2014-05-02 22:31:14 +0200524PyObject_Calloc(size_t nelem, size_t elsize)
525{
526 /* see PyMem_RawMalloc() */
527 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
528 return NULL;
529 return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
530}
531
532void *
Victor Stinner0507bf52013-07-07 02:05:46 +0200533PyObject_Realloc(void *ptr, size_t new_size)
534{
535 /* see PyMem_RawMalloc() */
536 if (new_size > (size_t)PY_SSIZE_T_MAX)
537 return NULL;
538 return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
539}
540
/* Release a block obtained from the PyObject_* functions through the
   currently installed object-domain allocator. */
void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}
546
547
#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif
564
Victor Stinner9ed83c42017-10-31 12:18:10 -0700565
/* Report the runtime's count of currently allocated memory blocks. */
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _PyRuntime.mem.num_allocated_blocks;
}
571
572
/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;   /* -1: PYTHONMALLOCSTATS not checked yet */

    if (debug_stats == -1) {
        char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    /* No spare arena_object: grow the arenas array first. */
    if (_PyRuntime.mem.unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = _PyRuntime.mem.maxarenas ? _PyRuntime.mem.maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= _PyRuntime.mem.maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*_PyRuntime.mem.arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*_PyRuntime.mem.arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(_PyRuntime.mem.arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        _PyRuntime.mem.arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(_PyRuntime.mem.usable_arenas == NULL);
        assert(_PyRuntime.mem.unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = _PyRuntime.mem.maxarenas; i < numarenas; ++i) {
            _PyRuntime.mem.arenas[i].address = 0;   /* mark as unassociated */
            _PyRuntime.mem.arenas[i].nextarena = i < numarenas - 1 ?
                &_PyRuntime.mem.arenas[i+1] : NULL;
        }

        /* Update globals. */
        _PyRuntime.mem.unused_arena_objects = &_PyRuntime.mem.arenas[_PyRuntime.mem.maxarenas];
        _PyRuntime.mem.maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(_PyRuntime.mem.unused_arena_objects != NULL);
    arenaobj = _PyRuntime.mem.unused_arena_objects;
    _PyRuntime.mem.unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyRuntime.obj.allocator_arenas.alloc(_PyRuntime.obj.allocator_arenas.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = _PyRuntime.mem.unused_arena_objects;
        _PyRuntime.mem.unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    /* Bookkeeping: live-arena count, total allocations, high-water mark. */
    ++_PyRuntime.mem.narenas_currently_allocated;
    ++_PyRuntime.mem.ntimes_arena_allocated;
    if (_PyRuntime.mem.narenas_currently_allocated > _PyRuntime.mem.narenas_highwater)
        _PyRuntime.mem.narenas_highwater = _PyRuntime.mem.narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (pyblock*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        /* Arena start is not pool-aligned: skip the partial leading pool. */
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
670
Victor Stinner9ed83c42017-10-31 12:18:10 -0700671
Thomas Woutersa9773292006-04-21 09:43:23 +0000672/*
Benjamin Peterson3924f932016-09-18 19:12:48 -0700673address_in_range(P, POOL)
Thomas Woutersa9773292006-04-21 09:43:23 +0000674
675Return true if and only if P is an address that was allocated by pymalloc.
676POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
677(the caller is asked to compute this because the macro expands POOL more than
678once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
Benjamin Peterson3924f932016-09-18 19:12:48 -0700679variable and pass the latter to the macro; because address_in_range is
Thomas Woutersa9773292006-04-21 09:43:23 +0000680called on every alloc/realloc/free, micro-efficiency is important here).
681
682Tricky: Let B be the arena base address associated with the pool, B =
683arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
684
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000685 B <= P < B + ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000686
687Subtracting B throughout, this is true iff
688
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000689 0 <= P-B < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000690
691By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
692
693Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
694before the first arena has been allocated. `arenas` is still NULL in that
695case. We're relying on that maxarenas is also 0 in that case, so that
696(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
697into a NULL arenas.
698
699Details: given P and POOL, the arena_object corresponding to P is AO =
700arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
701stores, etc), POOL is the correct address of P's pool, AO.address is the
702correct base address of the pool's arena, and P must be within ARENA_SIZE of
703AO.address. In addition, AO.address is not 0 (no arena can start at address 0
Benjamin Peterson3924f932016-09-18 19:12:48 -0700704(NULL)). Therefore address_in_range correctly reports that obmalloc
Thomas Woutersa9773292006-04-21 09:43:23 +0000705controls P.
706
707Now suppose obmalloc does not control P (e.g., P was obtained via a direct
708call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
709in this case -- it may even be uninitialized trash. If the trash arenaindex
710is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
711control P.
712
713Else arenaindex is < maxarena, and AO is read up. If AO corresponds to an
714allocated arena, obmalloc controls all the memory in slice AO.address :
715AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
716so P doesn't lie in that slice, so the macro correctly reports that P is not
717controlled by obmalloc.
718
719Finally, if P is not controlled by obmalloc and AO corresponds to an unused
720arena_object (one not currently associated with an allocated arena),
721AO.address is 0, and the second test in the macro reduces to:
722
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000723 P < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +0000724
725If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
726that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
727of the test still passes, and the third clause (AO.address != 0) is necessary
728to get the correct result: AO.address is 0 in this case, so the macro
729correctly reports that P is not controlled by obmalloc (despite that P lies in
730slice AO.address : AO.address + ARENA_SIZE).
731
732Note: The third (AO.address != 0) clause was added in Python 2.5. Before
7332.5, arenas were never free()'ed, and an arenaindex < maxarena always
734corresponded to a currently-allocated arena, so the "P is not controlled by
735obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
736was impossible.
737
738Note that the logic is excruciating, and reading up possibly uninitialized
739memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
740creates problems for some memory debuggers. The overwhelming advantage is
741that this test determines whether an arbitrary address is controlled by
742obmalloc in a small constant time, independent of the number of arenas
743obmalloc controls. Since this test is needed at every entry point, it's
744extremely desirable that it be this fast.
745*/
Thomas Woutersa9773292006-04-21 09:43:23 +0000746
/* Return true iff `p` is an address allocated by pymalloc.  `pool` must be
   POOL_ADDR(p).  See the long comment above for the full correctness
   argument, including why uninitialized arenaindex values are handled. */
static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL. The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    /* Unsigned subtraction folds the "0 <= p - base" and
       "p - base < ARENA_SIZE" checks into one comparison. */
    return arenaindex < _PyRuntime.mem.maxarenas &&
        (uintptr_t)p - _PyRuntime.mem.arenas[arenaindex].address < ARENA_SIZE &&
        _PyRuntime.mem.arenas[arenaindex].address != 0;
}
Tim Peters338e0102002-04-01 19:23:44 +0000760
Victor Stinner9ed83c42017-10-31 12:18:10 -0700761
Neil Schemenauera35c6882001-02-27 04:45:05 +0000762/*==========================================================================*/
763
Victor Stinner9ed83c42017-10-31 12:18:10 -0700764/* pymalloc allocator
Neil Schemenauera35c6882001-02-27 04:45:05 +0000765
Victor Stinner9ed83c42017-10-31 12:18:10 -0700766 The basic blocks are ordered by decreasing execution frequency,
767 which minimizes the number of jumps in the most common cases,
768 improves branching prediction and instruction scheduling (small
769 block allocations typically result in a couple of instructions).
770 Unless the optimizer reorders everything, being too smart...
Neil Schemenauera35c6882001-02-27 04:45:05 +0000771
Victor Stinner9ed83c42017-10-31 12:18:10 -0700772 Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.
773
774 Return 0 if pymalloc failed to allocate the memory block: on bigger
775 requests, on error in the code below (as a last chance to serve the request)
776 or when the max memory limit has been reached. */
777static int
778pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +0000779{
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600780 pyblock *bp;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000781 poolp pool;
782 poolp next;
783 uint size;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000784
Benjamin Peterson05159c42009-12-03 03:01:27 +0000785#ifdef WITH_VALGRIND
Victor Stinner9ed83c42017-10-31 12:18:10 -0700786 if (UNLIKELY(running_on_valgrind == -1)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000787 running_on_valgrind = RUNNING_ON_VALGRIND;
Victor Stinner9ed83c42017-10-31 12:18:10 -0700788 }
789 if (UNLIKELY(running_on_valgrind)) {
790 return 0;
791 }
Benjamin Peterson05159c42009-12-03 03:01:27 +0000792#endif
793
Victor Stinner9ed83c42017-10-31 12:18:10 -0700794 if (nbytes == 0) {
795 return 0;
796 }
797 if (nbytes > SMALL_REQUEST_THRESHOLD) {
798 return 0;
799 }
T. Wouters06bb4872017-03-31 10:10:19 -0700800
Victor Stinner9ed83c42017-10-31 12:18:10 -0700801 LOCK();
802 /*
803 * Most frequent paths first
804 */
805 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
806 pool = _PyRuntime.mem.usedpools[size + size];
807 if (pool != pool->nextpool) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000808 /*
Victor Stinner9ed83c42017-10-31 12:18:10 -0700809 * There is a used pool for this size class.
810 * Pick up the head block of its free list.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000811 */
Victor Stinner9ed83c42017-10-31 12:18:10 -0700812 ++pool->ref.count;
813 bp = pool->freeblock;
814 assert(bp != NULL);
815 if ((pool->freeblock = *(pyblock **)bp) != NULL) {
816 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000817 }
Thomas Woutersa9773292006-04-21 09:43:23 +0000818
Victor Stinner9ed83c42017-10-31 12:18:10 -0700819 /*
820 * Reached the end of the free list, try to extend it.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000821 */
Victor Stinner9ed83c42017-10-31 12:18:10 -0700822 if (pool->nextoffset <= pool->maxnextoffset) {
823 /* There is room for another block. */
824 pool->freeblock = (pyblock*)pool +
825 pool->nextoffset;
826 pool->nextoffset += INDEX2SIZE(size);
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600827 *(pyblock **)(pool->freeblock) = NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -0700828 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000829 }
Thomas Woutersa9773292006-04-21 09:43:23 +0000830
Victor Stinner9ed83c42017-10-31 12:18:10 -0700831 /* Pool is full, unlink from used pools. */
832 next = pool->nextpool;
833 pool = pool->prevpool;
834 next->prevpool = pool;
835 pool->nextpool = next;
836 goto success;
837 }
Thomas Woutersa9773292006-04-21 09:43:23 +0000838
Victor Stinner9ed83c42017-10-31 12:18:10 -0700839 /* There isn't a pool of the right size class immediately
840 * available: use a free pool.
841 */
842 if (_PyRuntime.mem.usable_arenas == NULL) {
843 /* No arena has a free pool: allocate a new arena. */
844#ifdef WITH_MEMORY_LIMITS
845 if (_PyRuntime.mem.narenas_currently_allocated >= MAX_ARENAS) {
846 goto failed;
847 }
848#endif
849 _PyRuntime.mem.usable_arenas = new_arena();
850 if (_PyRuntime.mem.usable_arenas == NULL) {
851 goto failed;
852 }
853 _PyRuntime.mem.usable_arenas->nextarena =
854 _PyRuntime.mem.usable_arenas->prevarena = NULL;
855 }
856 assert(_PyRuntime.mem.usable_arenas->address != 0);
857
858 /* Try to get a cached free pool. */
859 pool = _PyRuntime.mem.usable_arenas->freepools;
860 if (pool != NULL) {
861 /* Unlink from cached pools. */
862 _PyRuntime.mem.usable_arenas->freepools = pool->nextpool;
863
864 /* This arena already had the smallest nfreepools
865 * value, so decreasing nfreepools doesn't change
866 * that, and we don't need to rearrange the
867 * usable_arenas list. However, if the arena has
868 * become wholly allocated, we need to remove its
869 * arena_object from usable_arenas.
870 */
871 --_PyRuntime.mem.usable_arenas->nfreepools;
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600872 if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
Victor Stinner9ed83c42017-10-31 12:18:10 -0700873 /* Wholly allocated: remove. */
874 assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600875 assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
876 _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
877 _PyRuntime.mem.usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -0700878
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600879 _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
880 if (_PyRuntime.mem.usable_arenas != NULL) {
881 _PyRuntime.mem.usable_arenas->prevarena = NULL;
882 assert(_PyRuntime.mem.usable_arenas->address != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000883 }
884 }
Victor Stinner9ed83c42017-10-31 12:18:10 -0700885 else {
886 /* nfreepools > 0: it must be that freepools
887 * isn't NULL, or that we haven't yet carved
888 * off all the arena's pools for the first
889 * time.
890 */
891 assert(_PyRuntime.mem.usable_arenas->freepools != NULL ||
892 _PyRuntime.mem.usable_arenas->pool_address <=
893 (pyblock*)_PyRuntime.mem.usable_arenas->address +
894 ARENA_SIZE - POOL_SIZE);
895 }
Thomas Woutersa9773292006-04-21 09:43:23 +0000896
Victor Stinner9ed83c42017-10-31 12:18:10 -0700897 init_pool:
898 /* Frontlink to used pools. */
899 next = _PyRuntime.mem.usedpools[size + size]; /* == prev */
900 pool->nextpool = next;
901 pool->prevpool = next;
902 next->nextpool = pool;
903 next->prevpool = pool;
904 pool->ref.count = 1;
905 if (pool->szidx == size) {
906 /* Luckily, this pool last contained blocks
907 * of the same size class, so its header
908 * and free list are already initialized.
909 */
910 bp = pool->freeblock;
911 assert(bp != NULL);
912 pool->freeblock = *(pyblock **)bp;
913 goto success;
914 }
915 /*
916 * Initialize the pool header, set up the free list to
917 * contain just the second block, and return the first
918 * block.
919 */
920 pool->szidx = size;
921 size = INDEX2SIZE(size);
922 bp = (pyblock *)pool + POOL_OVERHEAD;
923 pool->nextoffset = POOL_OVERHEAD + (size << 1);
924 pool->maxnextoffset = POOL_SIZE - size;
925 pool->freeblock = bp + size;
926 *(pyblock **)(pool->freeblock) = NULL;
927 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +0000928 }
Neil Schemenauera35c6882001-02-27 04:45:05 +0000929
Victor Stinner9ed83c42017-10-31 12:18:10 -0700930 /* Carve off a new pool. */
931 assert(_PyRuntime.mem.usable_arenas->nfreepools > 0);
932 assert(_PyRuntime.mem.usable_arenas->freepools == NULL);
933 pool = (poolp)_PyRuntime.mem.usable_arenas->pool_address;
934 assert((pyblock*)pool <= (pyblock*)_PyRuntime.mem.usable_arenas->address +
935 ARENA_SIZE - POOL_SIZE);
936 pool->arenaindex = (uint)(_PyRuntime.mem.usable_arenas - _PyRuntime.mem.arenas);
937 assert(&_PyRuntime.mem.arenas[pool->arenaindex] == _PyRuntime.mem.usable_arenas);
938 pool->szidx = DUMMY_SIZE_IDX;
939 _PyRuntime.mem.usable_arenas->pool_address += POOL_SIZE;
940 --_PyRuntime.mem.usable_arenas->nfreepools;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000941
Victor Stinner9ed83c42017-10-31 12:18:10 -0700942 if (_PyRuntime.mem.usable_arenas->nfreepools == 0) {
943 assert(_PyRuntime.mem.usable_arenas->nextarena == NULL ||
944 _PyRuntime.mem.usable_arenas->nextarena->prevarena ==
945 _PyRuntime.mem.usable_arenas);
946 /* Unlink the arena: it is completely allocated. */
947 _PyRuntime.mem.usable_arenas = _PyRuntime.mem.usable_arenas->nextarena;
948 if (_PyRuntime.mem.usable_arenas != NULL) {
949 _PyRuntime.mem.usable_arenas->prevarena = NULL;
950 assert(_PyRuntime.mem.usable_arenas->address != 0);
951 }
Antoine Pitrouf9d0b122012-12-09 14:28:26 +0100952 }
Victor Stinner9ed83c42017-10-31 12:18:10 -0700953
954 goto init_pool;
955
956success:
957 UNLOCK();
958 assert(bp != NULL);
959 *ptr_p = (void *)bp;
960 return 1;
961
962failed:
963 UNLOCK();
964 return 0;
Neil Schemenauera35c6882001-02-27 04:45:05 +0000965}
966
Victor Stinner9ed83c42017-10-31 12:18:10 -0700967
Victor Stinnerdb067af2014-05-02 22:31:14 +0200968static void *
969_PyObject_Malloc(void *ctx, size_t nbytes)
970{
Victor Stinner9ed83c42017-10-31 12:18:10 -0700971 void* ptr;
972 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
973 _PyRuntime.mem.num_allocated_blocks++;
974 return ptr;
975 }
976
977 ptr = PyMem_RawMalloc(nbytes);
978 if (ptr != NULL) {
979 _PyRuntime.mem.num_allocated_blocks++;
980 }
981 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +0200982}
983
Victor Stinner9ed83c42017-10-31 12:18:10 -0700984
Victor Stinnerdb067af2014-05-02 22:31:14 +0200985static void *
986_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
987{
Victor Stinner9ed83c42017-10-31 12:18:10 -0700988 void* ptr;
989
990 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
991 size_t nbytes = nelem * elsize;
992
993 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
994 memset(ptr, 0, nbytes);
995 _PyRuntime.mem.num_allocated_blocks++;
996 return ptr;
997 }
998
999 ptr = PyMem_RawCalloc(nelem, elsize);
1000 if (ptr != NULL) {
1001 _PyRuntime.mem.num_allocated_blocks++;
1002 }
1003 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001004}
1005
Neil Schemenauera35c6882001-02-27 04:45:05 +00001006
Victor Stinner9ed83c42017-10-31 12:18:10 -07001007/* Free a memory block allocated by pymalloc_alloc().
1008 Return 1 if it was freed.
1009 Return 0 if the block was not allocated by pymalloc_alloc(). */
static int
pymalloc_free(void *ctx, void *p)
{
    poolp pool;
    pyblock *lastfree;
    poolp next, prev;
    uint size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Under Valgrind, pymalloc_alloc never hands out blocks, so nothing
       here can be ours. */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        return 0;
    }
    /* We allocated this address. */

    LOCK();

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    /* Push p onto the pool's singly-linked free list. */
    *(pyblock **)p = lastfree = pool->freeblock;
    pool->freeblock = (pyblock *)p;
    if (!lastfree) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);        /* else the pool is empty */
        size = pool->szidx;
        next = _PyRuntime.mem.usedpools[size + size];
        prev = next->prevpool;

        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        goto success;
    }

    struct arena_object* ao;
    uint nf;  /* ao->nfreepools */

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (--pool->ref.count != 0) {
        /* pool isn't empty:  leave it in usedpools */
        goto success;
    }
    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (being not referenced, they are perhaps paged out).
     */
    next = pool->nextpool;
    prev = pool->prevpool;
    next->prevpool = prev;
    prev->nextpool = next;

    /* Link the pool to freepools.  This is a singly-linked
     * list, and pool->prevpool isn't used there.
     */
    ao = &_PyRuntime.mem.arenas[pool->arenaindex];
    pool->nextpool = ao->freepools;
    ao->freepools = pool;
    nf = ++ao->nfreepools;

    /* All the rest is arena management.  We just freed
     * a pool, and there are 4 cases for arena mgmt:
     * 1. If all the pools are free, return the arena to
     *    the system free().
     * 2. If this is the only free pool in the arena,
     *    add the arena back to the `usable_arenas` list.
     * 3. If the "next" arena has a smaller count of free
     *    pools, we have to "slide this arena right" to
     *    restore that usable_arenas is sorted in order of
     *    nfreepools.
     * 4. Else there's nothing more to do.
     */
    if (nf == ao->ntotalpools) {
        /* Case 1.  First unlink ao from usable_arenas.
         */
        assert(ao->prevarena == NULL ||
               ao->prevarena->address != 0);
        assert(ao ->nextarena == NULL ||
               ao->nextarena->address != 0);

        /* Fix the pointer in the prevarena, or the
         * usable_arenas pointer.
         */
        if (ao->prevarena == NULL) {
            _PyRuntime.mem.usable_arenas = ao->nextarena;
            assert(_PyRuntime.mem.usable_arenas == NULL ||
                   _PyRuntime.mem.usable_arenas->address != 0);
        }
        else {
            assert(ao->prevarena->nextarena == ao);
            ao->prevarena->nextarena =
                ao->nextarena;
        }
        /* Fix the pointer in the nextarena. */
        if (ao->nextarena != NULL) {
            assert(ao->nextarena->prevarena == ao);
            ao->nextarena->prevarena =
                ao->prevarena;
        }
        /* Record that this arena_object slot is
         * available to be reused.
         */
        ao->nextarena = _PyRuntime.mem.unused_arena_objects;
        _PyRuntime.mem.unused_arena_objects = ao;

        /* Free the entire arena. */
        _PyRuntime.obj.allocator_arenas.free(_PyRuntime.obj.allocator_arenas.ctx,
                                             (void *)ao->address, ARENA_SIZE);
        ao->address = 0;                    /* mark unassociated */
        --_PyRuntime.mem.narenas_currently_allocated;

        goto success;
    }

    if (nf == 1) {
        /* Case 2.  Put ao at the head of
         * usable_arenas.  Note that because
         * ao->nfreepools was 0 before, ao isn't
         * currently on the usable_arenas list.
         */
        ao->nextarena = _PyRuntime.mem.usable_arenas;
        ao->prevarena = NULL;
        if (_PyRuntime.mem.usable_arenas)
            _PyRuntime.mem.usable_arenas->prevarena = ao;
        _PyRuntime.mem.usable_arenas = ao;
        assert(_PyRuntime.mem.usable_arenas->address != 0);

        goto success;
    }

    /* If this arena is now out of order, we need to keep
     * the list sorted.  The list is kept sorted so that
     * the "most full" arenas are used first, which allows
     * the nearly empty arenas to be completely freed.  In
     * a few un-scientific tests, it seems like this
     * approach allowed a lot more memory to be freed.
     */
    if (ao->nextarena == NULL ||
             nf <= ao->nextarena->nfreepools) {
        /* Case 4.  Nothing to do. */
        goto success;
    }
    /* Case 3:  We have to move the arena towards the end
     * of the list, because it has more free pools than
     * the arena to its right.
     * First unlink ao from usable_arenas.
     */
    if (ao->prevarena != NULL) {
        /* ao isn't at the head of the list */
        assert(ao->prevarena->nextarena == ao);
        ao->prevarena->nextarena = ao->nextarena;
    }
    else {
        /* ao is at the head of the list */
        assert(_PyRuntime.mem.usable_arenas == ao);
        _PyRuntime.mem.usable_arenas = ao->nextarena;
    }
    ao->nextarena->prevarena = ao->prevarena;

    /* Locate the new insertion point by iterating over
     * the list, using our nextarena pointer.
     */
    while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
        ao->prevarena = ao->nextarena;
        ao->nextarena = ao->nextarena->nextarena;
    }

    /* Insert ao at this point. */
    assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
    assert(ao->prevarena->nextarena == ao->nextarena);

    ao->prevarena->nextarena = ao;
    if (ao->nextarena != NULL) {
        ao->nextarena->prevarena = ao;
    }

    /* Verify that the swaps worked. */
    assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
    assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
    assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
    assert((_PyRuntime.mem.usable_arenas == ao && ao->prevarena == NULL)
           || ao->prevarena->nextarena == ao);

    goto success;

success:
    UNLOCK();
    return 1;
}
1221
1222
1223static void
1224_PyObject_Free(void *ctx, void *p)
1225{
1226 /* PyObject_Free(NULL) has no effect */
1227 if (p == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001228 return;
1229 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001230
Victor Stinner9ed83c42017-10-31 12:18:10 -07001231 _PyRuntime.mem.num_allocated_blocks--;
1232 if (!pymalloc_free(ctx, p)) {
1233 /* pymalloc didn't allocate this address */
1234 PyMem_RawFree(p);
1235 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001236}
1237
Neil Schemenauera35c6882001-02-27 04:45:05 +00001238
Victor Stinner9ed83c42017-10-31 12:18:10 -07001239/* pymalloc realloc.
1240
1241 If nbytes==0, then as the Python docs promise, we do not treat this like
1242 free(p), and return a non-NULL result.
1243
1244 Return 1 if pymalloc reallocated memory and wrote the new pointer into
1245 newptr_p.
1246
1247 Return 0 if pymalloc didn't allocated p. */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt: let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    /* The old usable size is determined by the pool's size class. */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff: it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        /* Shrinking enough to be worth a copy: only `nbytes` bytes of the
           old block are still meaningful. */
        size = nbytes;
    }

    /* Grow (or worthwhile shrink): allocate a fresh block, copy the
       smaller of old/new sizes, then release the old block. */
    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    /* On allocation failure bp is NULL, and p is left untouched --
       standard realloc contract. */
    *newptr_p = bp;
    return 1;
}
1307
1308
1309static void *
1310_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
1311{
1312 void *ptr2;
1313
1314 if (ptr == NULL) {
1315 return _PyObject_Malloc(ctx, nbytes);
1316 }
1317
1318 if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
1319 return ptr2;
1320 }
1321
1322 return PyMem_RawRealloc(ptr, nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001323}
1324
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001325#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001326
1327/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001328/* pymalloc not enabled: Redirect the entry points to malloc. These will
1329 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001330
/* Without pymalloc there is no per-block accounting, so report zero
   allocated blocks. */
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}
1336
Tim Peters1221c0a2002-03-23 00:20:15 +00001337#endif /* WITH_PYMALLOC */
1338
Victor Stinner34be807c2016-03-14 12:04:26 +01001339
Tim Petersddea2082002-03-23 10:03:50 +00001340/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001341/* A x-platform debugging allocator. This doesn't manage memory directly,
1342 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1343 */
Tim Petersddea2082002-03-23 10:03:50 +00001344
Tim Petersf6fb5012002-04-12 07:38:53 +00001345/* Special bytes broadcast into debug memory blocks at appropriate times.
1346 * Strings of these are unlikely to be valid addresses, floats, ints or
1347 * 7-bit ASCII.
1348 */
1349#undef CLEANBYTE
1350#undef DEADBYTE
1351#undef FORBIDDENBYTE
1352#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001353#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001354#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001355
Tim Peterse0850172002-03-24 00:34:21 +00001356/* serialno is always incremented via calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001357 * to supply a single place to set a breakpoint.
1358 */
/* Increment the global debug-allocation serial number.  Kept as a real
   function (not a macro) so a debugger breakpoint here fires on every
   debug allocation -- see the comment above. */
static void
bumpserialno(void)
{
    ++_PyRuntime.mem.serialno;
}
1364
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001365#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001366
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001367/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1368static size_t
1369read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001370{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001371 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001372 size_t result = *q++;
1373 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001374
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001375 for (i = SST; --i > 0; ++q)
1376 result = (result << 8) | *q;
1377 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001378}
1379
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001380/* Write n as a big-endian size_t, MSB at address p, LSB at
1381 * p + sizeof(size_t) - 1.
1382 */
/* Encode n big-endian at address p: MSB at p[0], LSB at
   p[sizeof(size_t) - 1].  Inverse of read_size_t(). */
static void
write_size_t(void *p, size_t n)
{
    uint8_t *bytes = (uint8_t *)p;
    unsigned i = (unsigned)sizeof(size_t);
    while (i-- > 0) {
        /* Peel off the low byte and shift the rest down. */
        bytes[i] = (uint8_t)(n & 0xff);
        n >>= 8;
    }
}
1394
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001395/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1396 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001397
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001398p[0: S]
1399 Number of bytes originally asked for. This is a size_t, big-endian (easier
1400 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02001401p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05001402 API ID. See PEP 445. This is a character, but seems undocumented.
1403p[S+1: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001404 Copies of FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001405p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001406 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001407 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001408 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001409 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001410p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001411 Copies of FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001412p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02001413 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1414 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001415 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001416 If "bad memory" is detected later, the serial number gives an
1417 excellent way to set a breakpoint on the next run, to capture the
1418 instant at which this block was passed out.
1419*/
1420
/* Common implementation behind the debug malloc/calloc hooks.  Allocates
   4*SST extra bytes around the payload and fills them as documented in
   the layout comment below.  Returns a pointer to the payload ("data"),
   or NULL on overflow or underlying allocation failure. */
static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of malloc'ed pad block */
    uint8_t *data;        /* p + 2*SST == pointer to data bytes */
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */

    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4 * SST;

    /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
     *          ^--- p    ^--- data   ^--- tail
       S: nbytes stored as size_t
       I: API identifier (1 byte)
       F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
       C: Clean bytes used later to store actual data
       N: Serial number stored as size_t */

    if (use_calloc) {
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    }
    else {
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    }
    if (p == NULL) {
        return NULL;
    }
    data = p + 2*SST;

    bumpserialno();

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    /* calloc already zeroed the payload; otherwise poison it so reads of
       uninitialized memory are detectable. */
    if (nbytes > 0 && !use_calloc) {
        memset(data, CLEANBYTE, nbytes);
    }

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, _PyRuntime.mem.serialno);

    return data;
}
1473
/* Debug malloc hook: delegate to the common debug allocator with
   use_calloc=0 (payload is poisoned with CLEANBYTE, not zeroed). */
static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}
1479
1480static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001481_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
Victor Stinnerdb067af2014-05-02 22:31:14 +02001482{
1483 size_t nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001484 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001485 nbytes = nelem * elsize;
Victor Stinnerc4aec362016-03-14 22:26:53 +01001486 return _PyMem_DebugRawAlloc(1, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001487}
1488
Victor Stinner9ed83c42017-10-31 12:18:10 -07001489
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001490/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001491 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00001492 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001493 Then calls the underlying free.
1494*/
Victor Stinner0507bf52013-07-07 02:05:46 +02001495static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01001496_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001497{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001498 /* PyMem_Free(NULL) has no effect */
1499 if (p == NULL) {
1500 return;
1501 }
1502
Victor Stinner0507bf52013-07-07 02:05:46 +02001503 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001504 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001505 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001506
Victor Stinner0507bf52013-07-07 02:05:46 +02001507 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001508 nbytes = read_size_t(q);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001509 nbytes += 4 * SST;
1510 memset(q, DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02001511 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00001512}
1513
Victor Stinner9ed83c42017-10-31 12:18:10 -07001514
/* Debug hook for raw realloc.

   A debug block is laid out as (each decoration field SST bytes wide):
       [ size | api id + FORBIDDENBYTE pad | data | FORBIDDENBYTE pad | serialno ]

   Before calling the underlying realloc, the decorations plus up to
   ERASED_SIZE leading and trailing data bytes are saved and overwritten
   with DEADBYTE, so code that (incorrectly) peeks at the block while it
   is being resized sees obviously-dead values; the saved bytes are
   restored afterwards whether or not the resize succeeded. */
static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    /* realloc(NULL, n) behaves like malloc(n). */
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *head;        /* base address of malloc'ed pad block */
    uint8_t *data;        /* pointer to data bytes */
    uint8_t *r;
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
    size_t serialno;
#define ERASED_SIZE 64
    uint8_t save[2*ERASED_SIZE];  /* A copy of erased bytes. */

    _PyMem_DebugCheckAddress(api->api_id, p);

    data = (uint8_t *)p;
    head = data - 2*SST;
    original_nbytes = read_size_t(head);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4*SST;

    tail = data + original_nbytes;
    /* The serial number sits just past the tail pad; keep the old one in
       case the resize fails and the old block must be re-decorated. */
    serialno = read_size_t(tail + SST);
    /* Mark the header, the trailer, ERASED_SIZE bytes at the begin and
       ERASED_SIZE bytes at the end as dead and save the copy of erased bytes.
     */
    if (original_nbytes <= sizeof(save)) {
        /* Small block: the whole data area fits in `save`, erase it all. */
        memcpy(save, data, original_nbytes);
        memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
    }
    else {
        /* Large block: only erase (and save) ERASED_SIZE bytes at each
           end; the middle of the data area is left untouched. */
        memcpy(save, data, ERASED_SIZE);
        memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
        memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
        memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
    }

    /* Resize and add decorations. */
    r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
    if (r == NULL) {
        /* Resize failed: fall through and rewrite the *original*
           decorations in place, so the old block stays valid. */
        nbytes = original_nbytes;
    }
    else {
        head = r;
        bumpserialno();
        serialno = _PyRuntime.mem.serialno;
    }

    write_size_t(head, nbytes);
    head[SST] = (uint8_t)api->api_id;
    memset(head + SST + 1, FORBIDDENBYTE, SST-1);
    data = head + 2*SST;

    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    /* Restore saved bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(data, save, Py_MIN(nbytes, original_nbytes));
    }
    else {
        size_t i = original_nbytes - ERASED_SIZE;
        memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
        if (nbytes > i) {
            /* Put the saved tail bytes back at their original offset —
               only the part that still fits in the new size. */
            memcpy(data + i, &save[ERASED_SIZE],
                   Py_MIN(nbytes - i, ERASED_SIZE));
        }
    }

    if (r == NULL) {
        return NULL;
    }

    if (nbytes > original_nbytes) {
        /* growing: mark new extra memory clean */
        memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
    }

    return data;
}
1604
/* Abort with a fatal error if the calling thread does not hold the GIL.
   The non-"Raw" debug hooks below call this on entry. */
static void
_PyMem_DebugCheckGIL(void)
{
    if (!PyGILState_Check()) {
        Py_FatalError("Python memory allocator called "
                      "without holding the GIL");
    }
}
1612
1613static void *
1614_PyMem_DebugMalloc(void *ctx, size_t nbytes)
1615{
1616 _PyMem_DebugCheckGIL();
1617 return _PyMem_DebugRawMalloc(ctx, nbytes);
1618}
1619
1620static void *
1621_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
1622{
1623 _PyMem_DebugCheckGIL();
1624 return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
1625}
1626
Victor Stinner9ed83c42017-10-31 12:18:10 -07001627
/* Debug free hook: verify the GIL is held, then defer to the raw
   variant. */
static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}
1634
Victor Stinner9ed83c42017-10-31 12:18:10 -07001635
Victor Stinnerc4aec362016-03-14 22:26:53 +01001636static void *
1637_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
1638{
1639 _PyMem_DebugCheckGIL();
1640 return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
1641}
1642
Tim Peters7ccfadf2002-04-01 06:04:21 +00001643/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001644 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00001645 * and call Py_FatalError to kill the program.
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001646 * The API id, is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00001647 */
Victor Stinner0507bf52013-07-07 02:05:46 +02001648static void
1649_PyMem_DebugCheckAddress(char api, const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001650{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001651 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001652 char msgbuf[64];
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02001653 const char *msg;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001654 size_t nbytes;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001655 const uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001656 int i;
1657 char id;
Tim Petersddea2082002-03-23 10:03:50 +00001658
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001659 if (p == NULL) {
1660 msg = "didn't expect a NULL pointer";
1661 goto error;
1662 }
Tim Petersddea2082002-03-23 10:03:50 +00001663
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001664 /* Check the API id */
1665 id = (char)q[-SST];
1666 if (id != api) {
1667 msg = msgbuf;
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02001668 snprintf(msgbuf, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001669 msgbuf[sizeof(msgbuf)-1] = 0;
1670 goto error;
1671 }
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001672
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001673 /* Check the stuff at the start of p first: if there's underwrite
1674 * corruption, the number-of-bytes field may be nuts, and checking
1675 * the tail could lead to a segfault then.
1676 */
1677 for (i = SST-1; i >= 1; --i) {
1678 if (*(q-i) != FORBIDDENBYTE) {
1679 msg = "bad leading pad byte";
1680 goto error;
1681 }
1682 }
Tim Petersddea2082002-03-23 10:03:50 +00001683
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001684 nbytes = read_size_t(q - 2*SST);
1685 tail = q + nbytes;
1686 for (i = 0; i < SST; ++i) {
1687 if (tail[i] != FORBIDDENBYTE) {
1688 msg = "bad trailing pad byte";
1689 goto error;
1690 }
1691 }
Tim Petersddea2082002-03-23 10:03:50 +00001692
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001693 return;
Tim Petersd1139e02002-03-28 07:32:11 +00001694
1695error:
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001696 _PyObject_DebugDumpAddress(p);
1697 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00001698}
1699
Tim Peters7ccfadf2002-04-01 06:04:21 +00001700/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    /* This dump runs when corruption is already suspected, so every field
       read below may itself be garbage; print defensively, start to end. */
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
            "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        /* Show each leading pad byte, flagging the corrupted ones. */
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, " at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs(" Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              " may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    /* Trailing pad sits right after the nbytes data bytes. */
    tail = q + nbytes;
    fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, " at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    /* The allocation serial number is stored just past the tail pad. */
    serial = read_size_t(tail + SST);
    fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs(" Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    /* Flush before dumping the traceback so the two outputs interleave
       in order even if stderr is block-buffered. */
    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}
1806
David Malcolm49526f42012-06-22 14:55:41 -04001807
/* Print "msg = <value>" to `out`: `msg` padded with spaces to 35 columns,
   then '=', then the value right-aligned (thousands-separated by commas)
   in a 21-column field followed by a newline.  Returns `value` unchanged
   so callers can accumulate a running total across lines. */
static size_t
printone(FILE *out, const char* msg, size_t value)
{
    const size_t origvalue = value;

    fputs(msg, out);
    for (int col = (int)strlen(msg); col < 35; ++col) {
        fputc(' ', out);
    }
    fputc('=', out);

    /* Render the value right-to-left into a fixed-width buffer, inserting
       a comma after every third digit. */
    char field[100];
    int pos = 22;
    field[pos--] = '\0';
    field[pos--] = '\n';
    int group = 3;
    do {
        size_t rest = value / 10;
        unsigned int digit = (unsigned int)(value - rest * 10);
        value = rest;
        field[pos--] = (char)(digit + '0');
        if (--group == 0 && value && pos >= 0) {
            group = 3;
            field[pos--] = ',';
        }
    } while (value && pos >= 0);

    /* Left-fill the remainder of the field with spaces. */
    while (pos >= 0) {
        field[pos--] = ' ';
    }
    fputs(field, out);

    return origvalue;
}
1843
David Malcolm49526f42012-06-22 14:55:41 -04001844void
1845_PyDebugAllocatorStats(FILE *out,
1846 const char *block_name, int num_blocks, size_t sizeof_block)
1847{
1848 char buf1[128];
1849 char buf2[128];
1850 PyOS_snprintf(buf1, sizeof(buf1),
Tim Peterseaa3bcc2013-09-05 22:57:04 -05001851 "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
David Malcolm49526f42012-06-22 14:55:41 -04001852 num_blocks, block_name, sizeof_block);
1853 PyOS_snprintf(buf2, sizeof(buf2),
1854 "%48s ", buf1);
1855 (void)printone(out, buf2, num_blocks * sizeof_block);
1856}
1857
Victor Stinner34be807c2016-03-14 12:04:26 +01001858
David Malcolm49526f42012-06-22 14:55:41 -04001859#ifdef WITH_PYMALLOC
1860
Victor Stinner34be807c2016-03-14 12:04:26 +01001861#ifdef Py_DEBUG
1862/* Is target in the list? The list is traversed via the nextpool pointers.
1863 * The list may be NULL-terminated, or circular. Return 1 if target is in
1864 * list, else 0.
1865 */
1866static int
1867pool_is_in_list(const poolp target, poolp list)
1868{
1869 poolp origlist = list;
1870 assert(target != NULL);
1871 if (list == NULL)
1872 return 0;
1873 do {
1874 if (target == list)
1875 return 1;
1876 list = list->nextpool;
1877 } while (list != NULL && list != origlist);
1878 return 0;
1879}
1880#endif
1881
David Malcolm49526f42012-06-22 14:55:41 -04001882/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00001883 * In Py_DEBUG mode, also perform some expensive internal consistency
1884 * checks.
1885 */
void
_PyObject_DebugMallocStats(FILE *out)
{
    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas. If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < _PyRuntime.mem.maxarenas; ++i) {
        uint j;
        uintptr_t base = _PyRuntime.mem.arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (_PyRuntime.mem.arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += _PyRuntime.mem.arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) _PyRuntime.mem.arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) _PyRuntime.mem.arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, _PyRuntime.mem.arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            /* A pool with free blocks must be on its size class's used
               list (usedpools is indexed by 2 * size class index). */
            if (freeblocks > 0)
                assert(pool_is_in_list(p, _PyRuntime.mem.usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == _PyRuntime.mem.narenas_currently_allocated);

    /* Emit the per-size-class table. */
    fputc('\n', out);
    fputs("class size num pools blocks in use avail blocks\n"
          "----- ---- --------- ------------- ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                "%11" PY_FORMAT_SIZE_T "u "
                "%15" PY_FORMAT_SIZE_T "u "
                "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
    /* serialno is only meaningful when the debug hooks are installed. */
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", _PyRuntime.mem.serialno);
    (void)printone(out, "# arenas allocated total", _PyRuntime.mem.ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", _PyRuntime.mem.ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", _PyRuntime.mem.narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    /* Accumulate the byte breakdown; it should sum to narenas*ARENA_SIZE. */
    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
}
2022
David Malcolm49526f42012-06-22 14:55:41 -04002023#endif /* #ifdef WITH_PYMALLOC */