#include "Python.h"

#include <stdbool.h>


/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


/* Python's malloc wrappers (see pymem.h) */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

/* Forward declaration */
static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugRawFree(void *ctx, void *ptr);

static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);

#if defined(__has_feature)  /* Clang */
    #if __has_feature(address_sanitizer)  /* is ASAN enabled? */
        #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
            __attribute__((no_address_safety_analysis))
    #else
        #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
    #endif
#else
    #if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
        #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
            __attribute__((no_address_safety_analysis))
    #else
        #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
    #endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
       for calloc(0, 0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (nelem == 0 || elsize == 0) {
        nelem = 1;
        elsize = 1;
    }
    return calloc(nelem, elsize);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}
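
/* A minimal sketch (kept un-compiled) of why the raw wrappers above bump
   zero-byte requests to one byte: C allows malloc(0) to return either NULL
   or a unique pointer, while Python treats a NULL result as out-of-memory,
   so a 1-byte allocation keeps the two cases distinguishable. */
#if 0
static int
zero_size_example(void)
{
    void *p = PyMem_RawMalloc(0);   /* behaves like malloc(1) */
    if (p == NULL) {
        return -1;                  /* only on a genuine out-of-memory */
    }
    PyMem_RawFree(p);
    return 0;
}
#endif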


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif


#define PYRAW_FUNCS _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree
#ifdef WITH_PYMALLOC
#  define PYOBJ_FUNCS _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free
#else
#  define PYOBJ_FUNCS PYRAW_FUNCS
#endif
#define PYMEM_FUNCS PYOBJ_FUNCS

typedef struct {
    /* We tag each block with an API ID in order to detect API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', {NULL, PYRAW_FUNCS}},
    {'m', {NULL, PYMEM_FUNCS}},
    {'o', {NULL, PYOBJ_FUNCS}}
    };

#define PYRAWDBG_FUNCS \
    _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree
#define PYDBG_FUNCS \
    _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree

static PyMemAllocatorEx _PyMem_Raw = {
#ifdef Py_DEBUG
    &_PyMem_Debug.raw, PYRAWDBG_FUNCS
#else
    NULL, PYRAW_FUNCS
#endif
    };

static PyMemAllocatorEx _PyMem = {
#ifdef Py_DEBUG
    &_PyMem_Debug.mem, PYDBG_FUNCS
#else
    NULL, PYMEM_FUNCS
#endif
    };

static PyMemAllocatorEx _PyObject = {
#ifdef Py_DEBUG
    &_PyMem_Debug.obj, PYDBG_FUNCS
#else
    NULL, PYOBJ_FUNCS
#endif
    };

void
_PyMem_GetDefaultRawAllocator(PyMemAllocatorEx *alloc_p)
{
#ifdef Py_DEBUG
    PyMemAllocatorEx alloc = {&_PyMem_Debug.raw, PYDBG_FUNCS};
#else
    PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};
#endif
    *alloc_p = alloc;
}

int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is empty, not set, or ignored (-E/-I command line
           options): use the default allocators */
#ifdef Py_DEBUG
#  ifdef WITH_PYMALLOC
        opt = "pymalloc_debug";
#  else
        opt = "malloc_debug";
#  endif
#else
        /* !Py_DEBUG */
#  ifdef WITH_PYMALLOC
        opt = "pymalloc";
#  else
        opt = "malloc";
#  endif
#endif
    }

    if (strcmp(opt, "debug") == 0) {
        PyMem_SetupDebugHooks();
    }
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0)
    {
        PyMemAllocatorEx alloc = {NULL, PYRAW_FUNCS};

        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);

        if (strcmp(opt, "malloc_debug") == 0)
            PyMem_SetupDebugHooks();
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0
             || strcmp(opt, "pymalloc_debug") == 0)
    {
        PyMemAllocatorEx raw_alloc = {NULL, PYRAW_FUNCS};
        PyMemAllocatorEx mem_alloc = {NULL, PYMEM_FUNCS};
        PyMemAllocatorEx obj_alloc = {NULL, PYOBJ_FUNCS};

        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &raw_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &mem_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &obj_alloc);

        if (strcmp(opt, "pymalloc_debug") == 0)
            PyMem_SetupDebugHooks();
    }
#endif
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}

#undef PYRAW_FUNCS
#undef PYMEM_FUNCS
#undef PYOBJ_FUNCS
#undef PYRAWDBG_FUNCS
#undef PYDBG_FUNCS

static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };

#ifdef WITH_PYMALLOC
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif

void
PyMem_SetupDebugHooks(void)
{
    PyMemAllocatorEx alloc;

    alloc.malloc = _PyMem_DebugRawMalloc;
    alloc.calloc = _PyMem_DebugRawCalloc;
    alloc.realloc = _PyMem_DebugRawRealloc;
    alloc.free = _PyMem_DebugRawFree;

    if (_PyMem_Raw.malloc != _PyMem_DebugRawMalloc) {
        alloc.ctx = &_PyMem_Debug.raw;
        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }

    alloc.malloc = _PyMem_DebugMalloc;
    alloc.calloc = _PyMem_DebugCalloc;
    alloc.realloc = _PyMem_DebugRealloc;
    alloc.free = _PyMem_DebugFree;

    if (_PyMem.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.mem;
        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }

    if (_PyObject.malloc != _PyMem_DebugMalloc) {
        alloc.ctx = &_PyMem_Debug.obj;
        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}
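
/* Sketch (kept un-compiled): the checks above make PyMem_SetupDebugHooks()
   idempotent -- a domain whose malloc is already the debug wrapper is
   skipped, so a second call does not wrap the hooks around themselves. */
#if 0
static void
enable_debug_hooks_twice(void)
{
    PyMem_SetupDebugHooks();
    PyMem_SetupDebugHooks();  /* harmless: already-hooked domains are skipped */
}
#endif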

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
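
/* Sketch (kept un-compiled) of how an embedding application can install a
   custom allocator for one domain with PyMem_SetAllocator(); the my_*
   names are hypothetical.  The hooks must tolerate being called before
   Py_Initialize() and, for PYMEM_DOMAIN_RAW, without the GIL held. */
#if 0
static void *
my_malloc(void *ctx, size_t size)
{
    return malloc(size ? size : 1);
}

static void *
my_calloc(void *ctx, size_t nelem, size_t elsize)
{
    return calloc(nelem ? nelem : 1, elsize ? elsize : 1);
}

static void *
my_realloc(void *ctx, void *ptr, size_t size)
{
    return realloc(ptr, size ? size : 1);
}

static void
my_free(void *ctx, void *ptr)
{
    free(ptr);
}

static void
install_my_allocator(void)
{
    PyMemAllocatorEx alloc = {NULL, my_malloc, my_calloc, my_realloc, my_free};
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
}
#endif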

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}


void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}
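
/* Sketch (kept un-compiled) of the calloc overflow guard used above: the
   product nelem * elsize could wrap around in size_t, so the bound is
   checked with a division instead of a multiplication. */
#if 0
static int
calloc_would_overflow(size_t nelem, size_t elsize)
{
    return elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize;
}
#endif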

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}


wchar_t*
_PyMem_RawWcsdup(const wchar_t *str)
{
    size_t len = wcslen(str);
    if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
        return NULL;
    }

    size_t size = (len + 1) * sizeof(wchar_t);
    wchar_t *str2 = PyMem_RawMalloc(size);
    if (str2 == NULL) {
        return NULL;
    }

    memcpy(str2, str, size);
    return str2;
}

char *
_PyMem_RawStrdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_RawMalloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    size_t size;
    char *copy;

    size = strlen(str) + 1;
    copy = PyMem_Malloc(size);
    if (copy == NULL)
        return NULL;
    memcpy(copy, str, size);
    return copy;
}
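
/* Sketch (kept un-compiled): the Raw variant is the one to use when the
   GIL may not be held (e.g. during early startup), since PYMEM_DOMAIN_RAW
   is the only domain whose functions are safe to call without the GIL. */
#if 0
static char *
dup_option_before_init(const char *name)
{
    return _PyMem_RawStrdup(name);  /* safe before Py_Initialize() */
}
#endif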

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2).  It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list).  This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with lots of reserved
 * memory for the different free lists, which degenerates over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory-intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */
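
/* Worked example (kept un-compiled) of the size-class mapping above, using
   the same arithmetic as INDEX2SIZE()/ALIGNMENT_SHIFT defined below: a
   request of nbytes maps to class index (nbytes - 1) >> ALIGNMENT_SHIFT
   and is served with blocks of (idx + 1) << ALIGNMENT_SHIFT bytes. */
#if 0
static void
size_class_example(void)
{
    unsigned int idx = (42u - 1) >> 3;          /* 42-byte request -> idx 5 */
    unsigned int blocksize = (idx + 1) << 3;    /* -> 48-byte blocks */
    assert(idx == 5 && blocksize == 48);        /* matches the 41-48 row */
}
#endif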

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be.  In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault.  4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages within
 * this space are referenced subsequently. So malloc'ing big blocks and not
 * using them does not mean "wasting memory". It's an addressable range
 * wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, that it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
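
/* Worked example (kept un-compiled), assuming a 64-bit build where
   sizeof(struct pool_header) is 48 bytes: POOL_OVERHEAD rounds 48 up to a
   multiple of ALIGNMENT (still 48), so a 4 KiB pool of size class 1
   (16-byte blocks) holds NUMBLOCKS(1) == (4096 - 48) / 16 == 253 blocks. */
#if 0
static void
numblocks_example(void)
{
    assert(NUMBLOCKS(1) == (4096u - 48u) / 16u);    /* == 253 */
}
#endif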

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with
pymalloc's strategy of never touching a piece of memory, at any level (arena,
pool, or block), until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
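
/* Sketch (kept un-compiled) of the freeblock encoding described above: the
   first sizeof(block *) bytes of each free block hold a pointer to the next
   free block, so popping the head is one load and one store.  A NULL next
   pointer means the list is exhausted (more virgin blocks may still be
   carved off via nextoffset/maxnextoffset). */
#if 0
static block *
pop_free_block(poolp pool)
{
    block *bp = pool->freeblock;        /* head of the free list */
    pool->freeblock = *(block **)bp;    /* may become NULL: list exhausted */
    return bp;
}
#endif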

#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};
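
/* Sketch (kept un-compiled) of the PTA() trick above: usedpools[2*i] points
   2*sizeof(block *) bytes before itself, so that, viewed as a poolp, its
   nextpool and prevpool fields alias usedpools[2*i] and usedpools[2*i+1].
   An empty list header therefore satisfies pool->nextpool == pool. */
#if 0
static int
used_list_is_empty(uint size_class_idx)
{
    poolp pool = usedpools[size_class_idx + size_class_idx];
    return pool->nextpool == pool;      /* headed circular list is empty */
}
#endif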

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/
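
/* Sketch (kept un-compiled) of the ordering invariant described above:
   usable_arenas (declared just below) is kept sorted by ascending
   nfreepools, so allocation prefers nearly-full arenas and nearly-empty
   ones can drain and be returned to the system. */
#if 0
static void
check_usable_arenas_order(void)
{
    struct arena_object *ao;
    for (ao = usable_arenas; ao != NULL && ao->nextarena != NULL;
         ao = ao->nextarena) {
        assert(ao->nfreepools <= ao->nextarena->nfreepools);
    }
}
#endif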

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    if (debug_stats == -1) {
        char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                   &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}


/*
address_in_range(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because address_in_range is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky:  Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure:  A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details:  given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore address_in_range correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result:  AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note:  The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/

static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL. The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    return arenaindex < maxarenas &&
           (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
           arenas[arenaindex].address != 0;
}
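
/* Sketch (kept un-compiled) of the unsigned-arithmetic trick used above: if
   p is below the arena base, (uintptr_t)p - base wraps around to a huge
   value, so a single comparison against ARENA_SIZE (256 KiB) checks both
   "p >= base" and "p < base + ARENA_SIZE" at once. */
#if 0
static void
range_check_example(void)
{
    uintptr_t base = 0x10000;
    assert((uintptr_t)0x10010 - base < ARENA_SIZE);     /* inside */
    assert(!((uintptr_t)0xFFF0 - base < ARENA_SIZE));   /* below: wraps huge */
}
#endif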


/*==========================================================================*/

/* pymalloc allocator

   The basic blocks are ordered by decreasing execution frequency,
   which minimizes the number of jumps in the most common cases,
   improves branch prediction and instruction scheduling (small
   block allocations typically result in a couple of instructions).
   Unless the optimizer reorders everything, being too smart...

   Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.

   Return 0 if pymalloc failed to allocate the memory block: on bigger
   requests, on error in the code below (as a last chance to serve the request)
   or when the max memory limit has been reached. */
static int
pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return 0;
    }
#endif

    if (nbytes == 0) {
        return 0;
    }
    if (nbytes > SMALL_REQUEST_THRESHOLD) {
        return 0;
    }

    LOCK();
    /*
     * Most frequent paths first
     */
    size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    pool = usedpools[size + size];
    if (pool != pool->nextpool) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);
        if ((pool->freeblock = *(block **)bp) != NULL) {
            goto success;
        }

        /*
         * Reached the end of the free list, try to extend it.
         */
        if (pool->nextoffset <= pool->maxnextoffset) {
            /* There is room for another block. */
            pool->freeblock = (block*)pool +
                              pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(block **)(pool->freeblock) = NULL;
            goto success;
        }

        /* Pool is full, unlink from used pools. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        goto success;
    }

    /* There isn't a pool of the right size class immediately
     * available:  use a free pool.
     */
    if (usable_arenas == NULL) {
        /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (narenas_currently_allocated >= MAX_ARENAS) {
            goto failed;
        }
#endif
        usable_arenas = new_arena();
        if (usable_arenas == NULL) {
            goto failed;
        }
        usable_arenas->nextarena =
            usable_arenas->prevarena = NULL;
    }
    assert(usable_arenas->address != 0);

    /* Try to get a cached free pool. */
    pool = usable_arenas->freepools;
    if (pool != NULL) {
        /* Unlink from cached pools. */
        usable_arenas->freepools = pool->nextpool;

        /* This arena already had the smallest nfreepools
         * value, so decreasing nfreepools doesn't change
         * that, and we don't need to rearrange the
         * usable_arenas list.  However, if the arena has
         * become wholly allocated, we need to remove its
         * arena_object from usable_arenas.
         */
        --usable_arenas->nfreepools;
        if (usable_arenas->nfreepools == 0) {
            /* Wholly allocated:  remove. */
            assert(usable_arenas->freepools == NULL);
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);

            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(usable_arenas->freepools != NULL ||
                   usable_arenas->pool_address <=
                       (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
        }

    init_pool:
        /* Frontlink to used pools. */
        next = usedpools[size + size]; /* == prev */
Victor Stinner9e87e772017-11-24 12:09:24 +01001376 next = usedpools[size + size]; /* == prev */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001377 pool->nextpool = next;
1378 pool->prevpool = next;
1379 next->nextpool = pool;
1380 next->prevpool = pool;
1381 pool->ref.count = 1;
1382 if (pool->szidx == size) {
1383 /* Luckily, this pool last contained blocks
1384 * of the same size class, so its header
1385 * and free list are already initialized.
1386 */
1387 bp = pool->freeblock;
1388 assert(bp != NULL);
Victor Stinner9e87e772017-11-24 12:09:24 +01001389 pool->freeblock = *(block **)bp;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001390 goto success;
1391 }
1392 /*
1393 * Initialize the pool header, set up the free list to
1394 * contain just the second block, and return the first
1395 * block.
1396 */
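        /* A concrete sketch, assuming 4096-byte pools and a 48-byte
         * POOL_OVERHEAD as on common 64-bit builds, for a 32-byte size
         * class: the returned block bp sits at pool + 48, the free list
         * is seeded with the single block at pool + 80, nextoffset starts
         * at 48 + 64 == 112, and maxnextoffset is 4096 - 32 == 4064. */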
1397 pool->szidx = size;
1398 size = INDEX2SIZE(size);
Victor Stinner9e87e772017-11-24 12:09:24 +01001399 bp = (block *)pool + POOL_OVERHEAD;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001400 pool->nextoffset = POOL_OVERHEAD + (size << 1);
1401 pool->maxnextoffset = POOL_SIZE - size;
1402 pool->freeblock = bp + size;
Victor Stinner9e87e772017-11-24 12:09:24 +01001403 *(block **)(pool->freeblock) = NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001404 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001405 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001406
Victor Stinner9ed83c42017-10-31 12:18:10 -07001407 /* Carve off a new pool. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001408 assert(usable_arenas->nfreepools > 0);
1409 assert(usable_arenas->freepools == NULL);
1410 pool = (poolp)usable_arenas->pool_address;
1411 assert((block*)pool <= (block*)usable_arenas->address +
Victor Stinner9ed83c42017-10-31 12:18:10 -07001412 ARENA_SIZE - POOL_SIZE);
Victor Stinner9e87e772017-11-24 12:09:24 +01001413 pool->arenaindex = (uint)(usable_arenas - arenas);
1414 assert(&arenas[pool->arenaindex] == usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001415 pool->szidx = DUMMY_SIZE_IDX;
Victor Stinner9e87e772017-11-24 12:09:24 +01001416 usable_arenas->pool_address += POOL_SIZE;
1417 --usable_arenas->nfreepools;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001418
Victor Stinner9e87e772017-11-24 12:09:24 +01001419 if (usable_arenas->nfreepools == 0) {
1420 assert(usable_arenas->nextarena == NULL ||
1421 usable_arenas->nextarena->prevarena ==
1422 usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001423 /* Unlink the arena: it is completely allocated. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001424 usable_arenas = usable_arenas->nextarena;
1425 if (usable_arenas != NULL) {
1426 usable_arenas->prevarena = NULL;
1427 assert(usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001428 }
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001429 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07001430
1431 goto init_pool;
1432
1433success:
1434 UNLOCK();
1435 assert(bp != NULL);
1436 *ptr_p = (void *)bp;
1437 return 1;
1438
1439failed:
1440 UNLOCK();
1441 return 0;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001442}
1443
Victor Stinner9ed83c42017-10-31 12:18:10 -07001444
Victor Stinnerdb067af2014-05-02 22:31:14 +02001445static void *
1446_PyObject_Malloc(void *ctx, size_t nbytes)
1447{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001448 void* ptr;
1449 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001450 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001451 return ptr;
1452 }
1453
1454 ptr = PyMem_RawMalloc(nbytes);
1455 if (ptr != NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001456 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001457 }
1458 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001459}
1460
Victor Stinner9ed83c42017-10-31 12:18:10 -07001461
Victor Stinnerdb067af2014-05-02 22:31:14 +02001462static void *
1463_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
1464{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001465 void* ptr;
1466
1467 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
1468 size_t nbytes = nelem * elsize;
1469
1470 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
1471 memset(ptr, 0, nbytes);
Victor Stinner9e87e772017-11-24 12:09:24 +01001472 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001473 return ptr;
1474 }
1475
1476 ptr = PyMem_RawCalloc(nelem, elsize);
1477 if (ptr != NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001478 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001479 }
1480 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001481}
1482
Neil Schemenauera35c6882001-02-27 04:45:05 +00001483
Victor Stinner9ed83c42017-10-31 12:18:10 -07001484/* Free a memory block allocated by pymalloc_alloc().
1485 Return 1 if it was freed.
1486 Return 0 if the block was not allocated by pymalloc_alloc(). */
1487static int
1488pymalloc_free(void *ctx, void *p)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001489{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001490 poolp pool;
Victor Stinner9e87e772017-11-24 12:09:24 +01001491 block *lastfree;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001492 poolp next, prev;
1493 uint size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001494
Victor Stinner9ed83c42017-10-31 12:18:10 -07001495 assert(p != NULL);
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001496
Benjamin Peterson05159c42009-12-03 03:01:27 +00001497#ifdef WITH_VALGRIND
Victor Stinner9ed83c42017-10-31 12:18:10 -07001498 if (UNLIKELY(running_on_valgrind > 0)) {
1499 return 0;
1500 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001501#endif
1502
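    /* Sketch of the ownership test: POOL_ADDR() masks off the low bits of p
     * to reach the enclosing POOL_SIZE-aligned pool header, and
     * address_in_range() then checks that header's arenaindex against the
     * arenas[] table to decide whether pymalloc handed out this address. */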
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001503 pool = POOL_ADDR(p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001504 if (!address_in_range(p, pool)) {
1505 return 0;
1506 }
1507 /* We allocated this address. */
Thomas Woutersa9773292006-04-21 09:43:23 +00001508
Victor Stinner9ed83c42017-10-31 12:18:10 -07001509 LOCK();
Thomas Woutersa9773292006-04-21 09:43:23 +00001510
Victor Stinner9ed83c42017-10-31 12:18:10 -07001511 /* Link p to the start of the pool's freeblock list. Since
1512 * the pool had at least the p block outstanding, the pool
1513 * wasn't empty (so it's already in a usedpools[] list, or
1514 * was full and is in no list -- it's not in the freeblocks
1515 * list in any case).
1516 */
1517 assert(pool->ref.count > 0); /* else it was empty */
Victor Stinner9e87e772017-11-24 12:09:24 +01001518 *(block **)p = lastfree = pool->freeblock;
1519 pool->freeblock = (block *)p;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001520 if (!lastfree) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001521 /* Pool was full, so doesn't currently live in any list:
1522 * link it to the front of the appropriate usedpools[] list.
1523 * This mimics LRU pool usage for new allocations and
1524 * targets optimal filling when several pools contain
1525 * blocks of the same size class.
1526 */
1527 --pool->ref.count;
1528 assert(pool->ref.count > 0); /* else the pool is empty */
1529 size = pool->szidx;
Victor Stinner9e87e772017-11-24 12:09:24 +01001530 next = usedpools[size + size];
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001531 prev = next->prevpool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001532
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001533 /* insert pool before next: prev <-> pool <-> next */
1534 pool->nextpool = next;
1535 pool->prevpool = prev;
1536 next->prevpool = pool;
1537 prev->nextpool = pool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001538 goto success;
1539 }
1540
1541 struct arena_object* ao;
1542 uint nf; /* ao->nfreepools */
1543
1544 /* freeblock wasn't NULL, so the pool wasn't full,
1545 * and the pool is in a usedpools[] list.
1546 */
1547 if (--pool->ref.count != 0) {
1548 /* pool isn't empty: leave it in usedpools */
1549 goto success;
1550 }
1551 /* Pool is now empty: unlink from usedpools, and
1552 * link to the front of freepools. This ensures that
1553 * previously freed pools will be allocated later
1554 * (not being referenced, they have perhaps been paged out).
1555 */
1556 next = pool->nextpool;
1557 prev = pool->prevpool;
1558 next->prevpool = prev;
1559 prev->nextpool = next;
1560
1561 /* Link the pool to freepools. This is a singly-linked
1562 * list, and pool->prevpool isn't used there.
1563 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001564 ao = &arenas[pool->arenaindex];
Victor Stinner9ed83c42017-10-31 12:18:10 -07001565 pool->nextpool = ao->freepools;
1566 ao->freepools = pool;
1567 nf = ++ao->nfreepools;
1568
1569 /* All the rest is arena management. We just freed
1570 * a pool, and there are 4 cases for arena mgmt:
1571 * 1. If all the pools are free, return the arena to
1572 * the system free().
1573 * 2. If this is the only free pool in the arena,
1574 * add the arena back to the `usable_arenas` list.
1575 * 3. If the "next" arena has a smaller count of free
1576 * pools, we have to "slide this arena right" to
1577 * restore the invariant that usable_arenas is sorted
1578 * in increasing order of nfreepools.
1579 * 4. Else there's nothing more to do.
1580 */
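    /* A sketch of case 3: if nfreepools along usable_arenas reads
     * 1 -> 5 -> 5 and this free bumps the middle arena to 6, the list is
     * momentarily out of order; sliding that arena right restores the
     * sorted order 1 -> 5 -> 6. */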
1581 if (nf == ao->ntotalpools) {
1582 /* Case 1. First unlink ao from usable_arenas.
1583 */
1584 assert(ao->prevarena == NULL ||
1585 ao->prevarena->address != 0);
1586 assert(ao->nextarena == NULL ||
1587 ao->nextarena->address != 0);
1588
1589 /* Fix the pointer in the prevarena, or the
1590 * usable_arenas pointer.
1591 */
1592 if (ao->prevarena == NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001593 usable_arenas = ao->nextarena;
1594 assert(usable_arenas == NULL ||
1595 usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001596 }
1597 else {
1598 assert(ao->prevarena->nextarena == ao);
1599 ao->prevarena->nextarena =
1600 ao->nextarena;
1601 }
1602 /* Fix the pointer in the nextarena. */
1603 if (ao->nextarena != NULL) {
1604 assert(ao->nextarena->prevarena == ao);
1605 ao->nextarena->prevarena =
1606 ao->prevarena;
1607 }
1608 /* Record that this arena_object slot is
1609 * available to be reused.
1610 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001611 ao->nextarena = unused_arena_objects;
1612 unused_arena_objects = ao;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001613
1614 /* Free the entire arena. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001615 _PyObject_Arena.free(_PyObject_Arena.ctx,
Victor Stinner9ed83c42017-10-31 12:18:10 -07001616 (void *)ao->address, ARENA_SIZE);
1617 ao->address = 0; /* mark unassociated */
Victor Stinner9e87e772017-11-24 12:09:24 +01001618 --narenas_currently_allocated;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001619
1620 goto success;
1621 }
1622
1623 if (nf == 1) {
1624 /* Case 2. Put ao at the head of
1625 * usable_arenas. Note that because
1626 * ao->nfreepools was 0 before, ao isn't
1627 * currently on the usable_arenas list.
1628 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001629 ao->nextarena = usable_arenas;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001630 ao->prevarena = NULL;
Victor Stinner9e87e772017-11-24 12:09:24 +01001631 if (usable_arenas)
1632 usable_arenas->prevarena = ao;
1633 usable_arenas = ao;
1634 assert(usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001635
1636 goto success;
1637 }
1638
1639 /* If this arena is now out of order, we need to keep
1640 * the list sorted. The list is kept sorted so that
1641 * the "most full" arenas are used first, which allows
1642 * the nearly empty arenas to be completely freed. In
1643 * a few un-scientific tests, it seems like this
1644 * approach allowed a lot more memory to be freed.
1645 */
1646 if (ao->nextarena == NULL ||
1647 nf <= ao->nextarena->nfreepools) {
1648 /* Case 4. Nothing to do. */
1649 goto success;
1650 }
1651 /* Case 3: We have to move the arena towards the end
1652 * of the list, because it has more free pools than
1653 * the arena to its right.
1654 * First unlink ao from usable_arenas.
1655 */
1656 if (ao->prevarena != NULL) {
1657 /* ao isn't at the head of the list */
1658 assert(ao->prevarena->nextarena == ao);
1659 ao->prevarena->nextarena = ao->nextarena;
1660 }
1661 else {
1662 /* ao is at the head of the list */
Victor Stinner9e87e772017-11-24 12:09:24 +01001663 assert(usable_arenas == ao);
1664 usable_arenas = ao->nextarena;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001665 }
1666 ao->nextarena->prevarena = ao->prevarena;
1667
1668 /* Locate the new insertion point by iterating over
1669 * the list, using our nextarena pointer.
1670 */
1671 while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
1672 ao->prevarena = ao->nextarena;
1673 ao->nextarena = ao->nextarena->nextarena;
1674 }
1675
1676 /* Insert ao at this point. */
1677 assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
1678 assert(ao->prevarena->nextarena == ao->nextarena);
1679
1680 ao->prevarena->nextarena = ao;
1681 if (ao->nextarena != NULL) {
1682 ao->nextarena->prevarena = ao;
1683 }
1684
1685 /* Verify that the swaps worked. */
1686 assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
1687 assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
1688 assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
Victor Stinner9e87e772017-11-24 12:09:24 +01001689 assert((usable_arenas == ao && ao->prevarena == NULL)
Victor Stinner9ed83c42017-10-31 12:18:10 -07001690 || ao->prevarena->nextarena == ao);
1691
1692 goto success;
1693
1694success:
1695 UNLOCK();
1696 return 1;
1697}
1698
1699
1700static void
1701_PyObject_Free(void *ctx, void *p)
1702{
1703 /* PyObject_Free(NULL) has no effect */
1704 if (p == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001705 return;
1706 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001707
Victor Stinner9e87e772017-11-24 12:09:24 +01001708 _Py_AllocatedBlocks--;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001709 if (!pymalloc_free(ctx, p)) {
1710 /* pymalloc didn't allocate this address */
1711 PyMem_RawFree(p);
1712 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001713}
1714
Neil Schemenauera35c6882001-02-27 04:45:05 +00001715
Victor Stinner9ed83c42017-10-31 12:18:10 -07001716/* pymalloc realloc.
1717
1718 If nbytes==0, then as the Python docs promise, we do not treat this like
1719 free(p), and return a non-NULL result.
1720
1721 Return 1 if pymalloc reallocated memory and wrote the new pointer into
1722 newptr_p.
1723
1724 Return 0 if pymalloc didn't allocate p. */
1725static int
1726pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001727{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001728 void *bp;
1729 poolp pool;
1730 size_t size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001731
Victor Stinner9ed83c42017-10-31 12:18:10 -07001732 assert(p != NULL);
Georg Brandld492ad82008-07-23 16:13:07 +00001733
Benjamin Peterson05159c42009-12-03 03:01:27 +00001734#ifdef WITH_VALGRIND
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001735 /* Treat running_on_valgrind == -1 the same as 0 */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001736 if (UNLIKELY(running_on_valgrind > 0)) {
1737 return 0;
1738 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001739#endif
1740
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001741 pool = POOL_ADDR(p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001742 if (!address_in_range(p, pool)) {
1743 /* pymalloc is not managing this block.
1744
1745 If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
1746 over this block. However, if we do, we need to copy the valid data
1747 from the C-managed block to one of our blocks, and there's no
1748 portable way to know how much of the memory space starting at p is
1749 valid.
1750
1751 As bug 1185883 pointed out the hard way, it's possible that the
1752 C-managed block is "at the end" of allocated VM space, so that a
1753 memory fault can occur if we try to copy nbytes bytes starting at p.
1754 Instead we punt: let C continue to manage this block. */
1755 return 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001756 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07001757
1758 /* pymalloc is in charge of this block */
1759 size = INDEX2SIZE(pool->szidx);
1760 if (nbytes <= size) {
1761 /* The block is staying the same or shrinking.
1762
1763 If it's shrinking, there's a tradeoff: it costs cycles to copy the
1764 block to a smaller size class, but it wastes memory not to copy it.
1765
1766 The compromise here is to copy on shrink only if at least 25% of
1767 size can be shaved off. */
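        /* For example, with a 64-byte block: shrinking to 49 bytes keeps
         * the block in place (4*49 == 196 > 3*64 == 192), while shrinking
         * to 48 bytes (4*48 == 192) crosses the 25% threshold and the
         * block is copied into a smaller size class. */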
1768 if (4 * nbytes > 3 * size) {
1769 /* It's the same, or shrinking and new/old > 3/4. */
1770 *newptr_p = p;
1771 return 1;
1772 }
1773 size = nbytes;
1774 }
1775
1776 bp = _PyObject_Malloc(ctx, nbytes);
1777 if (bp != NULL) {
1778 memcpy(bp, p, size);
1779 _PyObject_Free(ctx, p);
1780 }
1781 *newptr_p = bp;
1782 return 1;
1783}
1784
1785
1786static void *
1787_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
1788{
1789 void *ptr2;
1790
1791 if (ptr == NULL) {
1792 return _PyObject_Malloc(ctx, nbytes);
1793 }
1794
1795 if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
1796 return ptr2;
1797 }
1798
1799 return PyMem_RawRealloc(ptr, nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001800}
1801
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001802#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001803
1804/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001805/* pymalloc not enabled: Redirect the entry points to malloc. These will
1806 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001807
Antoine Pitrou92840532012-12-17 23:05:59 +01001808Py_ssize_t
1809_Py_GetAllocatedBlocks(void)
1810{
1811 return 0;
1812}
1813
Tim Peters1221c0a2002-03-23 00:20:15 +00001814#endif /* WITH_PYMALLOC */
1815
Victor Stinner34be807c2016-03-14 12:04:26 +01001816
Tim Petersddea2082002-03-23 10:03:50 +00001817/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001818/* A cross-platform debugging allocator. This doesn't manage memory directly,
1819 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1820 */
Tim Petersddea2082002-03-23 10:03:50 +00001821
Tim Petersf6fb5012002-04-12 07:38:53 +00001822/* Special bytes broadcast into debug memory blocks at appropriate times.
1823 * Strings of these are unlikely to be valid addresses, floats, ints or
1824 * 7-bit ASCII.
1825 */
1826#undef CLEANBYTE
1827#undef DEADBYTE
1828#undef FORBIDDENBYTE
1829#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001830#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001831#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001832
Victor Stinner9e87e772017-11-24 12:09:24 +01001833static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1834
Tim Peterse0850172002-03-24 00:34:21 +00001835/* serialno is always incremented by calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001836 * to supply a single place to set a breakpoint.
1837 */
Tim Peterse0850172002-03-24 00:34:21 +00001838static void
Neil Schemenauerbd02b142002-03-28 21:05:38 +00001839bumpserialno(void)
Tim Peterse0850172002-03-24 00:34:21 +00001840{
Victor Stinner9e87e772017-11-24 12:09:24 +01001841 ++serialno;
Tim Peterse0850172002-03-24 00:34:21 +00001842}
1843
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001844#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001845
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001846/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1847static size_t
1848read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001849{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001850 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001851 size_t result = *q++;
1852 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001853
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001854 for (i = SST; --i > 0; ++q)
1855 result = (result << 8) | *q;
1856 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001857}
1858
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001859/* Write n as a big-endian size_t, MSB at address p, LSB at
1860 * p + sizeof(size_t) - 1.
1861 */
Tim Petersddea2082002-03-23 10:03:50 +00001862static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001863write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001864{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001865 uint8_t *q = (uint8_t *)p + SST - 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001866 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001867
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001868 for (i = SST; --i >= 0; --q) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07001869 *q = (uint8_t)(n & 0xff);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001870 n >>= 8;
1871 }
Tim Petersddea2082002-03-23 10:03:50 +00001872}
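/* For example, on a build where sizeof(size_t) == 8, write_size_t(p, 258)
 * stores the bytes 00 00 00 00 00 00 01 02 at p, and read_size_t(p) folds
 * them back into 258 regardless of the host's native byte order. */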
1873
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001874/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1875 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001876
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001877p[0: S]
1878 Number of bytes originally asked for. This is a size_t, big-endian (easier
1879 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02001880p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05001881 API ID. See PEP 445. This is a character, but seems undocumented.
1882p[S+1: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001883 Copies of FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001884p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001885 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001886 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001887 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001888 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001889p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001890 Copies of FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001891p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02001892 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1893 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001894 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001895 If "bad memory" is detected later, the serial number gives an
1896 excellent way to set a breakpoint on the next run, to capture the
1897 instant at which this block was passed out.
1898*/
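/* A worked sketch with 8-byte size_t (SST == 8) and a 4-byte request: the
 * underlying allocator is asked for 4 + 4*8 == 36 bytes.  p[0:8] holds 4
 * (big-endian), p[8] the API id, p[9:16] FORBIDDENBYTE, p[16:20] the
 * caller's CLEANBYTE-filled data, p[20:28] FORBIDDENBYTE, and p[28:36] the
 * big-endian serial number.  The caller receives &p[16]. */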
1899
Victor Stinner0507bf52013-07-07 02:05:46 +02001900static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001901_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001902{
Victor Stinner0507bf52013-07-07 02:05:46 +02001903 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02001904 uint8_t *p; /* base address of malloc'ed pad block */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001905 uint8_t *data; /* p + 2*SST == pointer to data bytes */
1906 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */
1907 size_t total; /* 2 * SST + nbytes + 2 * SST */
1908
1909 if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
1910 /* integer overflow: can't represent total as a Py_ssize_t */
1911 return NULL;
1912 }
1913 total = nbytes + 4 * SST;
1914
1915 /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
1916 * ^--- p ^--- data ^--- tail
1917 S: nbytes stored as size_t
1918 I: API identifier (1 byte)
1919 F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
1920 C: Clean bytes used later to store actual data
1921 N: Serial number stored as size_t */
1922
1923 if (use_calloc) {
1924 p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
1925 }
1926 else {
1927 p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
1928 }
1929 if (p == NULL) {
1930 return NULL;
1931 }
1932 data = p + 2*SST;
Tim Petersddea2082002-03-23 10:03:50 +00001933
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001934 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00001935
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001936 /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
1937 write_size_t(p, nbytes);
Benjamin Peterson19517e42016-09-18 19:22:22 -07001938 p[SST] = (uint8_t)api->api_id;
Victor Stinner0507bf52013-07-07 02:05:46 +02001939 memset(p + SST + 1, FORBIDDENBYTE, SST-1);
Tim Petersddea2082002-03-23 10:03:50 +00001940
Victor Stinner9ed83c42017-10-31 12:18:10 -07001941 if (nbytes > 0 && !use_calloc) {
1942 memset(data, CLEANBYTE, nbytes);
1943 }
Tim Petersddea2082002-03-23 10:03:50 +00001944
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001945 /* at tail, write pad (SST bytes) and serialno (SST bytes) */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001946 tail = data + nbytes;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001947 memset(tail, FORBIDDENBYTE, SST);
Victor Stinner9e87e772017-11-24 12:09:24 +01001948 write_size_t(tail + SST, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00001949
Victor Stinner9ed83c42017-10-31 12:18:10 -07001950 return data;
Tim Petersddea2082002-03-23 10:03:50 +00001951}
1952
Victor Stinnerdb067af2014-05-02 22:31:14 +02001953static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001954_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
Victor Stinnerdb067af2014-05-02 22:31:14 +02001955{
Victor Stinnerc4aec362016-03-14 22:26:53 +01001956 return _PyMem_DebugRawAlloc(0, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001957}
1958
1959static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001960_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
Victor Stinnerdb067af2014-05-02 22:31:14 +02001961{
1962 size_t nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001963 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001964 nbytes = nelem * elsize;
Victor Stinnerc4aec362016-03-14 22:26:53 +01001965 return _PyMem_DebugRawAlloc(1, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02001966}
1967
Victor Stinner9ed83c42017-10-31 12:18:10 -07001968
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001969/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001970 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00001971 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001972 Then calls the underlying free.
1973*/
Victor Stinner0507bf52013-07-07 02:05:46 +02001974static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01001975_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001976{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001977 /* PyMem_Free(NULL) has no effect */
1978 if (p == NULL) {
1979 return;
1980 }
1981
Victor Stinner0507bf52013-07-07 02:05:46 +02001982 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07001983 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001984 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00001985
Victor Stinner0507bf52013-07-07 02:05:46 +02001986 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001987 nbytes = read_size_t(q);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001988 nbytes += 4 * SST;
1989 memset(q, DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02001990 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00001991}
1992
Victor Stinner9ed83c42017-10-31 12:18:10 -07001993
Victor Stinner0507bf52013-07-07 02:05:46 +02001994static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001995_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00001996{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001997 if (p == NULL) {
1998 return _PyMem_DebugRawAlloc(0, ctx, nbytes);
1999 }
2000
Victor Stinner0507bf52013-07-07 02:05:46 +02002001 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002002 uint8_t *head; /* base address of malloc'ed pad block */
2003 uint8_t *data; /* pointer to data bytes */
2004 uint8_t *r;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002005 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */
2006 size_t total; /* 2 * SST + nbytes + 2 * SST */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002007 size_t original_nbytes;
Victor Stinner9e87e772017-11-24 12:09:24 +01002008 size_t block_serialno;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002009#define ERASED_SIZE 64
2010 uint8_t save[2*ERASED_SIZE]; /* A copy of erased bytes. */
Tim Petersddea2082002-03-23 10:03:50 +00002011
Victor Stinner0507bf52013-07-07 02:05:46 +02002012 _PyMem_DebugCheckAddress(api->api_id, p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002013
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002014 data = (uint8_t *)p;
2015 head = data - 2*SST;
2016 original_nbytes = read_size_t(head);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002017 if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
2018 /* integer overflow: can't represent total as a Py_ssize_t */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002019 return NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002020 }
2021 total = nbytes + 4*SST;
Tim Petersddea2082002-03-23 10:03:50 +00002022
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002023 tail = data + original_nbytes;
Victor Stinner9e87e772017-11-24 12:09:24 +01002024 block_serialno = read_size_t(tail + SST);
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002025 /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning, and
2026 ERASED_SIZE bytes at the end as dead, and save a copy of the erased bytes.
2027 */
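    /* Sketch: with ERASED_SIZE == 64 and a 200-byte block, the first and
     * last 64 data bytes are copied into save[], those regions plus the
     * header and trailer are wiped with DEADBYTE, and the saved bytes are
     * copied back after the underlying realloc; the middle 72 bytes ride
     * along untouched. */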
2028 if (original_nbytes <= sizeof(save)) {
2029 memcpy(save, data, original_nbytes);
2030 memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
2031 }
2032 else {
2033 memcpy(save, data, ERASED_SIZE);
2034 memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
2035 memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
2036 memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002037 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002038
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002039 /* Resize and add decorations. */
2040 r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
2041 if (r == NULL) {
2042 nbytes = original_nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002043 }
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002044 else {
2045 head = r;
2046 bumpserialno();
Victor Stinner9e87e772017-11-24 12:09:24 +01002047 block_serialno = serialno;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002048 }
2049
2050 write_size_t(head, nbytes);
2051 head[SST] = (uint8_t)api->api_id;
2052 memset(head + SST + 1, FORBIDDENBYTE, SST-1);
2053 data = head + 2*SST;
Victor Stinnerc4266362013-07-09 00:44:43 +02002054
Victor Stinner9ed83c42017-10-31 12:18:10 -07002055 tail = data + nbytes;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002056 memset(tail, FORBIDDENBYTE, SST);
Victor Stinner9e87e772017-11-24 12:09:24 +01002057 write_size_t(tail + SST, block_serialno);
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002058
2059 /* Restore saved bytes. */
2060 if (original_nbytes <= sizeof(save)) {
2061 memcpy(data, save, Py_MIN(nbytes, original_nbytes));
2062 }
2063 else {
2064 size_t i = original_nbytes - ERASED_SIZE;
2065 memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
2066 if (nbytes > i) {
2067 memcpy(data + i, &save[ERASED_SIZE],
2068 Py_MIN(nbytes - i, ERASED_SIZE));
2069 }
2070 }
2071
2072 if (r == NULL) {
2073 return NULL;
2074 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002075
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002076 if (nbytes > original_nbytes) {
2077 /* growing: mark new extra memory clean */
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002078 memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002079 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002080
Victor Stinner9ed83c42017-10-31 12:18:10 -07002081 return data;
Tim Petersddea2082002-03-23 10:03:50 +00002082}
2083
Victor Stinnerc4aec362016-03-14 22:26:53 +01002084static void
2085_PyMem_DebugCheckGIL(void)
2086{
Victor Stinnerc4aec362016-03-14 22:26:53 +01002087 if (!PyGILState_Check())
2088 Py_FatalError("Python memory allocator called "
2089 "without holding the GIL");
Victor Stinnerc4aec362016-03-14 22:26:53 +01002090}
2091
2092static void *
2093_PyMem_DebugMalloc(void *ctx, size_t nbytes)
2094{
2095 _PyMem_DebugCheckGIL();
2096 return _PyMem_DebugRawMalloc(ctx, nbytes);
2097}
2098
2099static void *
2100_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
2101{
2102 _PyMem_DebugCheckGIL();
2103 return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
2104}
2105
Victor Stinner9ed83c42017-10-31 12:18:10 -07002106
Victor Stinnerc4aec362016-03-14 22:26:53 +01002107static void
2108_PyMem_DebugFree(void *ctx, void *ptr)
2109{
2110 _PyMem_DebugCheckGIL();
Victor Stinner0aed3a42016-03-23 11:30:43 +01002111 _PyMem_DebugRawFree(ctx, ptr);
Victor Stinnerc4aec362016-03-14 22:26:53 +01002112}
2113
Victor Stinner9ed83c42017-10-31 12:18:10 -07002114
Victor Stinnerc4aec362016-03-14 22:26:53 +01002115static void *
2116_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
2117{
2118 _PyMem_DebugCheckGIL();
2119 return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
2120}
2121
Tim Peters7ccfadf2002-04-01 06:04:21 +00002122/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00002123 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00002124 * and call Py_FatalError to kill the program.
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002125 * The API id is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00002126 */
Victor Stinner0507bf52013-07-07 02:05:46 +02002127static void
2128_PyMem_DebugCheckAddress(char api, const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002129{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002130 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002131 char msgbuf[64];
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02002132 const char *msg;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002133 size_t nbytes;
Benjamin Peterson19517e42016-09-18 19:22:22 -07002134 const uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002135 int i;
2136 char id;
Tim Petersddea2082002-03-23 10:03:50 +00002137
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002138 if (p == NULL) {
2139 msg = "didn't expect a NULL pointer";
2140 goto error;
2141 }
Tim Petersddea2082002-03-23 10:03:50 +00002142
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002143 /* Check the API id */
2144 id = (char)q[-SST];
2145 if (id != api) {
2146 msg = msgbuf;
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02002147 snprintf(msgbuf, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002148 msgbuf[sizeof(msgbuf)-1] = 0;
2149 goto error;
2150 }
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002151
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002152 /* Check the stuff at the start of p first: if there's underwrite
2153 * corruption, the number-of-bytes field may be nuts, and checking
2154 * the tail could lead to a segfault then.
2155 */
2156 for (i = SST-1; i >= 1; --i) {
2157 if (*(q-i) != FORBIDDENBYTE) {
2158 msg = "bad leading pad byte";
2159 goto error;
2160 }
2161 }
Tim Petersddea2082002-03-23 10:03:50 +00002162
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002163 nbytes = read_size_t(q - 2*SST);
2164 tail = q + nbytes;
2165 for (i = 0; i < SST; ++i) {
2166 if (tail[i] != FORBIDDENBYTE) {
2167 msg = "bad trailing pad byte";
2168 goto error;
2169 }
2170 }
Tim Petersddea2082002-03-23 10:03:50 +00002171
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002172 return;
Tim Petersd1139e02002-03-28 07:32:11 +00002173
2174error:
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002175 _PyObject_DebugDumpAddress(p);
2176 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00002177}
2178
Tim Peters7ccfadf2002-04-01 06:04:21 +00002179/* Display info to stderr about the memory block at p. */
Victor Stinner0507bf52013-07-07 02:05:46 +02002180static void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00002181_PyObject_DebugDumpAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002182{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002183 const uint8_t *q = (const uint8_t *)p;
2184 const uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002185 size_t nbytes, serial;
2186 int i;
2187 int ok;
2188 char id;
Tim Petersddea2082002-03-23 10:03:50 +00002189
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002190 fprintf(stderr, "Debug memory block at address p=%p:", p);
2191 if (p == NULL) {
2192 fprintf(stderr, "\n");
2193 return;
2194 }
2195 id = (char)q[-SST];
2196 fprintf(stderr, " API '%c'\n", id);
Tim Petersddea2082002-03-23 10:03:50 +00002197
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002198 nbytes = read_size_t(q - 2*SST);
2199 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
2200 "requested\n", nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00002201
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002202 /* In case this is nuts, check the leading pad bytes first. */
2203 fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
2204 ok = 1;
2205 for (i = 1; i <= SST-1; ++i) {
2206 if (*(q-i) != FORBIDDENBYTE) {
2207 ok = 0;
2208 break;
2209 }
2210 }
2211 if (ok)
2212 fputs("FORBIDDENBYTE, as expected.\n", stderr);
2213 else {
2214 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
2215 FORBIDDENBYTE);
2216 for (i = SST-1; i >= 1; --i) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07002217 const uint8_t byte = *(q-i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002218 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
2219 if (byte != FORBIDDENBYTE)
2220 fputs(" *** OUCH", stderr);
2221 fputc('\n', stderr);
2222 }
Tim Peters449b5a82002-04-28 06:14:45 +00002223
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002224 fputs(" Because memory is corrupted at the start, the "
2225 "count of bytes requested\n"
2226 " may be bogus, and checking the trailing pad "
2227 "bytes may segfault.\n", stderr);
2228 }
Tim Petersddea2082002-03-23 10:03:50 +00002229
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002230 tail = q + nbytes;
2231 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
2232 ok = 1;
2233 for (i = 0; i < SST; ++i) {
2234 if (tail[i] != FORBIDDENBYTE) {
2235 ok = 0;
2236 break;
2237 }
2238 }
2239 if (ok)
2240 fputs("FORBIDDENBYTE, as expected.\n", stderr);
2241 else {
2242 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002243 FORBIDDENBYTE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002244 for (i = 0; i < SST; ++i) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07002245 const uint8_t byte = tail[i];
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002246 fprintf(stderr, " at tail+%d: 0x%02x",
Stefan Krah735bb122010-11-26 10:54:09 +00002247 i, byte);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002248 if (byte != FORBIDDENBYTE)
2249 fputs(" *** OUCH", stderr);
2250 fputc('\n', stderr);
2251 }
2252 }
Tim Petersddea2082002-03-23 10:03:50 +00002253
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002254 serial = read_size_t(tail + SST);
2255 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
2256 "u to debug malloc/realloc.\n", serial);
Tim Petersddea2082002-03-23 10:03:50 +00002257
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002258 if (nbytes > 0) {
2259 i = 0;
2260 fputs(" Data at p:", stderr);
2261 /* print up to 8 bytes at the start */
2262 while (q < tail && i < 8) {
2263 fprintf(stderr, " %02x", *q);
2264 ++i;
2265 ++q;
2266 }
2267 /* and up to 8 at the end */
2268 if (q < tail) {
2269 if (tail - q > 8) {
2270 fputs(" ...", stderr);
2271 q = tail - 8;
2272 }
2273 while (q < tail) {
2274 fprintf(stderr, " %02x", *q);
2275 ++q;
2276 }
2277 }
2278 fputc('\n', stderr);
2279 }
Victor Stinner0611c262016-03-15 22:22:13 +01002280 fputc('\n', stderr);
2281
2282 fflush(stderr);
2283 _PyMem_DumpTraceback(fileno(stderr), p);
Tim Petersddea2082002-03-23 10:03:50 +00002284}
2285
David Malcolm49526f42012-06-22 14:55:41 -04002286
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002287static size_t
David Malcolm49526f42012-06-22 14:55:41 -04002288printone(FILE *out, const char* msg, size_t value)
Tim Peters16bcb6b2002-04-05 05:45:31 +00002289{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002290 int i, k;
2291 char buf[100];
2292 size_t origvalue = value;
Tim Peters16bcb6b2002-04-05 05:45:31 +00002293
David Malcolm49526f42012-06-22 14:55:41 -04002294 fputs(msg, out);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002295 for (i = (int)strlen(msg); i < 35; ++i)
David Malcolm49526f42012-06-22 14:55:41 -04002296 fputc(' ', out);
2297 fputc('=', out);
Tim Peters49f26812002-04-06 01:45:35 +00002298
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002299 /* Write the value with commas. */
2300 i = 22;
2301 buf[i--] = '\0';
2302 buf[i--] = '\n';
2303 k = 3;
2304 do {
2305 size_t nextvalue = value / 10;
Benjamin Peterson2dba1ee2013-02-20 16:54:30 -05002306 unsigned int digit = (unsigned int)(value - nextvalue * 10);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002307 value = nextvalue;
2308 buf[i--] = (char)(digit + '0');
2309 --k;
2310 if (k == 0 && value && i >= 0) {
2311 k = 3;
2312 buf[i--] = ',';
2313 }
2314 } while (value && i >= 0);
Tim Peters49f26812002-04-06 01:45:35 +00002315
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002316 while (i >= 0)
2317 buf[i--] = ' ';
David Malcolm49526f42012-06-22 14:55:41 -04002318 fputs(buf, out);
Tim Peters49f26812002-04-06 01:45:35 +00002319
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002320 return origvalue;
Tim Peters16bcb6b2002-04-05 05:45:31 +00002321}
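/* For example, printone(out, "# arenas allocated total", 1234567) pads the
 * label with spaces to 35 columns, writes '=', then prints the value
 * right-justified with comma separators, i.e. "1,234,567". */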
2322
David Malcolm49526f42012-06-22 14:55:41 -04002323void
2324_PyDebugAllocatorStats(FILE *out,
2325 const char *block_name, int num_blocks, size_t sizeof_block)
2326{
2327 char buf1[128];
2328 char buf2[128];
2329 PyOS_snprintf(buf1, sizeof(buf1),
Tim Peterseaa3bcc2013-09-05 22:57:04 -05002330 "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
David Malcolm49526f42012-06-22 14:55:41 -04002331 num_blocks, block_name, sizeof_block);
2332 PyOS_snprintf(buf2, sizeof(buf2),
2333 "%48s ", buf1);
2334 (void)printone(out, buf2, num_blocks * sizeof_block);
2335}
2336
Victor Stinner34be807c2016-03-14 12:04:26 +01002337
David Malcolm49526f42012-06-22 14:55:41 -04002338#ifdef WITH_PYMALLOC
2339
Victor Stinner34be807c2016-03-14 12:04:26 +01002340#ifdef Py_DEBUG
2341/* Is target in the list? The list is traversed via the nextpool pointers.
2342 * The list may be NULL-terminated, or circular. Return 1 if target is in
2343 * list, else 0.
2344 */
2345static int
2346pool_is_in_list(const poolp target, poolp list)
2347{
2348 poolp origlist = list;
2349 assert(target != NULL);
2350 if (list == NULL)
2351 return 0;
2352 do {
2353 if (target == list)
2354 return 1;
2355 list = list->nextpool;
2356 } while (list != NULL && list != origlist);
2357 return 0;
2358}
2359#endif
2360
David Malcolm49526f42012-06-22 14:55:41 -04002361/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00002362 * In Py_DEBUG mode, also perform some expensive internal consistency
2363 * checks.
2364 */
Tim Peters7ccfadf2002-04-01 06:04:21 +00002365void
David Malcolm49526f42012-06-22 14:55:41 -04002366_PyObject_DebugMallocStats(FILE *out)
Tim Peters7ccfadf2002-04-01 06:04:21 +00002367{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002368 uint i;
2369 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
2370 /* # of pools, allocated blocks, and free blocks per class index */
2371 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2372 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2373 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2374 /* total # of allocated bytes in used and full pools */
2375 size_t allocated_bytes = 0;
2376 /* total # of available bytes in used pools */
2377 size_t available_bytes = 0;
2378 /* # of free pools + pools not yet carved out of current arena */
2379 uint numfreepools = 0;
2380 /* # of bytes for arena alignment padding */
2381 size_t arena_alignment = 0;
2382 /* # of bytes in used and full pools used for pool_headers */
2383 size_t pool_header_bytes = 0;
2384 /* # of bytes in used and full pools wasted due to quantization,
2385 * i.e. the necessarily leftover space at the ends of used and
2386 * full pools.
2387 */
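    /* Sketch, assuming 4096-byte pools and a 48-byte POOL_OVERHEAD: for the
     * 80-byte size class, (4096 - 48) % 80 == 48, so every used or full
     * pool of that class strands 48 bytes it can never hand out. */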
2388 size_t quantization = 0;
2389 /* # of arenas actually allocated. */
2390 size_t narenas = 0;
2391 /* running total -- should equal narenas * ARENA_SIZE */
2392 size_t total;
2393 char buf[128];
Tim Peters7ccfadf2002-04-01 06:04:21 +00002394
David Malcolm49526f42012-06-22 14:55:41 -04002395 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002396 SMALL_REQUEST_THRESHOLD, numclasses);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002397
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002398 for (i = 0; i < numclasses; ++i)
2399 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002400
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002401 /* Because full pools aren't linked to from anything, it's easiest
2402 * to march over all the arenas. If we're lucky, most of the memory
2403 * will be living in full pools -- would be a shame to miss them.
2404 */
Victor Stinner9e87e772017-11-24 12:09:24 +01002405 for (i = 0; i < maxarenas; ++i) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002406 uint j;
Victor Stinner9e87e772017-11-24 12:09:24 +01002407 uintptr_t base = arenas[i].address;
Thomas Woutersa9773292006-04-21 09:43:23 +00002408
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002409 /* Skip arenas which are not allocated. */
Victor Stinner9e87e772017-11-24 12:09:24 +01002410 if (arenas[i].address == (uintptr_t)NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002411 continue;
2412 narenas += 1;
Thomas Woutersa9773292006-04-21 09:43:23 +00002413
Victor Stinner9e87e772017-11-24 12:09:24 +01002414 numfreepools += arenas[i].nfreepools;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002415
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002416 /* round up to pool alignment */
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002417 if (base & (uintptr_t)POOL_SIZE_MASK) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002418 arena_alignment += POOL_SIZE;
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002419 base &= ~(uintptr_t)POOL_SIZE_MASK;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002420 base += POOL_SIZE;
2421 }
Tim Peters7ccfadf2002-04-01 06:04:21 +00002422
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002423 /* visit every pool in the arena */
Victor Stinner9e87e772017-11-24 12:09:24 +01002424 assert(base <= (uintptr_t) arenas[i].pool_address);
2425 for (j = 0; base < (uintptr_t) arenas[i].pool_address;
Benjamin Peterson19517e42016-09-18 19:22:22 -07002426 ++j, base += POOL_SIZE) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002427 poolp p = (poolp)base;
2428 const uint sz = p->szidx;
2429 uint freeblocks;
Tim Peters08d82152002-04-18 22:25:03 +00002430
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002431 if (p->ref.count == 0) {
2432 /* currently unused */
Victor Stinner34be807c2016-03-14 12:04:26 +01002433#ifdef Py_DEBUG
Victor Stinner9e87e772017-11-24 12:09:24 +01002434 assert(pool_is_in_list(p, arenas[i].freepools));
Victor Stinner34be807c2016-03-14 12:04:26 +01002435#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002436 continue;
2437 }
2438 ++numpools[sz];
2439 numblocks[sz] += p->ref.count;
2440 freeblocks = NUMBLOCKS(sz) - p->ref.count;
2441 numfreeblocks[sz] += freeblocks;
Tim Peters08d82152002-04-18 22:25:03 +00002442#ifdef Py_DEBUG
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002443 if (freeblocks > 0)
Victor Stinner9e87e772017-11-24 12:09:24 +01002444 assert(pool_is_in_list(p, usedpools[sz + sz]));
Tim Peters08d82152002-04-18 22:25:03 +00002445#endif
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002446 }
2447 }
Victor Stinner9e87e772017-11-24 12:09:24 +01002448 assert(narenas == narenas_currently_allocated);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002449
David Malcolm49526f42012-06-22 14:55:41 -04002450 fputc('\n', out);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002451 fputs("class size num pools blocks in use avail blocks\n"
2452 "----- ---- --------- ------------- ------------\n",
David Malcolm49526f42012-06-22 14:55:41 -04002453 out);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002454
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002455 for (i = 0; i < numclasses; ++i) {
2456 size_t p = numpools[i];
2457 size_t b = numblocks[i];
2458 size_t f = numfreeblocks[i];
2459 uint size = INDEX2SIZE(i);
2460 if (p == 0) {
2461 assert(b == 0 && f == 0);
2462 continue;
2463 }
David Malcolm49526f42012-06-22 14:55:41 -04002464 fprintf(out, "%5u %6u "
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002465 "%11" PY_FORMAT_SIZE_T "u "
2466 "%15" PY_FORMAT_SIZE_T "u "
2467 "%13" PY_FORMAT_SIZE_T "u\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002468 i, size, p, b, f);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002469 allocated_bytes += b * size;
2470 available_bytes += f * size;
2471 pool_header_bytes += p * POOL_OVERHEAD;
2472 quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
2473 }
David Malcolm49526f42012-06-22 14:55:41 -04002474 fputc('\n', out);
Victor Stinner34be807c2016-03-14 12:04:26 +01002475 if (_PyMem_DebugEnabled())
Victor Stinner9e87e772017-11-24 12:09:24 +01002476 (void)printone(out, "# times object malloc called", serialno);
2477 (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
2478 (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
2479 (void)printone(out, "# arenas highwater mark", narenas_highwater);
David Malcolm49526f42012-06-22 14:55:41 -04002480 (void)printone(out, "# arenas allocated current", narenas);
Thomas Woutersa9773292006-04-21 09:43:23 +00002481
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002482 PyOS_snprintf(buf, sizeof(buf),
2483 "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
2484 narenas, ARENA_SIZE);
David Malcolm49526f42012-06-22 14:55:41 -04002485 (void)printone(out, buf, narenas * ARENA_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00002486
David Malcolm49526f42012-06-22 14:55:41 -04002487 fputc('\n', out);
Tim Peters16bcb6b2002-04-05 05:45:31 +00002488
David Malcolm49526f42012-06-22 14:55:41 -04002489 total = printone(out, "# bytes in allocated blocks", allocated_bytes);
2490 total += printone(out, "# bytes in available blocks", available_bytes);
Tim Peters49f26812002-04-06 01:45:35 +00002491
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002492 PyOS_snprintf(buf, sizeof(buf),
2493 "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
David Malcolm49526f42012-06-22 14:55:41 -04002494 total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);
Tim Peters16bcb6b2002-04-05 05:45:31 +00002495
David Malcolm49526f42012-06-22 14:55:41 -04002496 total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
2497 total += printone(out, "# bytes lost to quantization", quantization);
2498 total += printone(out, "# bytes lost to arena alignment", arena_alignment);
2499 (void)printone(out, "Total", total);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002500}
2501
David Malcolm49526f42012-06-22 14:55:41 -04002502#endif /* #ifdef WITH_PYMALLOC */