blob: 76ff6f9c99bc9c77d45b513fd39ec92cca880789 [file] [log] [blame]
Tim Peters1221c0a2002-03-23 00:20:15 +00001#include "Python.h"
Victor Stinnerd9ea5ca2020-04-15 02:57:50 +02002#include "pycore_pymem.h" // _PyTraceMalloc_Config
Tim Peters1221c0a2002-03-23 00:20:15 +00003
Benjamin Peterson3924f932016-09-18 19:12:48 -07004#include <stdbool.h>
5
Victor Stinner0611c262016-03-15 22:22:13 +01006
7/* Defined in tracemalloc.c */
8extern void _PyMem_DumpTraceback(int fd, const void *ptr);
9
10
Victor Stinner0507bf52013-07-07 02:05:46 +020011/* Python's malloc wrappers (see pymem.h) */
12
Victor Stinner34be8072016-03-14 12:04:26 +010013#undef uint
14#define uint unsigned int /* assuming >= 16 bits */
15
Victor Stinner0507bf52013-07-07 02:05:46 +020016/* Forward declaration */
Victor Stinnerc4aec362016-03-14 22:26:53 +010017static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
18static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
19static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
Victor Stinner9ed83c42017-10-31 12:18:10 -070020static void _PyMem_DebugRawFree(void *ctx, void *ptr);
Victor Stinnerc4aec362016-03-14 22:26:53 +010021
Victor Stinner0507bf52013-07-07 02:05:46 +020022static void* _PyMem_DebugMalloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020023static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020024static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
Victor Stinnerc4aec362016-03-14 22:26:53 +010025static void _PyMem_DebugFree(void *ctx, void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020026
27static void _PyObject_DebugDumpAddress(const void *p);
Victor Stinner9e5d30c2020-03-07 00:54:20 +010028static void _PyMem_DebugCheckAddress(const char *func, char api_id, const void *p);
Victor Stinner0507bf52013-07-07 02:05:46 +020029
Victor Stinner5d39e042017-11-29 17:20:38 +010030static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain);
31
Nick Coghlan6ba64f42013-09-29 00:28:55 +100032#if defined(__has_feature) /* Clang */
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030033# if __has_feature(address_sanitizer) /* is ASAN enabled? */
Batuhan Taşkayac0052f32019-12-27 05:51:34 +030034# define _Py_NO_SANITIZE_ADDRESS \
35 __attribute__((no_sanitize("address")))
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030036# endif
37# if __has_feature(thread_sanitizer) /* is TSAN enabled? */
38# define _Py_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
39# endif
40# if __has_feature(memory_sanitizer) /* is MSAN enabled? */
41# define _Py_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
42# endif
43#elif defined(__GNUC__)
44# if defined(__SANITIZE_ADDRESS__) /* GCC 4.8+, is ASAN enabled? */
Batuhan Taşkayac0052f32019-12-27 05:51:34 +030045# define _Py_NO_SANITIZE_ADDRESS \
46 __attribute__((no_sanitize_address))
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030047# endif
Hai Shi7e479c82019-08-14 04:50:19 -050048 // TSAN is supported since GCC 5.1, but __SANITIZE_THREAD__ macro
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030049 // is provided only since GCC 7.
Hai Shi7e479c82019-08-14 04:50:19 -050050# if __GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030051# define _Py_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
52# endif
53#endif
54
Batuhan Taşkayac0052f32019-12-27 05:51:34 +030055#ifndef _Py_NO_SANITIZE_ADDRESS
56# define _Py_NO_SANITIZE_ADDRESS
Alexey Izbyshevfd3a91c2018-11-12 02:14:51 +030057#endif
58#ifndef _Py_NO_SANITIZE_THREAD
59# define _Py_NO_SANITIZE_THREAD
60#endif
61#ifndef _Py_NO_SANITIZE_MEMORY
62# define _Py_NO_SANITIZE_MEMORY
Nick Coghlan6ba64f42013-09-29 00:28:55 +100063#endif
64
Tim Peters1221c0a2002-03-23 00:20:15 +000065#ifdef WITH_PYMALLOC
66
Victor Stinner0507bf52013-07-07 02:05:46 +020067#ifdef MS_WINDOWS
68# include <windows.h>
69#elif defined(HAVE_MMAP)
70# include <sys/mman.h>
71# ifdef MAP_ANONYMOUS
72# define ARENAS_USE_MMAP
73# endif
Antoine Pitrou6f26be02011-05-03 18:18:59 +020074#endif
75
Victor Stinner0507bf52013-07-07 02:05:46 +020076/* Forward declaration */
77static void* _PyObject_Malloc(void *ctx, size_t size);
Victor Stinnerdb067af2014-05-02 22:31:14 +020078static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
Victor Stinner0507bf52013-07-07 02:05:46 +020079static void _PyObject_Free(void *ctx, void *p);
80static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
Martin v. Löwiscd83fa82013-06-27 12:23:29 +020081#endif
82
Victor Stinner0507bf52013-07-07 02:05:46 +020083
Victor Stinner9e00e802018-10-25 13:31:16 +020084/* bpo-35053: Declare tracemalloc configuration here rather than
85 Modules/_tracemalloc.c because _tracemalloc can be compiled as dynamic
86 library, whereas _Py_NewReference() requires it. */
87struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT;
88
89
static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1): some systems return NULL for
       malloc(0), which would be treated as an error, and some return a
       pointer with no memory behind it, which would break pymalloc.
       Requesting at least one byte avoids both problems. */
    return malloc(size ? size : 1);
}
101
static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1): a zero-size request may
       yield NULL (treated as an error) or a pointer with no memory behind
       it (which would break pymalloc), so allocate one byte instead. */
    if (nelem == 0 || elsize == 0) {
        nelem = elsize = 1;
    }
    return calloc(nelem, elsize);
}
115
static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    /* Never pass 0 to realloc(): see _PyMem_RawMalloc() for why a
       zero-size request is rounded up to one byte. */
    return realloc(ptr, size ? size : 1);
}
123
static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    /* Thin adapter around free(); the ctx cookie is unused. */
    free(ptr);
}
129
130
131#ifdef MS_WINDOWS
132static void *
133_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
134{
135 return VirtualAlloc(NULL, size,
136 MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
137}
138
139static void
140_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
141{
Victor Stinner725e6682013-07-07 03:06:16 +0200142 VirtualFree(ptr, 0, MEM_RELEASE);
Victor Stinner0507bf52013-07-07 02:05:46 +0200143}
144
145#elif defined(ARENAS_USE_MMAP)
146static void *
147_PyObject_ArenaMmap(void *ctx, size_t size)
148{
149 void *ptr;
150 ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
151 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
152 if (ptr == MAP_FAILED)
153 return NULL;
154 assert(ptr != NULL);
155 return ptr;
156}
157
158static void
159_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
160{
161 munmap(ptr, size);
162}
163
164#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    /* Fallback arena allocator for platforms without VirtualAlloc or
       anonymous mmap. */
    return malloc(size);
}
170
static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    /* The size is unused: malloc-based arenas are released with free(). */
    free(ptr);
}
176#endif
177
Victor Stinner5d39e042017-11-29 17:20:38 +0100178#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
Victor Stinner0507bf52013-07-07 02:05:46 +0200179#ifdef WITH_PYMALLOC
Victor Stinner5d39e042017-11-29 17:20:38 +0100180# define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
Victor Stinner0507bf52013-07-07 02:05:46 +0200181#endif
Victor Stinner5d39e042017-11-29 17:20:38 +0100182
183#define PYRAW_ALLOC MALLOC_ALLOC
184#ifdef WITH_PYMALLOC
185# define PYOBJ_ALLOC PYMALLOC_ALLOC
186#else
187# define PYOBJ_ALLOC MALLOC_ALLOC
188#endif
189#define PYMEM_ALLOC PYOBJ_ALLOC
Victor Stinner0507bf52013-07-07 02:05:46 +0200190
Victor Stinner0507bf52013-07-07 02:05:46 +0200191typedef struct {
192 /* We tag each block with an API ID in order to tag API violations */
193 char api_id;
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200194 PyMemAllocatorEx alloc;
Victor Stinner0507bf52013-07-07 02:05:46 +0200195} debug_alloc_api_t;
196static struct {
197 debug_alloc_api_t raw;
198 debug_alloc_api_t mem;
199 debug_alloc_api_t obj;
200} _PyMem_Debug = {
Victor Stinner5d39e042017-11-29 17:20:38 +0100201 {'r', PYRAW_ALLOC},
202 {'m', PYMEM_ALLOC},
203 {'o', PYOBJ_ALLOC}
Victor Stinner0507bf52013-07-07 02:05:46 +0200204 };
205
Victor Stinner5d39e042017-11-29 17:20:38 +0100206#define PYDBGRAW_ALLOC \
207 {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
208#define PYDBGMEM_ALLOC \
209 {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
210#define PYDBGOBJ_ALLOC \
211 {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
Victor Stinner0507bf52013-07-07 02:05:46 +0200212
Victor Stinner9e87e772017-11-24 12:09:24 +0100213#ifdef Py_DEBUG
Victor Stinner5d39e042017-11-29 17:20:38 +0100214static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC;
215static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC;
216static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC;
Victor Stinner9e87e772017-11-24 12:09:24 +0100217#else
Victor Stinner5d39e042017-11-29 17:20:38 +0100218static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC;
219static PyMemAllocatorEx _PyMem = PYMEM_ALLOC;
220static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC;
Victor Stinner9e87e772017-11-24 12:09:24 +0100221#endif
Eric Snow2ebc5ce2017-09-07 23:51:28 -0600222
Victor Stinner0507bf52013-07-07 02:05:46 +0200223
Victor Stinner5d39e042017-11-29 17:20:38 +0100224static int
225pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug,
226 PyMemAllocatorEx *old_alloc)
227{
228 if (old_alloc != NULL) {
229 PyMem_GetAllocator(domain, old_alloc);
230 }
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800231
Victor Stinner5d39e042017-11-29 17:20:38 +0100232
233 PyMemAllocatorEx new_alloc;
234 switch(domain)
235 {
236 case PYMEM_DOMAIN_RAW:
237 new_alloc = (PyMemAllocatorEx)PYRAW_ALLOC;
238 break;
239 case PYMEM_DOMAIN_MEM:
240 new_alloc = (PyMemAllocatorEx)PYMEM_ALLOC;
241 break;
242 case PYMEM_DOMAIN_OBJ:
243 new_alloc = (PyMemAllocatorEx)PYOBJ_ALLOC;
244 break;
245 default:
246 /* unknown domain */
247 return -1;
248 }
249 PyMem_SetAllocator(domain, &new_alloc);
250 if (debug) {
251 _PyMem_SetupDebugHooksDomain(domain);
252 }
253 return 0;
254}
255
256
257int
258_PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
259 PyMemAllocatorEx *old_alloc)
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800260{
Victor Stinnerccb04422017-11-16 03:20:31 -0800261#ifdef Py_DEBUG
Victor Stinner5d39e042017-11-29 17:20:38 +0100262 const int debug = 1;
Victor Stinnerccb04422017-11-16 03:20:31 -0800263#else
Victor Stinner5d39e042017-11-29 17:20:38 +0100264 const int debug = 0;
Victor Stinnerccb04422017-11-16 03:20:31 -0800265#endif
Victor Stinner5d39e042017-11-29 17:20:38 +0100266 return pymem_set_default_allocator(domain, debug, old_alloc);
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800267}
Victor Stinner0507bf52013-07-07 02:05:46 +0200268
Victor Stinner5d39e042017-11-29 17:20:38 +0100269
Victor Stinner34be8072016-03-14 12:04:26 +0100270int
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200271_PyMem_GetAllocatorName(const char *name, PyMemAllocatorName *allocator)
Victor Stinner34be8072016-03-14 12:04:26 +0100272{
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200273 if (name == NULL || *name == '\0') {
Victor Stinner34be8072016-03-14 12:04:26 +0100274 /* PYTHONMALLOC is empty or is not set or ignored (-E/-I command line
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200275 nameions): use default memory allocators */
276 *allocator = PYMEM_ALLOCATOR_DEFAULT;
Victor Stinner34be8072016-03-14 12:04:26 +0100277 }
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200278 else if (strcmp(name, "default") == 0) {
279 *allocator = PYMEM_ALLOCATOR_DEFAULT;
280 }
281 else if (strcmp(name, "debug") == 0) {
282 *allocator = PYMEM_ALLOCATOR_DEBUG;
283 }
284#ifdef WITH_PYMALLOC
285 else if (strcmp(name, "pymalloc") == 0) {
286 *allocator = PYMEM_ALLOCATOR_PYMALLOC;
287 }
288 else if (strcmp(name, "pymalloc_debug") == 0) {
289 *allocator = PYMEM_ALLOCATOR_PYMALLOC_DEBUG;
290 }
291#endif
292 else if (strcmp(name, "malloc") == 0) {
293 *allocator = PYMEM_ALLOCATOR_MALLOC;
294 }
295 else if (strcmp(name, "malloc_debug") == 0) {
296 *allocator = PYMEM_ALLOCATOR_MALLOC_DEBUG;
297 }
298 else {
299 /* unknown allocator */
300 return -1;
301 }
302 return 0;
303}
Victor Stinner34be8072016-03-14 12:04:26 +0100304
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200305
306int
307_PyMem_SetupAllocators(PyMemAllocatorName allocator)
308{
309 switch (allocator) {
310 case PYMEM_ALLOCATOR_NOT_SET:
311 /* do nothing */
312 break;
313
314 case PYMEM_ALLOCATOR_DEFAULT:
Victor Stinner5d39e042017-11-29 17:20:38 +0100315 (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, NULL);
316 (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_MEM, NULL);
317 (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_OBJ, NULL);
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200318 break;
319
320 case PYMEM_ALLOCATOR_DEBUG:
Victor Stinner5d39e042017-11-29 17:20:38 +0100321 (void)pymem_set_default_allocator(PYMEM_DOMAIN_RAW, 1, NULL);
322 (void)pymem_set_default_allocator(PYMEM_DOMAIN_MEM, 1, NULL);
323 (void)pymem_set_default_allocator(PYMEM_DOMAIN_OBJ, 1, NULL);
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200324 break;
325
Victor Stinner34be8072016-03-14 12:04:26 +0100326#ifdef WITH_PYMALLOC
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200327 case PYMEM_ALLOCATOR_PYMALLOC:
328 case PYMEM_ALLOCATOR_PYMALLOC_DEBUG:
329 {
Victor Stinner5d39e042017-11-29 17:20:38 +0100330 PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
331 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
Victor Stinner34be8072016-03-14 12:04:26 +0100332
Victor Stinner5d39e042017-11-29 17:20:38 +0100333 PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
334 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &pymalloc);
335 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &pymalloc);
Victor Stinner34be8072016-03-14 12:04:26 +0100336
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200337 if (allocator == PYMEM_ALLOCATOR_PYMALLOC_DEBUG) {
Victor Stinner34be8072016-03-14 12:04:26 +0100338 PyMem_SetupDebugHooks();
Victor Stinner5d39e042017-11-29 17:20:38 +0100339 }
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200340 break;
Victor Stinner34be8072016-03-14 12:04:26 +0100341 }
342#endif
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200343
344 case PYMEM_ALLOCATOR_MALLOC:
345 case PYMEM_ALLOCATOR_MALLOC_DEBUG:
346 {
Victor Stinner5d39e042017-11-29 17:20:38 +0100347 PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
348 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
349 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &malloc_alloc);
350 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &malloc_alloc);
351
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200352 if (allocator == PYMEM_ALLOCATOR_MALLOC_DEBUG) {
Victor Stinner5d39e042017-11-29 17:20:38 +0100353 PyMem_SetupDebugHooks();
354 }
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200355 break;
Victor Stinner5d39e042017-11-29 17:20:38 +0100356 }
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200357
358 default:
Victor Stinner34be8072016-03-14 12:04:26 +0100359 /* unknown allocator */
360 return -1;
361 }
362 return 0;
363}
364
Victor Stinner5d39e042017-11-29 17:20:38 +0100365
366static int
367pymemallocator_eq(PyMemAllocatorEx *a, PyMemAllocatorEx *b)
368{
369 return (memcmp(a, b, sizeof(PyMemAllocatorEx)) == 0);
370}
371
372
373const char*
Victor Stinnerb16b4e42019-05-17 15:20:52 +0200374_PyMem_GetCurrentAllocatorName(void)
Victor Stinner5d39e042017-11-29 17:20:38 +0100375{
376 PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
377#ifdef WITH_PYMALLOC
378 PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
379#endif
380
381 if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
382 pymemallocator_eq(&_PyMem, &malloc_alloc) &&
383 pymemallocator_eq(&_PyObject, &malloc_alloc))
384 {
385 return "malloc";
386 }
387#ifdef WITH_PYMALLOC
388 if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
389 pymemallocator_eq(&_PyMem, &pymalloc) &&
390 pymemallocator_eq(&_PyObject, &pymalloc))
391 {
392 return "pymalloc";
393 }
394#endif
395
396 PyMemAllocatorEx dbg_raw = PYDBGRAW_ALLOC;
397 PyMemAllocatorEx dbg_mem = PYDBGMEM_ALLOC;
398 PyMemAllocatorEx dbg_obj = PYDBGOBJ_ALLOC;
399
400 if (pymemallocator_eq(&_PyMem_Raw, &dbg_raw) &&
401 pymemallocator_eq(&_PyMem, &dbg_mem) &&
402 pymemallocator_eq(&_PyObject, &dbg_obj))
403 {
404 /* Debug hooks installed */
405 if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
406 pymemallocator_eq(&_PyMem_Debug.mem.alloc, &malloc_alloc) &&
407 pymemallocator_eq(&_PyMem_Debug.obj.alloc, &malloc_alloc))
408 {
409 return "malloc_debug";
410 }
411#ifdef WITH_PYMALLOC
412 if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
413 pymemallocator_eq(&_PyMem_Debug.mem.alloc, &pymalloc) &&
414 pymemallocator_eq(&_PyMem_Debug.obj.alloc, &pymalloc))
415 {
416 return "pymalloc_debug";
417 }
418#endif
419 }
420 return NULL;
421}
422
423
424#undef MALLOC_ALLOC
425#undef PYMALLOC_ALLOC
426#undef PYRAW_ALLOC
427#undef PYMEM_ALLOC
428#undef PYOBJ_ALLOC
429#undef PYDBGRAW_ALLOC
430#undef PYDBGMEM_ALLOC
431#undef PYDBGOBJ_ALLOC
432
Victor Stinner0507bf52013-07-07 02:05:46 +0200433
Victor Stinner9e87e772017-11-24 12:09:24 +0100434static PyObjectArenaAllocator _PyObject_Arena = {NULL,
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800435#ifdef MS_WINDOWS
Victor Stinner9e87e772017-11-24 12:09:24 +0100436 _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800437#elif defined(ARENAS_USE_MMAP)
Victor Stinner9e87e772017-11-24 12:09:24 +0100438 _PyObject_ArenaMmap, _PyObject_ArenaMunmap
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800439#else
Victor Stinner9e87e772017-11-24 12:09:24 +0100440 _PyObject_ArenaMalloc, _PyObject_ArenaFree
Victor Stinnerf7e5b562017-11-15 15:48:08 -0800441#endif
442 };
443
Victor Stinner0621e0e2016-04-19 17:02:55 +0200444#ifdef WITH_PYMALLOC
Victor Stinner34be8072016-03-14 12:04:26 +0100445static int
446_PyMem_DebugEnabled(void)
447{
448 return (_PyObject.malloc == _PyMem_DebugMalloc);
449}
450
Victor Stinner6bf992a2017-12-06 17:26:10 +0100451static int
Victor Stinner34be8072016-03-14 12:04:26 +0100452_PyMem_PymallocEnabled(void)
453{
454 if (_PyMem_DebugEnabled()) {
455 return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
456 }
457 else {
458 return (_PyObject.malloc == _PyObject_Malloc);
459 }
460}
461#endif
462
Victor Stinner5d39e042017-11-29 17:20:38 +0100463
464static void
465_PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain)
Victor Stinner0507bf52013-07-07 02:05:46 +0200466{
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200467 PyMemAllocatorEx alloc;
Victor Stinner0507bf52013-07-07 02:05:46 +0200468
Victor Stinner5d39e042017-11-29 17:20:38 +0100469 if (domain == PYMEM_DOMAIN_RAW) {
470 if (_PyMem_Raw.malloc == _PyMem_DebugRawMalloc) {
471 return;
472 }
Victor Stinner34be8072016-03-14 12:04:26 +0100473
Victor Stinner0507bf52013-07-07 02:05:46 +0200474 PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
Victor Stinner5d39e042017-11-29 17:20:38 +0100475 alloc.ctx = &_PyMem_Debug.raw;
476 alloc.malloc = _PyMem_DebugRawMalloc;
477 alloc.calloc = _PyMem_DebugRawCalloc;
478 alloc.realloc = _PyMem_DebugRawRealloc;
479 alloc.free = _PyMem_DebugRawFree;
Victor Stinner0507bf52013-07-07 02:05:46 +0200480 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
481 }
Victor Stinner5d39e042017-11-29 17:20:38 +0100482 else if (domain == PYMEM_DOMAIN_MEM) {
483 if (_PyMem.malloc == _PyMem_DebugMalloc) {
484 return;
485 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200486
Victor Stinnerad524372016-03-16 12:12:53 +0100487 PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
Victor Stinner5d39e042017-11-29 17:20:38 +0100488 alloc.ctx = &_PyMem_Debug.mem;
489 alloc.malloc = _PyMem_DebugMalloc;
490 alloc.calloc = _PyMem_DebugCalloc;
491 alloc.realloc = _PyMem_DebugRealloc;
492 alloc.free = _PyMem_DebugFree;
Victor Stinnerad524372016-03-16 12:12:53 +0100493 PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
494 }
Victor Stinner5d39e042017-11-29 17:20:38 +0100495 else if (domain == PYMEM_DOMAIN_OBJ) {
496 if (_PyObject.malloc == _PyMem_DebugMalloc) {
497 return;
498 }
Victor Stinnerad524372016-03-16 12:12:53 +0100499
Victor Stinner0507bf52013-07-07 02:05:46 +0200500 PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
Victor Stinner5d39e042017-11-29 17:20:38 +0100501 alloc.ctx = &_PyMem_Debug.obj;
502 alloc.malloc = _PyMem_DebugMalloc;
503 alloc.calloc = _PyMem_DebugCalloc;
504 alloc.realloc = _PyMem_DebugRealloc;
505 alloc.free = _PyMem_DebugFree;
Victor Stinner0507bf52013-07-07 02:05:46 +0200506 PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
507 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200508}
509
Victor Stinner5d39e042017-11-29 17:20:38 +0100510
511void
512PyMem_SetupDebugHooks(void)
513{
514 _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_RAW);
515 _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_MEM);
516 _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_OBJ);
517}
518
Victor Stinner0507bf52013-07-07 02:05:46 +0200519void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200520PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200521{
522 switch(domain)
523 {
524 case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
525 case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
526 case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
527 default:
Victor Stinnerdb067af2014-05-02 22:31:14 +0200528 /* unknown domain: set all attributes to NULL */
Victor Stinner0507bf52013-07-07 02:05:46 +0200529 allocator->ctx = NULL;
530 allocator->malloc = NULL;
Victor Stinnerdb067af2014-05-02 22:31:14 +0200531 allocator->calloc = NULL;
Victor Stinner0507bf52013-07-07 02:05:46 +0200532 allocator->realloc = NULL;
533 allocator->free = NULL;
534 }
535}
536
537void
Victor Stinnerd8f0d922014-06-02 21:57:10 +0200538PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
Victor Stinner0507bf52013-07-07 02:05:46 +0200539{
540 switch(domain)
541 {
542 case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
543 case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
544 case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
545 /* ignore unknown domain */
546 }
Victor Stinner0507bf52013-07-07 02:05:46 +0200547}
548
549void
550PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
551{
Victor Stinner9e87e772017-11-24 12:09:24 +0100552 *allocator = _PyObject_Arena;
Victor Stinner0507bf52013-07-07 02:05:46 +0200553}
554
555void
556PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
557{
Victor Stinner9e87e772017-11-24 12:09:24 +0100558 _PyObject_Arena = *allocator;
Victor Stinner0507bf52013-07-07 02:05:46 +0200559}
560
561void *
562PyMem_RawMalloc(size_t size)
563{
564 /*
565 * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
566 * Most python internals blindly use a signed Py_ssize_t to track
567 * things without checking for overflows or negatives.
568 * As size_t is unsigned, checking for size < 0 is not required.
569 */
570 if (size > (size_t)PY_SSIZE_T_MAX)
571 return NULL;
Victor Stinner0507bf52013-07-07 02:05:46 +0200572 return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
573}
574
Victor Stinnerdb067af2014-05-02 22:31:14 +0200575void *
576PyMem_RawCalloc(size_t nelem, size_t elsize)
577{
578 /* see PyMem_RawMalloc() */
579 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
580 return NULL;
581 return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
582}
583
Victor Stinner0507bf52013-07-07 02:05:46 +0200584void*
585PyMem_RawRealloc(void *ptr, size_t new_size)
586{
587 /* see PyMem_RawMalloc() */
588 if (new_size > (size_t)PY_SSIZE_T_MAX)
589 return NULL;
590 return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
591}
592
Victor Stinner9e87e772017-11-24 12:09:24 +0100593void PyMem_RawFree(void *ptr)
Victor Stinner0507bf52013-07-07 02:05:46 +0200594{
595 _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
596}
597
Victor Stinner9ed83c42017-10-31 12:18:10 -0700598
Victor Stinner0507bf52013-07-07 02:05:46 +0200599void *
600PyMem_Malloc(size_t size)
601{
602 /* see PyMem_RawMalloc() */
603 if (size > (size_t)PY_SSIZE_T_MAX)
604 return NULL;
605 return _PyMem.malloc(_PyMem.ctx, size);
606}
607
608void *
Victor Stinnerdb067af2014-05-02 22:31:14 +0200609PyMem_Calloc(size_t nelem, size_t elsize)
610{
611 /* see PyMem_RawMalloc() */
612 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
613 return NULL;
614 return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
615}
616
617void *
Victor Stinner0507bf52013-07-07 02:05:46 +0200618PyMem_Realloc(void *ptr, size_t new_size)
619{
620 /* see PyMem_RawMalloc() */
621 if (new_size > (size_t)PY_SSIZE_T_MAX)
622 return NULL;
623 return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
624}
625
626void
627PyMem_Free(void *ptr)
628{
629 _PyMem.free(_PyMem.ctx, ptr);
630}
631
Victor Stinner9ed83c42017-10-31 12:18:10 -0700632
Victor Stinner46972b72017-11-24 22:55:40 +0100633wchar_t*
634_PyMem_RawWcsdup(const wchar_t *str)
635{
Victor Stinnerb64de462017-12-01 18:27:09 +0100636 assert(str != NULL);
637
Victor Stinner46972b72017-11-24 22:55:40 +0100638 size_t len = wcslen(str);
639 if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
640 return NULL;
641 }
642
643 size_t size = (len + 1) * sizeof(wchar_t);
644 wchar_t *str2 = PyMem_RawMalloc(size);
645 if (str2 == NULL) {
646 return NULL;
647 }
648
649 memcpy(str2, str, size);
650 return str2;
651}
652
/* Duplicate 'str' (including its terminator) with PyMem_RawMalloc().
   Return NULL on allocation failure; 'str' must not be NULL. */
char *
_PyMem_RawStrdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_RawMalloc(size);
    if (copy == NULL) {
        return NULL;
    }
    return memcpy(copy, str, size);
}
665
/* Duplicate 'str' (including its terminator) with PyMem_Malloc().
   Return NULL on allocation failure; 'str' must not be NULL. */
char *
_PyMem_Strdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_Malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    return memcpy(copy, str, size);
}
678
Victor Stinner0507bf52013-07-07 02:05:46 +0200679void *
680PyObject_Malloc(size_t size)
681{
682 /* see PyMem_RawMalloc() */
683 if (size > (size_t)PY_SSIZE_T_MAX)
684 return NULL;
685 return _PyObject.malloc(_PyObject.ctx, size);
686}
687
688void *
Victor Stinnerdb067af2014-05-02 22:31:14 +0200689PyObject_Calloc(size_t nelem, size_t elsize)
690{
691 /* see PyMem_RawMalloc() */
692 if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
693 return NULL;
694 return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
695}
696
697void *
Victor Stinner0507bf52013-07-07 02:05:46 +0200698PyObject_Realloc(void *ptr, size_t new_size)
699{
700 /* see PyMem_RawMalloc() */
701 if (new_size > (size_t)PY_SSIZE_T_MAX)
702 return NULL;
703 return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
704}
705
706void
707PyObject_Free(void *ptr)
708{
709 _PyObject.free(_PyObject.ctx, ptr);
710}
711
712
Benjamin Peterson05159c42009-12-03 03:01:27 +0000713/* If we're using GCC, use __builtin_expect() to reduce overhead of
714 the valgrind checks */
715#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
716# define UNLIKELY(value) __builtin_expect((value), 0)
Inada Naokifb265042019-07-17 21:23:57 +0900717# define LIKELY(value) __builtin_expect((value), 1)
Benjamin Peterson05159c42009-12-03 03:01:27 +0000718#else
719# define UNLIKELY(value) (value)
Inada Naokifb265042019-07-17 21:23:57 +0900720# define LIKELY(value) (value)
Benjamin Peterson05159c42009-12-03 03:01:27 +0000721#endif
722
Inada Naokifb265042019-07-17 21:23:57 +0900723#ifdef WITH_PYMALLOC
724
725#ifdef WITH_VALGRIND
726#include <valgrind/valgrind.h>
727
Benjamin Peterson05159c42009-12-03 03:01:27 +0000728/* -1 indicates that we haven't checked that we're running on valgrind yet. */
729static int running_on_valgrind = -1;
730#endif
731
Victor Stinner9ed83c42017-10-31 12:18:10 -0700732
Victor Stinner9e87e772017-11-24 12:09:24 +0100733/* An object allocator for Python.
734
735 Here is an introduction to the layers of the Python memory architecture,
736 showing where the object allocator is actually used (layer +2), It is
737 called for every object allocation and deallocation (PyObject_New/Del),
738 unless the object-specific allocators implement a proprietary allocation
739 scheme (ex.: ints use a simple free list). This is also the place where
740 the cyclic garbage collector operates selectively on container objects.
741
742
743 Object-specific allocators
744 _____ ______ ______ ________
745 [ int ] [ dict ] [ list ] ... [ string ] Python core |
746+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
747 _______________________________ | |
748 [ Python's object allocator ] | |
749+2 | ####### Object memory ####### | <------ Internal buffers ------> |
750 ______________________________________________________________ |
751 [ Python's raw memory allocator (PyMem_ API) ] |
752+1 | <----- Python memory (under PyMem manager's control) ------> | |
753 __________________________________________________________________
754 [ Underlying general-purpose allocator (ex: C library malloc) ]
755 0 | <------ Virtual memory allocated for the python process -------> |
756
757 =========================================================================
758 _______________________________________________________________________
759 [ OS-specific Virtual Memory Manager (VMM) ]
760-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
761 __________________________________ __________________________________
762 [ ] [ ]
763-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |
764
765*/
766/*==========================================================================*/
767
768/* A fast, special-purpose memory allocator for small blocks, to be used
769 on top of a general-purpose malloc -- heavily based on previous art. */
770
771/* Vladimir Marangozov -- August 2000 */
772
773/*
774 * "Memory management is where the rubber meets the road -- if we do the wrong
775 * thing at any level, the results will not be good. And if we don't make the
776 * levels work well together, we are in serious trouble." (1)
777 *
778 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
779 * "Dynamic Storage Allocation: A Survey and Critical Review",
780 * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
781 */
782
783/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */
784
785/*==========================================================================*/
786
787/*
788 * Allocation strategy abstract:
789 *
790 * For small requests, the allocator sub-allocates <Big> blocks of memory.
791 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
792 * system's allocator.
793 *
794 * Small requests are grouped in size classes spaced 8 bytes apart, due
795 * to the required valid alignment of the returned address. Requests of
796 * a particular size are serviced from memory pools of 4K (one VMM page).
797 * Pools are fragmented on demand and contain free lists of blocks of one
798 * particular size class. In other words, there is a fixed-size allocator
799 * for each size class. Free pools are shared by the different allocators
800 * thus minimizing the space reserved for a particular size class.
801 *
802 * This allocation strategy is a variant of what is known as "simple
803 * segregated storage based on array of free lists". The main drawback of
804 * simple segregated storage is that we might end up with lot of reserved
805 * memory for the different free lists, which degenerate in time. To avoid
806 * this, we partition each free list in pools and we share dynamically the
807 * reserved space between all free lists. This technique is quite efficient
808 * for memory intensive programs which allocate mainly small-sized blocks.
809 *
810 * For small requests we have the following table:
811 *
812 * Request in bytes Size of allocated block Size class idx
813 * ----------------------------------------------------------------
814 * 1-8 8 0
815 * 9-16 16 1
816 * 17-24 24 2
817 * 25-32 32 3
818 * 33-40 40 4
819 * 41-48 48 5
820 * 49-56 56 6
821 * 57-64 64 7
822 * 65-72 72 8
823 * ... ... ...
824 * 497-504 504 62
825 * 505-512 512 63
826 *
827 * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
828 * allocator.
829 */
830
831/*==========================================================================*/
832
833/*
834 * -- Main tunable settings section --
835 */
836
837/*
838 * Alignment of addresses returned to the user. 8-bytes alignment works
839 * on most current architectures (with 32-bit or 64-bit address busses).
840 * The alignment value is also used for grouping small requests in size
841 * classes spaced ALIGNMENT bytes apart.
842 *
843 * You shouldn't change this unless you know what you are doing.
844 */
Inada Naokif0be4bb2019-05-14 18:51:15 +0900845
#if SIZEOF_VOID_P > 4
#define ALIGNMENT              16               /* must be 2^N */
#define ALIGNMENT_SHIFT         4
#else
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3
#endif

/* Return the number of bytes in size class I, as a uint.
 * Size class I holds requests of (I+1) * ALIGNMENT bytes. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
874
/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

#if !defined(WITH_PYMALLOC_RADIX_TREE)
/* Use radix-tree to track arena memory regions, for address_in_range().
 * Enable by default since it allows larger pool sizes.  Can be disabled
 * using -DWITH_PYMALLOC_RADIX_TREE=0 */
#define WITH_PYMALLOC_RADIX_TREE 1
#endif

#if SIZEOF_VOID_P > 4
/* on 64-bit platforms use larger pools and arenas if we can */
#define USE_LARGE_ARENAS
#if WITH_PYMALLOC_RADIX_TREE
/* large pools only supported if radix-tree is enabled */
#define USE_LARGE_POOLS
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way this
 * means that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages within
 * this space are referenced subsequently. So malloc'ing big blocks and not
 * using them does not mean "wasting memory". It's an addressable range
 * wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#ifdef USE_LARGE_ARENAS
#define ARENA_BITS              20                    /* 1 MiB */
#else
#define ARENA_BITS              18                    /* 256 KiB */
#endif
#define ARENA_SIZE              (1 << ARENA_BITS)
#define ARENA_SIZE_MASK         (ARENA_SIZE - 1)

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks.  Must be a power of 2.
 */
#ifdef USE_LARGE_POOLS
#define POOL_BITS               14                  /* 16 KiB */
#else
#define POOL_BITS               12                  /* 4 KiB */
#endif
#define POOL_SIZE               (1 << POOL_BITS)
#define POOL_SIZE_MASK          (POOL_SIZE - 1)

/* Without the radix tree, address_in_range() relies on pools being exactly
 * one VMM page, so the sizes must agree. */
#if !WITH_PYMALLOC_RADIX_TREE
#if POOL_SIZE != SYSTEM_PAGE_SIZE
#   error "pool size must be equal to system page size"
#endif
#endif

/* Number of pools that fit in one arena (also sizes nfp2lasta[] below). */
#define MAX_POOLS_IN_ARENA  (ARENA_SIZE / POOL_SIZE)
#if MAX_POOLS_IN_ARENA * POOL_SIZE != ARENA_SIZE
#   error "arena size not an exact multiple of pool size"
#endif
959
Victor Stinner9e87e772017-11-24 12:09:24 +0100960/*
961 * -- End of tunable settings section --
962 */
963
964/*==========================================================================*/
965
/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks.  This header sits at the start of every pool.
 * The union around `ref` forces the first member to occupy at least
 * sizeof(block *) bytes; the usedpools[] trick below (see PTA) relies on
 * nextpool/prevpool being preceded by exactly two pointer-sized fields. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;
/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};
1024
/* Bytes at the start of a pool consumed by the pool_header itself, rounded
 * up so the first block stays ALIGNMENT-aligned. */
#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX          0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
1034
1035/*==========================================================================*/
1036
1037/*
Victor Stinner9e87e772017-11-24 12:09:24 +01001038 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
1039
1040This is involved. For an index i, usedpools[i+i] is the header for a list of
1041all partially used pools holding small blocks with "size class idx" i. So
1042usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
104316, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.
1044
1045Pools are carved off an arena's highwater mark (an arena_object's pool_address
1046member) as needed. Once carved off, a pool is in one of three states forever
1047after:
1048
1049used == partially used, neither empty nor full
1050 At least one block in the pool is currently allocated, and at least one
1051 block in the pool is not currently allocated (note this implies a pool
1052 has room for at least two blocks).
1053 This is a pool's initial state, as a pool is created only when malloc
1054 needs space.
1055 The pool holds blocks of a fixed size, and is in the circular list headed
1056 at usedpools[i] (see above). It's linked to the other used pools of the
1057 same size class via the pool_header's nextpool and prevpool members.
1058 If all but one block is currently allocated, a malloc can cause a
1059 transition to the full state. If all but one block is not currently
1060 allocated, a free can cause a transition to the empty state.
1061
1062full == all the pool's blocks are currently allocated
1063 On transition to full, a pool is unlinked from its usedpools[] list.
1064 It's not linked to from anything then anymore, and its nextpool and
1065 prevpool members are meaningless until it transitions back to used.
1066 A free of a block in a full pool puts the pool back in the used state.
1067 Then it's linked in at the front of the appropriate usedpools[] list, so
1068 that the next allocation for its size class will reuse the freed block.
1069
1070empty == all the pool's blocks are currently available for allocation
1071 On transition to empty, a pool is unlinked from its usedpools[] list,
1072 and linked to the front of its arena_object's singly-linked freepools list,
1073 via its nextpool member. The prevpool member has no meaning in this case.
1074 Empty pools have no inherent size class: the next time a malloc finds
1075 an empty list in usedpools[], it takes the first pool off of freepools.
1076 If the size class needed happens to be the same as the size class the pool
1077 last had, some pool initialization can be skipped.
1078
1079
1080Block Management
1081
1082Blocks within pools are again carved out as needed. pool->freeblock points to
1083the start of a singly-linked list of free blocks within the pool. When a
1084block is freed, it's inserted at the front of its pool's freeblock list. Note
1085that the available blocks in a pool are *not* linked all together when a pool
1086is initialized. Instead only "the first two" (lowest addresses) blocks are
1087set up, returning the first such block, and setting pool->freeblock to a
1088one-block list holding the second such block. This is consistent with that
1089pymalloc strives at all levels (arena, pool, and block) never to touch a piece
1090of memory until it's actually needed.
1091
1092So long as a pool is in the used state, we're certain there *is* a block
1093available for allocating, and pool->freeblock is not NULL. If pool->freeblock
1094points to the end of the free list before we've carved the entire pool into
1095blocks, that means we simply haven't yet gotten to one of the higher-address
1096blocks. The offset from the pool_header to the start of "the next" virgin
1097block is stored in the pool_header nextoffset member, and the largest value
1098of nextoffset that makes sense is stored in the maxnextoffset member when a
1099pool is initialized. All the blocks in a pool have been passed out at least
1100once when and only when nextoffset > maxnextoffset.
1101
1102
1103Major obscurity: While the usedpools vector is declared to have poolp
1104entries, it doesn't really. It really contains two pointers per (conceptual)
1105poolp entry, the nextpool and prevpool members of a pool_header. The
1106excruciating initialization code below fools C so that
1107
1108 usedpool[i+i]
1109
1110"acts like" a genuine poolp, but only so long as you only reference its
1111nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
1112compensating for that a pool_header's nextpool and prevpool members
1113immediately follow a pool_header's first two members:
1114
1115 union { block *_padding;
1116 uint count; } ref;
1117 block *freeblock;
1118
1119each of which consume sizeof(block *) bytes. So what usedpools[i+i] really
1120contains is a fudged-up pointer p such that *if* C believes it's a poolp
1121pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
1122circular list is empty).
1123
1124It's unclear why the usedpools setup is so convoluted. It could be to
1125minimize the amount of cache required to hold this heavily-referenced table
1126(which only *needs* the two interpool pointer members of a pool_header). OTOH,
1127referencing code has to remember to "double the index" and doing so isn't
1128free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
1129on that C doesn't insert any padding anywhere in a pool_header at or before
1130the prevpool member.
1131**************************************************************************** */
1132
/* Sentinel "pool headers" for the circular used-pool lists; see the long
 * block comment above.  PTA(x) yields a fudged-up poolp whose nextpool and
 * prevpool members alias usedpools[2*x] and usedpools[2*x + 1] -- only
 * those two members may ever be referenced through it. */
#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

/* Two slots per size class, each initialized to point at itself, i.e. every
 * headed circular list starts out empty.  The array length is rounded up to
 * a multiple of 16 slots (8 classes) to match the PT() initializer groups. */
static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
1163
1164/*==========================================================================
1165Arena management.
1166
1167`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
1168which may not be currently used (== they're arena_objects that aren't
1169currently associated with an allocated arena). Note that arenas proper are
1170separately malloc'ed.
1171
1172Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
1173we do try to free() arenas, and use some mild heuristic strategies to increase
1174the likelihood that arenas eventually can be freed.
1175
1176unused_arena_objects
1177
1178 This is a singly-linked list of the arena_objects that are currently not
1179 being used (no arena is associated with them). Objects are taken off the
1180 head of the list in new_arena(), and are pushed on the head of the list in
1181 PyObject_Free() when the arena is empty. Key invariant: an arena_object
1182 is on this list if and only if its .address member is 0.
1183
1184usable_arenas
1185
1186 This is a doubly-linked list of the arena_objects associated with arenas
1187 that have pools available. These pools are either waiting to be reused,
1188 or have not been used before. The list is sorted to have the most-
1189 allocated arenas first (ascending order based on the nfreepools member).
1190 This means that the next allocation will come from a heavily used arena,
1191 which gives the nearly empty arenas a chance to be returned to the system.
1192 In my unscientific tests this dramatically improved the number of arenas
1193 that could be freed.
1194
1195Note that an arena_object associated with an arena all of whose pools are
1196currently in use isn't on either list.
Tim Peters1c263e32019-05-31 21:16:04 -05001197
1198Changed in Python 3.8: keeping usable_arenas sorted by number of free pools
1199used to be done by one-at-a-time linear search when an arena's number of
1200free pools changed. That could, overall, consume time quadratic in the
1201number of arenas. That didn't really matter when there were only a few
1202hundred arenas (typical!), but could be a timing disaster when there were
1203hundreds of thousands. See bpo-37029.
1204
1205Now we have a vector of "search fingers" to eliminate the need to search:
1206nfp2lasta[nfp] returns the last ("rightmost") arena in usable_arenas
1207with nfp free pools. This is NULL if and only if there is no arena with
1208nfp free pools in usable_arenas.
Victor Stinner9e87e772017-11-24 12:09:24 +01001209*/
1210
/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* nfp2lasta[nfp] is the last arena in usable_arenas with nfp free pools */
static struct arena_object* nfp2lasta[MAX_POOLS_IN_ARENA + 1] = { NULL };

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

/* Count of live blocks served outside the pools; added into the total
 * reported by _Py_GetAllocatedBlocks(). */
static Py_ssize_t raw_allocated_blocks;
Victor Stinner9e87e772017-11-24 12:09:24 +01001244
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001245Py_ssize_t
1246_Py_GetAllocatedBlocks(void)
1247{
Neil Schemenauer5d25f2b2019-07-10 12:04:16 -07001248 Py_ssize_t n = raw_allocated_blocks;
1249 /* add up allocated blocks for used pools */
1250 for (uint i = 0; i < maxarenas; ++i) {
1251 /* Skip arenas which are not allocated. */
Tim Petersb64c2c62019-07-10 16:24:01 -05001252 if (arenas[i].address == 0) {
Neil Schemenauer5d25f2b2019-07-10 12:04:16 -07001253 continue;
1254 }
1255
1256 uintptr_t base = (uintptr_t)_Py_ALIGN_UP(arenas[i].address, POOL_SIZE);
1257
1258 /* visit every pool in the arena */
1259 assert(base <= (uintptr_t) arenas[i].pool_address);
1260 for (; base < (uintptr_t) arenas[i].pool_address; base += POOL_SIZE) {
1261 poolp p = (poolp)base;
1262 n += p->ref.count;
1263 }
1264 }
1265 return n;
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001266}
1267
Neil Schemenauer85b6b702021-03-29 19:51:15 -07001268#if WITH_PYMALLOC_RADIX_TREE
1269/*==========================================================================*/
1270/* radix tree for tracking arena usage
1271
1272 bit allocation for keys
1273
1274 64-bit pointers and 2^20 arena size:
1275 16 -> ignored (POINTER_BITS - ADDRESS_BITS)
1276 10 -> MAP_TOP
1277 10 -> MAP_MID
1278 8 -> MAP_BOT
1279 20 -> ideal aligned arena
1280 ----
1281 64
1282
1283 32-bit pointers and 2^18 arena size:
1284 14 -> MAP_BOT
1285 18 -> ideal aligned arena
1286 ----
1287 32
1288
1289*/
1290
1291#if SIZEOF_VOID_P == 8
1292
1293/* number of bits in a pointer */
1294#define POINTER_BITS 64
1295
1296/* Current 64-bit processors are limited to 48-bit physical addresses. For
1297 * now, the top 17 bits of addresses will all be equal to bit 2**47. If that
1298 * changes in the future, this must be adjusted upwards.
1299 */
1300#define ADDRESS_BITS 48
1301
1302/* use the top and mid layers of the radix tree */
1303#define USE_INTERIOR_NODES
1304
1305#elif SIZEOF_VOID_P == 4
1306
1307#define POINTER_BITS 32
1308#define ADDRESS_BITS 32
1309
1310#else
1311
1312 /* Currently this code works for 64-bit or 32-bit pointers only. */
1313#error "obmalloc radix tree requires 64-bit or 32-bit pointers."
1314
1315#endif /* SIZEOF_VOID_P */
1316
1317/* arena_coverage_t members require this to be true */
1318#if ARENA_BITS >= 32
1319# error "arena size must be < 2^32"
1320#endif
1321
1322#ifdef USE_INTERIOR_NODES
1323/* number of bits used for MAP_TOP and MAP_MID nodes */
1324#define INTERIOR_BITS ((ADDRESS_BITS - ARENA_BITS + 2) / 3)
1325#else
1326#define INTERIOR_BITS 0
1327#endif
1328
1329#define MAP_TOP_BITS INTERIOR_BITS
1330#define MAP_TOP_LENGTH (1 << MAP_TOP_BITS)
1331#define MAP_TOP_MASK (MAP_BOT_LENGTH - 1)
1332
1333#define MAP_MID_BITS INTERIOR_BITS
1334#define MAP_MID_LENGTH (1 << MAP_MID_BITS)
1335#define MAP_MID_MASK (MAP_MID_LENGTH - 1)
1336
1337#define MAP_BOT_BITS (ADDRESS_BITS - ARENA_BITS - 2*INTERIOR_BITS)
1338#define MAP_BOT_LENGTH (1 << MAP_BOT_BITS)
1339#define MAP_BOT_MASK (MAP_BOT_LENGTH - 1)
1340
1341#define MAP_BOT_SHIFT ARENA_BITS
1342#define MAP_MID_SHIFT (MAP_BOT_BITS + MAP_BOT_SHIFT)
1343#define MAP_TOP_SHIFT (MAP_MID_BITS + MAP_MID_SHIFT)
1344
1345#define AS_UINT(p) ((uintptr_t)(p))
1346#define MAP_BOT_INDEX(p) ((AS_UINT(p) >> MAP_BOT_SHIFT) & MAP_BOT_MASK)
1347#define MAP_MID_INDEX(p) ((AS_UINT(p) >> MAP_MID_SHIFT) & MAP_MID_MASK)
1348#define MAP_TOP_INDEX(p) ((AS_UINT(p) >> MAP_TOP_SHIFT) & MAP_TOP_MASK)
1349
1350#if ADDRESS_BITS > POINTER_BITS
1351/* Return non-physical address bits of a pointer. Those bits should be same
1352 * for all valid pointers if ADDRESS_BITS set correctly. Linux has support for
1353 * 57-bit address space (Intel 5-level paging) but will not currently give
1354 * those addresses to user space.
1355 */
1356#define HIGH_BITS(p) (AS_UINT(p) >> ADDRESS_BITS)
1357#else
1358#define HIGH_BITS(p) 0
1359#endif
1360
1361
/* This is the leaf of the radix tree.  See arena_map_mark_used() for the
 * meaning of these members. */
typedef struct {
    int32_t tail_hi;
    int32_t tail_lo;
} arena_coverage_t;

typedef struct arena_map_bot {
    /* The members tail_hi and tail_lo are accessed together.  So, it
     * is better to have them as an array of structs, rather than two
     * arrays.
     */
    arena_coverage_t arenas[MAP_BOT_LENGTH];
} arena_map_bot_t;

#ifdef USE_INTERIOR_NODES
/* Middle level: an array of pointers to bottom nodes, allocated lazily. */
typedef struct arena_map_mid {
    struct arena_map_bot *ptrs[MAP_MID_LENGTH];
} arena_map_mid_t;

/* Top level: an array of pointers to middle nodes, allocated lazily. */
typedef struct arena_map_top {
    struct arena_map_mid *ptrs[MAP_TOP_LENGTH];
} arena_map_top_t;
#endif

/* The root of radix tree.  Note that by initializing like this, the memory
 * should be in the BSS.  The OS will only memory map pages as the MAP_MID
 * nodes get used (OS pages are demand loaded as needed).
 */
#ifdef USE_INTERIOR_NODES
static arena_map_top_t arena_map_root;
/* accounting for number of used interior nodes */
static int arena_map_mid_count;
static int arena_map_bot_count;
#else
static arena_map_bot_t arena_map_root;
#endif
1399
1400/* Return a pointer to a bottom tree node, return NULL if it doesn't exist or
1401 * it cannot be created */
1402static arena_map_bot_t *
1403arena_map_get(block *p, int create)
1404{
1405#ifdef USE_INTERIOR_NODES
1406 /* sanity check that ADDRESS_BITS is correct */
1407 assert(HIGH_BITS(p) == HIGH_BITS(&arena_map_root));
1408 int i1 = MAP_TOP_INDEX(p);
1409 if (arena_map_root.ptrs[i1] == NULL) {
1410 if (!create) {
1411 return NULL;
1412 }
1413 arena_map_mid_t *n = PyMem_RawCalloc(1, sizeof(arena_map_mid_t));
1414 if (n == NULL) {
1415 return NULL;
1416 }
1417 arena_map_root.ptrs[i1] = n;
1418 arena_map_mid_count++;
1419 }
1420 int i2 = MAP_MID_INDEX(p);
1421 if (arena_map_root.ptrs[i1]->ptrs[i2] == NULL) {
1422 if (!create) {
1423 return NULL;
1424 }
1425 arena_map_bot_t *n = PyMem_RawCalloc(1, sizeof(arena_map_bot_t));
1426 if (n == NULL) {
1427 return NULL;
1428 }
1429 arena_map_root.ptrs[i1]->ptrs[i2] = n;
1430 arena_map_bot_count++;
1431 }
1432 return arena_map_root.ptrs[i1]->ptrs[i2];
1433#else
1434 return &arena_map_root;
1435#endif
1436}
1437
1438
1439/* The radix tree only tracks arenas. So, for 16 MiB arenas, we throw
1440 * away 24 bits of the address. That reduces the space requirement of
1441 * the tree compared to similar radix tree page-map schemes. In
1442 * exchange for slashing the space requirement, it needs more
1443 * computation to check an address.
1444 *
1445 * Tracking coverage is done by "ideal" arena address. It is easier to
1446 * explain in decimal so let's say that the arena size is 100 bytes.
1447 * Then, ideal addresses are 100, 200, 300, etc. For checking if a
1448 * pointer address is inside an actual arena, we have to check two ideal
1449 * arena addresses. E.g. if pointer is 357, we need to check 200 and
1450 * 300. In the rare case that an arena is aligned in the ideal way
1451 * (e.g. base address of arena is 200) then we only have to check one
1452 * ideal address.
1453 *
1454 * The tree nodes for 200 and 300 both store the address of arena.
1455 * There are two cases: the arena starts at a lower ideal arena and
1456 * extends to this one, or the arena starts in this arena and extends to
1457 * the next ideal arena. The tail_lo and tail_hi members correspond to
1458 * these two cases.
1459 */
1460
1461
/* Mark or unmark the addresses covered by the arena starting at `arena_base`.
 * Returns 1 on success, 0 if a radix-tree node could not be allocated (only
 * possible when is_used is true). */
static int
arena_map_mark_used(uintptr_t arena_base, int is_used)
{
    /* sanity check that ADDRESS_BITS is correct */
    assert(HIGH_BITS(arena_base) == HIGH_BITS(&arena_map_root));
    /* Only create missing tree nodes when marking as used. */
    arena_map_bot_t *n_hi = arena_map_get((block *)arena_base, is_used);
    if (n_hi == NULL) {
        assert(is_used); /* otherwise node should already exist */
        return 0; /* failed to allocate space for node */
    }
    int i3 = MAP_BOT_INDEX((block *)arena_base);
    /* Offset of arena_base within its enclosing "ideal" (arena-aligned)
     * address slot; 0 means the arena happens to be ideally aligned. */
    int32_t tail = (int32_t)(arena_base & ARENA_SIZE_MASK);
    if (tail == 0) {
        /* is ideal arena address */
        n_hi->arenas[i3].tail_hi = is_used ? -1 : 0;
    }
    else {
        /* arena_base address is not ideal (aligned to arena size) and
         * so it potentially covers two MAP_BOT nodes.  Get the MAP_BOT node
         * for the next arena.  Note that it might be in different MAP_TOP
         * and MAP_MID nodes as well so we need to call arena_map_get()
         * again (do the full tree traversal).
         */
        n_hi->arenas[i3].tail_hi = is_used ? tail : 0;
        uintptr_t arena_base_next = arena_base + ARENA_SIZE;
        /* If arena_base is a legit arena address, so is arena_base_next - 1
         * (last address in arena).  If arena_base_next overflows then it
         * must overflow to 0.  However, that would mean arena_base was
         * "ideal" and we should not be in this case. */
        assert(arena_base < arena_base_next);
        arena_map_bot_t *n_lo = arena_map_get((block *)arena_base_next, is_used);
        if (n_lo == NULL) {
            assert(is_used); /* otherwise should already exist */
            /* Roll back the tail_hi mark made above so the tree stays
             * consistent after the allocation failure. */
            n_hi->arenas[i3].tail_hi = 0;
            return 0; /* failed to allocate space for node */
        }
        int i3_next = MAP_BOT_INDEX(arena_base_next);
        n_lo->arenas[i3_next].tail_lo = is_used ? tail : 0;
    }
    return 1;
}
1504
1505/* Return true if 'p' is a pointer inside an obmalloc arena.
1506 * _PyObject_Free() calls this so it needs to be very fast. */
1507static int
1508arena_map_is_used(block *p)
1509{
1510 arena_map_bot_t *n = arena_map_get(p, 0);
1511 if (n == NULL) {
1512 return 0;
1513 }
1514 int i3 = MAP_BOT_INDEX(p);
1515 /* ARENA_BITS must be < 32 so that the tail is a non-negative int32_t. */
1516 int32_t hi = n->arenas[i3].tail_hi;
1517 int32_t lo = n->arenas[i3].tail_lo;
1518 int32_t tail = (int32_t)(AS_UINT(p) & ARENA_SIZE_MASK);
1519 return (tail < lo) || (tail >= hi && hi != 0);
1520}
1521
1522/* end of radix tree logic */
1523/*==========================================================================*/
1524#endif /* WITH_PYMALLOC_RADIX_TREE */
1525
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001526
/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    /* Resolve PYTHONMALLOCSTATS once on the first call; the answer is
     * cached in the function-static `debug_stats` (-1 == not yet read). */
    if (debug_stats == -1) {
        const char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    /* No spare arena_object slots: grow the `arenas` vector. */
    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        /* On platforms where size_t is no wider than int, the nbytes
         * multiplication below could itself overflow; reject first. */
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list
         * (singly linked through `nextarena`). */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                   &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
#if WITH_PYMALLOC_RADIX_TREE
    if (address != NULL) {
        if (!arena_map_mark_used((uintptr_t)address, 1)) {
            /* marking arena in radix tree failed, abort */
            _PyObject_Arena.free(_PyObject_Arena.ctx, address, ARENA_SIZE);
            address = NULL;
        }
    }
#endif
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back on the unused list so the slot can be reused.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    /* Statistics for _PyObject_DebugMallocStats(). */
    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = MAX_POOLS_IN_ARENA;
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        /* The arena base isn't pool-aligned: skip ahead to the first
         * pool boundary, sacrificing one pool's worth of space. */
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}
1632
Victor Stinner9ed83c42017-10-31 12:18:10 -07001633
Neil Schemenauer85b6b702021-03-29 19:51:15 -07001634
1635#if WITH_PYMALLOC_RADIX_TREE
/* Return true if and only if P is an address that was allocated by
   pymalloc.  With the radix tree enabled, the containment check is done
   by arena_map_is_used(), so the 'pool' argument required by the
   non-radix variant below is unused here (it is kept so both variants
   share one call signature).
 */
static bool
address_in_range(void *p, poolp pool)
{
    return arena_map_is_used(p);
}
1644#else
Thomas Woutersa9773292006-04-21 09:43:23 +00001645/*
Benjamin Peterson3924f932016-09-18 19:12:48 -07001646address_in_range(P, POOL)
Thomas Woutersa9773292006-04-21 09:43:23 +00001647
1648Return true if and only if P is an address that was allocated by pymalloc.
1649POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
1650(the caller is asked to compute this because the macro expands POOL more than
1651once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
Benjamin Peterson3924f932016-09-18 19:12:48 -07001652variable and pass the latter to the macro; because address_in_range is
Thomas Woutersa9773292006-04-21 09:43:23 +00001653called on every alloc/realloc/free, micro-efficiency is important here).
1654
1655Tricky: Let B be the arena base address associated with the pool, B =
1656arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
1657
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001658 B <= P < B + ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +00001659
1660Subtracting B throughout, this is true iff
1661
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001662 0 <= P-B < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +00001663
1664By using unsigned arithmetic, the "0 <=" half of the test can be skipped.
1665
1666Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
1667before the first arena has been allocated. `arenas` is still NULL in that
1668case. We're relying on that maxarenas is also 0 in that case, so that
1669(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
1670into a NULL arenas.
1671
1672Details: given P and POOL, the arena_object corresponding to P is AO =
1673arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
1674stores, etc), POOL is the correct address of P's pool, AO.address is the
1675correct base address of the pool's arena, and P must be within ARENA_SIZE of
1676AO.address. In addition, AO.address is not 0 (no arena can start at address 0
Benjamin Peterson3924f932016-09-18 19:12:48 -07001677(NULL)). Therefore address_in_range correctly reports that obmalloc
Thomas Woutersa9773292006-04-21 09:43:23 +00001678controls P.
1679
1680Now suppose obmalloc does not control P (e.g., P was obtained via a direct
1681call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
1682in this case -- it may even be uninitialized trash. If the trash arenaindex
1683is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
1684control P.
1685
1686Else arenaindex is < maxarena, and AO is read up. If AO corresponds to an
1687allocated arena, obmalloc controls all the memory in slice AO.address :
1688AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
1689so P doesn't lie in that slice, so the macro correctly reports that P is not
1690controlled by obmalloc.
1691
1692Finally, if P is not controlled by obmalloc and AO corresponds to an unused
1693arena_object (one not currently associated with an allocated arena),
1694AO.address is 0, and the second test in the macro reduces to:
1695
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001696 P < ARENA_SIZE
Thomas Woutersa9773292006-04-21 09:43:23 +00001697
1698If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
1699that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
1700of the test still passes, and the third clause (AO.address != 0) is necessary
1701to get the correct result: AO.address is 0 in this case, so the macro
1702correctly reports that P is not controlled by obmalloc (despite that P lies in
1703slice AO.address : AO.address + ARENA_SIZE).
1704
1705Note: The third (AO.address != 0) clause was added in Python 2.5. Before
17062.5, arenas were never free()'ed, and an arenaindex < maxarena always
1707corresponded to a currently-allocated arena, so the "P is not controlled by
1708obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
1709was impossible.
1710
1711Note that the logic is excruciating, and reading up possibly uninitialized
1712memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
1713creates problems for some memory debuggers. The overwhelming advantage is
1714that this test determines whether an arbitrary address is controlled by
1715obmalloc in a small constant time, independent of the number of arenas
1716obmalloc controls. Since this test is needed at every entry point, it's
1717extremely desirable that it be this fast.
1718*/
Thomas Woutersa9773292006-04-21 09:43:23 +00001719
static bool _Py_NO_SANITIZE_ADDRESS
            _Py_NO_SANITIZE_THREAD
            _Py_NO_SANITIZE_MEMORY
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL. The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    // Three-clause containment test (see the long proof in the comment
    // block above this function):
    //   1. arenaindex must name a real arena_object slot;
    //   2. p must lie within ARENA_SIZE bytes of that arena's base --
    //      the unsigned subtraction folds "0 <= p-B" and "p-B < ARENA_SIZE"
    //      into a single comparison;
    //   3. the slot must currently be associated with an allocated arena
    //      (address == 0 marks an unused arena_object).
    return arenaindex < maxarenas &&
        (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
        arenas[arenaindex].address != 0;
}
Tim Peters338e0102002-04-01 19:23:44 +00001735
Neil Schemenauer85b6b702021-03-29 19:51:15 -07001736#endif /* !WITH_PYMALLOC_RADIX_TREE */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001737
Neil Schemenauera35c6882001-02-27 04:45:05 +00001738/*==========================================================================*/
1739
Inada Naokifb265042019-07-17 21:23:57 +09001740// Called when freelist is exhausted. Extend the freelist if there is
1741// space for a block. Otherwise, remove this pool from usedpools.
1742static void
1743pymalloc_pool_extend(poolp pool, uint size)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001744{
Inada Naokifb265042019-07-17 21:23:57 +09001745 if (UNLIKELY(pool->nextoffset <= pool->maxnextoffset)) {
1746 /* There is room for another block. */
1747 pool->freeblock = (block*)pool + pool->nextoffset;
1748 pool->nextoffset += INDEX2SIZE(size);
1749 *(block **)(pool->freeblock) = NULL;
1750 return;
1751 }
1752
1753 /* Pool is full, unlink from used pools. */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001754 poolp next;
Inada Naokifb265042019-07-17 21:23:57 +09001755 next = pool->nextpool;
1756 pool = pool->prevpool;
1757 next->prevpool = pool;
1758 pool->nextpool = next;
1759}
Neil Schemenauera35c6882001-02-27 04:45:05 +00001760
/* Called when pymalloc_alloc cannot allocate a block from usedpools.
 * Acquire a pool -- from an arena's cached freepools list, or by carving
 * a fresh one out of an arena (allocating a new arena if necessary) --
 * link it into usedpools[size + size], and return the pool's first
 * block.  Returns NULL if no arena space can be obtained.
 */
static void*
allocate_from_new_pool(uint size)
{
    /* There isn't a pool of the right size class immediately
     * available:  use a free pool.
     */
    if (UNLIKELY(usable_arenas == NULL)) {
        /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (narenas_currently_allocated >= MAX_ARENAS) {
            return NULL;
        }
#endif
        usable_arenas = new_arena();
        if (usable_arenas == NULL) {
            return NULL;
        }
        /* The fresh arena is the only list member; register it as the
         * rightmost arena with its nfreepools count. */
        usable_arenas->nextarena = usable_arenas->prevarena = NULL;
        assert(nfp2lasta[usable_arenas->nfreepools] == NULL);
        nfp2lasta[usable_arenas->nfreepools] = usable_arenas;
    }
    assert(usable_arenas->address != 0);

    /* This arena already had the smallest nfreepools value, so decreasing
     * nfreepools doesn't change that, and we don't need to rearrange the
     * usable_arenas list.  However, if the arena becomes wholly allocated,
     * we need to remove its arena_object from usable_arenas.
     */
    assert(usable_arenas->nfreepools > 0);
    if (nfp2lasta[usable_arenas->nfreepools] == usable_arenas) {
        /* It's the last of this size, so there won't be any. */
        nfp2lasta[usable_arenas->nfreepools] = NULL;
    }
    /* If any free pools will remain, it will be the new smallest. */
    if (usable_arenas->nfreepools > 1) {
        assert(nfp2lasta[usable_arenas->nfreepools - 1] == NULL);
        nfp2lasta[usable_arenas->nfreepools - 1] = usable_arenas;
    }

    /* Try to get a cached free pool (a pool previously returned by
     * insert_to_freepool()). */
    poolp pool = usable_arenas->freepools;
    if (LIKELY(pool != NULL)) {
        /* Unlink from cached pools. */
        usable_arenas->freepools = pool->nextpool;
        usable_arenas->nfreepools--;
        if (UNLIKELY(usable_arenas->nfreepools == 0)) {
            /* Wholly allocated:  remove. */
            assert(usable_arenas->freepools == NULL);
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(usable_arenas->freepools != NULL ||
                   usable_arenas->pool_address <=
                   (block*)usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
        }
    }
    else {
        /* Carve off a new pool from the arena's high-water mark. */
        assert(usable_arenas->nfreepools > 0);
        assert(usable_arenas->freepools == NULL);
        pool = (poolp)usable_arenas->pool_address;
        assert((block*)pool <= (block*)usable_arenas->address +
                                 ARENA_SIZE - POOL_SIZE);
        pool->arenaindex = (uint)(usable_arenas - arenas);
        assert(&arenas[pool->arenaindex] == usable_arenas);
        /* DUMMY_SIZE_IDX forces the header-initialization path below. */
        pool->szidx = DUMMY_SIZE_IDX;
        usable_arenas->pool_address += POOL_SIZE;
        --usable_arenas->nfreepools;

        if (usable_arenas->nfreepools == 0) {
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);
            /* Unlink the arena:  it is completely allocated. */
            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
    }

    /* Frontlink to used pools. */
    block *bp;
    poolp next = usedpools[size + size]; /* == prev */
    pool->nextpool = next;
    pool->prevpool = next;
    next->nextpool = pool;
    next->prevpool = pool;
    pool->ref.count = 1;
    if (pool->szidx == size) {
        /* Luckily, this pool last contained blocks
         * of the same size class, so its header
         * and free list are already initialized.
         */
        bp = pool->freeblock;
        assert(bp != NULL);
        pool->freeblock = *(block **)bp;
        return bp;
    }
    /*
     * Initialize the pool header, set up the free list to
     * contain just the second block, and return the first
     * block.
     */
    pool->szidx = size;
    size = INDEX2SIZE(size);
    bp = (block *)pool + POOL_OVERHEAD;
    pool->nextoffset = POOL_OVERHEAD + (size << 1);
    pool->maxnextoffset = POOL_SIZE - size;
    pool->freeblock = bp + size;
    *(block **)(pool->freeblock) = NULL;
    return bp;
}
1891
/* pymalloc allocator

   Return a pointer to newly allocated memory if pymalloc allocated memory.

   Return NULL if pymalloc failed to allocate the memory block: on bigger
   requests, on error in the code below (as a last chance to serve the request)
   or when the max memory limit has been reached.

   `ctx` is unused by this implementation.
*/
static inline void*
pymalloc_alloc(void *ctx, size_t nbytes)
{
#ifdef WITH_VALGRIND
    /* Under Valgrind, opt out of pymalloc entirely (return NULL so the
     * caller falls back to the raw allocator).  -1 means "not yet
     * probed"; the answer is cached in running_on_valgrind. */
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return NULL;
    }
#endif

    /* pymalloc serves only small, non-zero requests; anything else is
     * handled by the raw allocator in the callers. */
    if (UNLIKELY(nbytes == 0)) {
        return NULL;
    }
    if (UNLIKELY(nbytes > SMALL_REQUEST_THRESHOLD)) {
        return NULL;
    }

    /* Map the request to its size-class index: class `size` covers
     * requests in (size*ALIGNMENT, (size+1)*ALIGNMENT] bytes. */
    uint size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    poolp pool = usedpools[size + size];
    block *bp;

    if (LIKELY(pool != pool->nextpool)) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);

        if (UNLIKELY((pool->freeblock = *(block **)bp) == NULL)) {
            // Reached the end of the free list, try to extend it.
            pymalloc_pool_extend(pool, size);
        }
    }
    else {
        /* There isn't a pool of the right size class immediately
         * available:  use a free pool.
         */
        bp = allocate_from_new_pool(size);
    }

    return (void *)bp;
}
1946
Victor Stinner9ed83c42017-10-31 12:18:10 -07001947
Victor Stinnerdb067af2014-05-02 22:31:14 +02001948static void *
1949_PyObject_Malloc(void *ctx, size_t nbytes)
1950{
Victor Stinner18f8dcf2019-08-20 12:28:02 +01001951 void* ptr = pymalloc_alloc(ctx, nbytes);
1952 if (LIKELY(ptr != NULL)) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001953 return ptr;
1954 }
1955
1956 ptr = PyMem_RawMalloc(nbytes);
1957 if (ptr != NULL) {
Neil Schemenauer5d25f2b2019-07-10 12:04:16 -07001958 raw_allocated_blocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001959 }
1960 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001961}
1962
Victor Stinner9ed83c42017-10-31 12:18:10 -07001963
Victor Stinnerdb067af2014-05-02 22:31:14 +02001964static void *
1965_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
1966{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001967 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
1968 size_t nbytes = nelem * elsize;
1969
Victor Stinner18f8dcf2019-08-20 12:28:02 +01001970 void* ptr = pymalloc_alloc(ctx, nbytes);
1971 if (LIKELY(ptr != NULL)) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001972 memset(ptr, 0, nbytes);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001973 return ptr;
1974 }
1975
1976 ptr = PyMem_RawCalloc(nelem, elsize);
1977 if (ptr != NULL) {
Neil Schemenauer5d25f2b2019-07-10 12:04:16 -07001978 raw_allocated_blocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001979 }
1980 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001981}
1982
Neil Schemenauera35c6882001-02-27 04:45:05 +00001983
Inada Naokifb265042019-07-17 21:23:57 +09001984static void
1985insert_to_usedpool(poolp pool)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001986{
Inada Naokifb265042019-07-17 21:23:57 +09001987 assert(pool->ref.count > 0); /* else the pool is empty */
Neil Schemenauera35c6882001-02-27 04:45:05 +00001988
Inada Naokifb265042019-07-17 21:23:57 +09001989 uint size = pool->szidx;
1990 poolp next = usedpools[size + size];
1991 poolp prev = next->prevpool;
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001992
Inada Naokifb265042019-07-17 21:23:57 +09001993 /* insert pool before next: prev <-> pool <-> next */
1994 pool->nextpool = next;
1995 pool->prevpool = prev;
1996 next->prevpool = pool;
1997 prev->nextpool = pool;
1998}
Benjamin Peterson05159c42009-12-03 03:01:27 +00001999
Inada Naokifb265042019-07-17 21:23:57 +09002000static void
2001insert_to_freepool(poolp pool)
2002{
2003 poolp next = pool->nextpool;
2004 poolp prev = pool->prevpool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002005 next->prevpool = prev;
2006 prev->nextpool = next;
2007
2008 /* Link the pool to freepools. This is a singly-linked
2009 * list, and pool->prevpool isn't used there.
2010 */
Inada Naokifb265042019-07-17 21:23:57 +09002011 struct arena_object *ao = &arenas[pool->arenaindex];
Victor Stinner9ed83c42017-10-31 12:18:10 -07002012 pool->nextpool = ao->freepools;
2013 ao->freepools = pool;
Inada Naokifb265042019-07-17 21:23:57 +09002014 uint nf = ao->nfreepools;
Tim Peters1c263e32019-05-31 21:16:04 -05002015 /* If this is the rightmost arena with this number of free pools,
2016 * nfp2lasta[nf] needs to change. Caution: if nf is 0, there
2017 * are no arenas in usable_arenas with that value.
2018 */
2019 struct arena_object* lastnf = nfp2lasta[nf];
Victor Stinner18f8dcf2019-08-20 12:28:02 +01002020 assert((nf == 0 && lastnf == NULL) ||
2021 (nf > 0 &&
Tim Peters1c263e32019-05-31 21:16:04 -05002022 lastnf != NULL &&
2023 lastnf->nfreepools == nf &&
2024 (lastnf->nextarena == NULL ||
2025 nf < lastnf->nextarena->nfreepools)));
2026 if (lastnf == ao) { /* it is the rightmost */
2027 struct arena_object* p = ao->prevarena;
2028 nfp2lasta[nf] = (p != NULL && p->nfreepools == nf) ? p : NULL;
2029 }
2030 ao->nfreepools = ++nf;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002031
2032 /* All the rest is arena management. We just freed
2033 * a pool, and there are 4 cases for arena mgmt:
2034 * 1. If all the pools are free, return the arena to
Tim Petersd1c85a22019-06-12 22:41:03 -05002035 * the system free(). Except if this is the last
2036 * arena in the list, keep it to avoid thrashing:
2037 * keeping one wholly free arena in the list avoids
2038 * pathological cases where a simple loop would
2039 * otherwise provoke needing to allocate and free an
2040 * arena on every iteration. See bpo-37257.
Victor Stinner9ed83c42017-10-31 12:18:10 -07002041 * 2. If this is the only free pool in the arena,
2042 * add the arena back to the `usable_arenas` list.
2043 * 3. If the "next" arena has a smaller count of free
2044 * pools, we have to "slide this arena right" to
2045 * restore that usable_arenas is sorted in order of
2046 * nfreepools.
2047 * 4. Else there's nothing more to do.
2048 */
Tim Petersd1c85a22019-06-12 22:41:03 -05002049 if (nf == ao->ntotalpools && ao->nextarena != NULL) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07002050 /* Case 1. First unlink ao from usable_arenas.
2051 */
2052 assert(ao->prevarena == NULL ||
2053 ao->prevarena->address != 0);
2054 assert(ao ->nextarena == NULL ||
2055 ao->nextarena->address != 0);
2056
2057 /* Fix the pointer in the prevarena, or the
2058 * usable_arenas pointer.
2059 */
2060 if (ao->prevarena == NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01002061 usable_arenas = ao->nextarena;
2062 assert(usable_arenas == NULL ||
2063 usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002064 }
2065 else {
2066 assert(ao->prevarena->nextarena == ao);
2067 ao->prevarena->nextarena =
2068 ao->nextarena;
2069 }
2070 /* Fix the pointer in the nextarena. */
2071 if (ao->nextarena != NULL) {
2072 assert(ao->nextarena->prevarena == ao);
2073 ao->nextarena->prevarena =
2074 ao->prevarena;
2075 }
2076 /* Record that this arena_object slot is
2077 * available to be reused.
2078 */
Victor Stinner9e87e772017-11-24 12:09:24 +01002079 ao->nextarena = unused_arena_objects;
2080 unused_arena_objects = ao;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002081
Neil Schemenauer85b6b702021-03-29 19:51:15 -07002082#if WITH_PYMALLOC_RADIX_TREE
2083 /* mark arena region as not under control of obmalloc */
2084 arena_map_mark_used(ao->address, 0);
2085#endif
2086
Victor Stinner9ed83c42017-10-31 12:18:10 -07002087 /* Free the entire arena. */
Victor Stinner9e87e772017-11-24 12:09:24 +01002088 _PyObject_Arena.free(_PyObject_Arena.ctx,
Victor Stinner9ed83c42017-10-31 12:18:10 -07002089 (void *)ao->address, ARENA_SIZE);
2090 ao->address = 0; /* mark unassociated */
Victor Stinner9e87e772017-11-24 12:09:24 +01002091 --narenas_currently_allocated;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002092
Inada Naokifb265042019-07-17 21:23:57 +09002093 return;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002094 }
2095
2096 if (nf == 1) {
2097 /* Case 2. Put ao at the head of
2098 * usable_arenas. Note that because
2099 * ao->nfreepools was 0 before, ao isn't
2100 * currently on the usable_arenas list.
2101 */
Victor Stinner9e87e772017-11-24 12:09:24 +01002102 ao->nextarena = usable_arenas;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002103 ao->prevarena = NULL;
Victor Stinner9e87e772017-11-24 12:09:24 +01002104 if (usable_arenas)
2105 usable_arenas->prevarena = ao;
2106 usable_arenas = ao;
2107 assert(usable_arenas->address != 0);
Tim Peters1c263e32019-05-31 21:16:04 -05002108 if (nfp2lasta[1] == NULL) {
2109 nfp2lasta[1] = ao;
2110 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07002111
Inada Naokifb265042019-07-17 21:23:57 +09002112 return;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002113 }
2114
2115 /* If this arena is now out of order, we need to keep
2116 * the list sorted. The list is kept sorted so that
2117 * the "most full" arenas are used first, which allows
2118 * the nearly empty arenas to be completely freed. In
2119 * a few un-scientific tests, it seems like this
2120 * approach allowed a lot more memory to be freed.
2121 */
Tim Peters1c263e32019-05-31 21:16:04 -05002122 /* If this is the only arena with nf, record that. */
2123 if (nfp2lasta[nf] == NULL) {
2124 nfp2lasta[nf] = ao;
2125 } /* else the rightmost with nf doesn't change */
2126 /* If this was the rightmost of the old size, it remains in place. */
2127 if (ao == lastnf) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07002128 /* Case 4. Nothing to do. */
Inada Naokifb265042019-07-17 21:23:57 +09002129 return;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002130 }
Tim Peters1c263e32019-05-31 21:16:04 -05002131 /* If ao were the only arena in the list, the last block would have
2132 * gotten us out.
2133 */
2134 assert(ao->nextarena != NULL);
2135
2136 /* Case 3: We have to move the arena towards the end of the list,
2137 * because it has more free pools than the arena to its right. It needs
2138 * to move to follow lastnf.
Victor Stinner9ed83c42017-10-31 12:18:10 -07002139 * First unlink ao from usable_arenas.
2140 */
2141 if (ao->prevarena != NULL) {
2142 /* ao isn't at the head of the list */
2143 assert(ao->prevarena->nextarena == ao);
2144 ao->prevarena->nextarena = ao->nextarena;
2145 }
2146 else {
2147 /* ao is at the head of the list */
Victor Stinner9e87e772017-11-24 12:09:24 +01002148 assert(usable_arenas == ao);
2149 usable_arenas = ao->nextarena;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002150 }
2151 ao->nextarena->prevarena = ao->prevarena;
Tim Peters1c263e32019-05-31 21:16:04 -05002152 /* And insert after lastnf. */
2153 ao->prevarena = lastnf;
2154 ao->nextarena = lastnf->nextarena;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002155 if (ao->nextarena != NULL) {
2156 ao->nextarena->prevarena = ao;
2157 }
Tim Peters1c263e32019-05-31 21:16:04 -05002158 lastnf->nextarena = ao;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002159 /* Verify that the swaps worked. */
2160 assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
2161 assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
2162 assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
Victor Stinner9e87e772017-11-24 12:09:24 +01002163 assert((usable_arenas == ao && ao->prevarena == NULL)
Victor Stinner9ed83c42017-10-31 12:18:10 -07002164 || ao->prevarena->nextarena == ao);
Inada Naokifb265042019-07-17 21:23:57 +09002165}
Victor Stinner9ed83c42017-10-31 12:18:10 -07002166
/* Free a memory block allocated by pymalloc_alloc().
   Return 1 if it was freed.
   Return 0 if the block was not allocated by pymalloc_alloc().
   `ctx` is unused by this implementation; `p` must not be NULL. */
static inline int
pymalloc_free(void *ctx, void *p)
{
    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Under Valgrind, pymalloc_alloc() never allocates, so nothing
     * here can belong to pymalloc. */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    poolp pool = POOL_ADDR(p);
    if (UNLIKELY(!address_in_range(p, pool))) {
        return 0;
    }
    /* We allocated this address. */

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    block *lastfree = pool->freeblock;
    *(block **)p = lastfree;
    pool->freeblock = (block *)p;
    pool->ref.count--;

    if (UNLIKELY(lastfree == NULL)) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        insert_to_usedpool(pool);
        return 1;
    }

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (LIKELY(pool->ref.count != 0)) {
        /* pool isn't empty:  leave it in usedpools */
        return 1;
    }

    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (being not referenced, they are perhaps paged out).
     */
    insert_to_freepool(pool);
    return 1;
}
2226
2227
2228static void
2229_PyObject_Free(void *ctx, void *p)
2230{
2231 /* PyObject_Free(NULL) has no effect */
2232 if (p == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002233 return;
2234 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00002235
Inada Naokifb265042019-07-17 21:23:57 +09002236 if (UNLIKELY(!pymalloc_free(ctx, p))) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07002237 /* pymalloc didn't allocate this address */
2238 PyMem_RawFree(p);
Neil Schemenauer5d25f2b2019-07-10 12:04:16 -07002239 raw_allocated_blocks--;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002240 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00002241}
2242
Neil Schemenauera35c6882001-02-27 04:45:05 +00002243
/* pymalloc realloc.

   If nbytes==0, then as the Python docs promise, we do not treat this like
   free(p), and return a non-NULL result.

   Return 1 if pymalloc reallocated memory and wrote the new pointer into
   newptr_p.

   Return 0 if pymalloc didn't allocate p (the caller must then fall back
   to the raw allocator's realloc). */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt:  let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff: it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        /* Shrinking by >= 25%: only copy the bytes still in use. */
        size = nbytes;
    }

    /* Move to a fresh block: allocate, copy the live bytes, free the old
       block.  On allocation failure *newptr_p is set to NULL. */
    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    *newptr_p = bp;
    return 1;
}
2312
2313
2314static void *
2315_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
2316{
2317 void *ptr2;
2318
2319 if (ptr == NULL) {
2320 return _PyObject_Malloc(ctx, nbytes);
2321 }
2322
2323 if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
2324 return ptr2;
2325 }
2326
2327 return PyMem_RawRealloc(ptr, nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00002328}
2329
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002330#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00002331
2332/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00002333/* pymalloc not enabled: Redirect the entry points to malloc. These will
2334 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00002335
Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    /* pymalloc is disabled: no per-block accounting exists, so the
       number of tracked allocated blocks is always zero. */
    return 0;
}
2341
Tim Peters1221c0a2002-03-23 00:20:15 +00002342#endif /* WITH_PYMALLOC */
2343
Victor Stinner34be8072016-03-14 12:04:26 +01002344
Tim Petersddea2082002-03-23 10:03:50 +00002345/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */
Tim Petersddea2082002-03-23 10:03:50 +00002349
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002350/* Uncomment this define to add the "serialno" field */
2351/* #define PYMEM_DEBUG_SERIALNO */
2352
2353#ifdef PYMEM_DEBUG_SERIALNO
Victor Stinner9e87e772017-11-24 12:09:24 +01002354static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
2355
Tim Peterse0850172002-03-24 00:34:21 +00002356/* serialno is always incremented via calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002357 * to supply a single place to set a breakpoint.
2358 */
static void
bumpserialno(void)
{
    /* Advance the global debug-allocation serial number.  All increments
       funnel through here so a single breakpoint catches them all. */
    ++serialno;
}
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002364#endif
Tim Peterse0850172002-03-24 00:34:21 +00002365
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002366#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00002367
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002368#ifdef PYMEM_DEBUG_SERIALNO
2369# define PYMEM_DEBUG_EXTRA_BYTES 4 * SST
2370#else
2371# define PYMEM_DEBUG_EXTRA_BYTES 3 * SST
2372#endif
2373
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002374/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
2375static size_t
2376read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002377{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002378 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002379 size_t result = *q++;
2380 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002381
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002382 for (i = SST; --i > 0; ++q)
2383 result = (result << 8) | *q;
2384 return result;
Tim Petersddea2082002-03-23 10:03:50 +00002385}
2386
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002387/* Write n as a big-endian size_t, MSB at address p, LSB at
2388 * p + sizeof(size_t) - 1.
2389 */
Tim Petersddea2082002-03-23 10:03:50 +00002390static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002391write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00002392{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002393 uint8_t *q = (uint8_t *)p + SST - 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002394 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002395
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002396 for (i = SST; --i >= 0; --q) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07002397 *q = (uint8_t)(n & 0xff);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002398 n >>= 8;
2399 }
Tim Petersddea2082002-03-23 10:03:50 +00002400}
2401
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002402/* Let S = sizeof(size_t). The debug malloc asks for 4 * S extra bytes and
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002403 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00002404
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002405p[0: S]
2406 Number of bytes originally asked for. This is a size_t, big-endian (easier
2407 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02002408p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05002409 API ID. See PEP 445. This is a character, but seems undocumented.
2410p[S+1: 2*S]
Victor Stinner60ec6ef2019-10-07 22:31:42 +02002411 Copies of PYMEM_FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002412p[2*S: 2*S+n]
Victor Stinner60ec6ef2019-10-07 22:31:42 +02002413 The requested memory, filled with copies of PYMEM_CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00002414 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002415 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00002416 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002417p[2*S+n: 2*S+n+S]
Victor Stinner60ec6ef2019-10-07 22:31:42 +02002418 Copies of PYMEM_FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002419p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02002420 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
2421 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002422 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00002423 If "bad memory" is detected later, the serial number gives an
2424 excellent way to set a breakpoint on the next run, to capture the
2425 instant at which this block was passed out.
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002426
2427If PYMEM_DEBUG_SERIALNO is not defined (default), the debug malloc only asks
2428for 3 * S extra bytes, and omits the last serialno field.
Tim Petersddea2082002-03-23 10:03:50 +00002429*/
2430
static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of malloc'ed pad block */
    uint8_t *data;        /* p + 2*SST == pointer to data bytes */
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* nbytes + PYMEM_DEBUG_EXTRA_BYTES */

    if (nbytes > (size_t)PY_SSIZE_T_MAX - PYMEM_DEBUG_EXTRA_BYTES) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + PYMEM_DEBUG_EXTRA_BYTES;

    /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
                ^--- p    ^--- data   ^--- tail
       S: nbytes stored as size_t
       I: API identifier (1 byte)
       F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
       C: Clean bytes used later to store actual data
       N: Serial number stored as size_t

       If PYMEM_DEBUG_SERIALNO is not defined (default), the last NNNN field
       is omitted. */

    if (use_calloc) {
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    }
    else {
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    }
    if (p == NULL) {
        return NULL;
    }
    data = p + 2*SST;

#ifdef PYMEM_DEBUG_SERIALNO
    bumpserialno();
#endif

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, PYMEM_FORBIDDENBYTE, SST-1);

    /* calloc already zeroed the data; otherwise fill it with CLEANBYTE
       so reads of uninitialized memory are recognizable in a dump. */
    if (nbytes > 0 && !use_calloc) {
        memset(data, PYMEM_CLEANBYTE, nbytes);
    }

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = data + nbytes;
    memset(tail, PYMEM_FORBIDDENBYTE, SST);
#ifdef PYMEM_DEBUG_SERIALNO
    write_size_t(tail + SST, serialno);
#endif

    return data;
}
2490
static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    /* malloc flavor of the debug allocator: data is not zeroed
       (it is filled with CLEANBYTE by _PyMem_DebugRawAlloc). */
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}
2496
2497static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01002498_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
Victor Stinnerdb067af2014-05-02 22:31:14 +02002499{
2500 size_t nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002501 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
Victor Stinnerdb067af2014-05-02 22:31:14 +02002502 nbytes = nelem * elsize;
Victor Stinnerc4aec362016-03-14 22:26:53 +01002503 return _PyMem_DebugRawAlloc(1, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02002504}
2505
Victor Stinner9ed83c42017-10-31 12:18:10 -07002506
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002507/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002508 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Victor Stinner60ec6ef2019-10-07 22:31:42 +02002509 Then fills the original bytes with PYMEM_DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00002510 Then calls the underlying free.
2511*/
Victor Stinner0507bf52013-07-07 02:05:46 +02002512static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01002513_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002514{
Victor Stinner9ed83c42017-10-31 12:18:10 -07002515 /* PyMem_Free(NULL) has no effect */
2516 if (p == NULL) {
2517 return;
2518 }
2519
Victor Stinner0507bf52013-07-07 02:05:46 +02002520 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07002521 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002522 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00002523
Victor Stinner9e5d30c2020-03-07 00:54:20 +01002524 _PyMem_DebugCheckAddress(__func__, api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002525 nbytes = read_size_t(q);
Victor Stinnere8f9acf2019-04-12 21:54:06 +02002526 nbytes += PYMEM_DEBUG_EXTRA_BYTES;
Victor Stinner60ec6ef2019-10-07 22:31:42 +02002527 memset(q, PYMEM_DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02002528 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00002529}
2530
Victor Stinner9ed83c42017-10-31 12:18:10 -07002531
static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    /* Debug realloc: validate the old block, erase it (saving up to
       2*ERASED_SIZE user bytes), resize through the wrapped allocator,
       then rebuild the debug decorations and restore the saved bytes. */
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *head;        /* base address of malloc'ed pad block */
    uint8_t *data;        /* pointer to data bytes */
    uint8_t *r;
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
#define ERASED_SIZE 64
    uint8_t save[2*ERASED_SIZE];  /* A copy of erased bytes. */

    _PyMem_DebugCheckAddress(__func__, api->api_id, p);

    data = (uint8_t *)p;
    head = data - 2*SST;
    original_nbytes = read_size_t(head);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - PYMEM_DEBUG_EXTRA_BYTES) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + PYMEM_DEBUG_EXTRA_BYTES;

    tail = data + original_nbytes;
#ifdef PYMEM_DEBUG_SERIALNO
    size_t block_serialno = read_size_t(tail + SST);
#endif
    /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning and
       ERASED_SIZE bytes at the end as dead, and save a copy of the erased
       bytes so they can be restored after the underlying realloc.
     */
    if (original_nbytes <= sizeof(save)) {
        memcpy(save, data, original_nbytes);
        memset(data - 2 * SST, PYMEM_DEADBYTE,
               original_nbytes + PYMEM_DEBUG_EXTRA_BYTES);
    }
    else {
        memcpy(save, data, ERASED_SIZE);
        memset(head, PYMEM_DEADBYTE, ERASED_SIZE + 2 * SST);
        memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
        memset(tail - ERASED_SIZE, PYMEM_DEADBYTE,
               ERASED_SIZE + PYMEM_DEBUG_EXTRA_BYTES - 2 * SST);
    }

    /* Resize and add decorations. */
    r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
    if (r == NULL) {
        /* if realloc() failed: rewrite header and footer which have
           just been erased */
        nbytes = original_nbytes;
    }
    else {
        head = r;
#ifdef PYMEM_DEBUG_SERIALNO
        bumpserialno();
        block_serialno = serialno;
#endif
    }
    data = head + 2*SST;

    /* Rewrite the header: size, API id and leading forbidden bytes. */
    write_size_t(head, nbytes);
    head[SST] = (uint8_t)api->api_id;
    memset(head + SST + 1, PYMEM_FORBIDDENBYTE, SST-1);

    /* Rewrite the trailer: trailing forbidden bytes (and serial number). */
    tail = data + nbytes;
    memset(tail, PYMEM_FORBIDDENBYTE, SST);
#ifdef PYMEM_DEBUG_SERIALNO
    write_size_t(tail + SST, block_serialno);
#endif

    /* Restore saved bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(data, save, Py_MIN(nbytes, original_nbytes));
    }
    else {
        size_t i = original_nbytes - ERASED_SIZE;
        memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
        if (nbytes > i) {
            memcpy(data + i, &save[ERASED_SIZE],
                   Py_MIN(nbytes - i, ERASED_SIZE));
        }
    }

    if (r == NULL) {
        return NULL;
    }

    if (nbytes > original_nbytes) {
        /* growing: mark new extra memory clean */
        memset(data + original_nbytes, PYMEM_CLEANBYTE,
               nbytes - original_nbytes);
    }

    return data;
}
2631
static inline void
_PyMem_DebugCheckGIL(const char *func)
{
    /* The debug hooks are not thread-safe: abort if the caller does
       not hold the GIL. */
    if (PyGILState_Check()) {
        return;
    }
    _Py_FatalErrorFunc(func,
                       "Python memory allocator called "
                       "without holding the GIL");
}
2641
static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    /* GIL-checking front end for _PyMem_DebugRawMalloc(). */
    _PyMem_DebugCheckGIL(__func__);
    return _PyMem_DebugRawMalloc(ctx, nbytes);
}
2648
static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* GIL-checking front end for _PyMem_DebugRawCalloc(). */
    _PyMem_DebugCheckGIL(__func__);
    return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
}
2655
Victor Stinner9ed83c42017-10-31 12:18:10 -07002656
static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    /* GIL-checking front end for _PyMem_DebugRawFree(). */
    _PyMem_DebugCheckGIL(__func__);
    _PyMem_DebugRawFree(ctx, ptr);
}
2663
Victor Stinner9ed83c42017-10-31 12:18:10 -07002664
static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    /* GIL-checking front end for _PyMem_DebugRawRealloc(). */
    _PyMem_DebugCheckGIL(__func__);
    return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
}
2671
/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
static void
_PyMem_DebugCheckAddress(const char *func, char api, const void *p)
{
    assert(p != NULL);

    const uint8_t *q = (const uint8_t *)p;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        _PyObject_DebugDumpAddress(p);
        _Py_FatalErrorFormat(func,
                             "bad ID: Allocated using API '%c', "
                             "verified using API '%c'",
                             id, api);
    }

    /* Check the stuff at the start of p first: if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != PYMEM_FORBIDDENBYTE) {
            _PyObject_DebugDumpAddress(p);
            _Py_FatalErrorFunc(func, "bad leading pad byte");
        }
    }

    /* Only after the header survives do we trust the stored size enough
     * to locate and verify the trailing pad bytes. */
    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != PYMEM_FORBIDDENBYTE) {
            _PyObject_DebugDumpAddress(p);
            _Py_FatalErrorFunc(func, "bad trailing pad byte");
        }
    }
}
2718
/* Display info to stderr about the memory block at p.  Called on the way
 * to a fatal error, so it is deliberately defensive: the header is dumped
 * before the (possibly corrupt) size field is used to find the tail. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %zu bytes originally requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != PYMEM_FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                PYMEM_FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != PYMEM_FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "    may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, (void *)tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != PYMEM_FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                PYMEM_FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != PYMEM_FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

#ifdef PYMEM_DEBUG_SERIALNO
    size_t serial = read_size_t(tail + SST);
    fprintf(stderr,
            "    The block was made by call #%zu to debug malloc/realloc.\n",
            serial);
#endif

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}
2827
David Malcolm49526f42012-06-22 14:55:41 -04002828
/* Write one line of the statistics report to `out`: `msg` padded to a
 * 35-column field, an '=' sign, then `value` right-justified in a
 * 21-character field with commas grouping every three digits, e.g.
 *     "# arenas allocated current        =                   17"
 * Returns `value` unchanged so callers can accumulate running totals.
 */
static size_t
printone(FILE *out, const char* msg, size_t value)
{
    const size_t result = value;
    char field[100];
    int pos, pad;

    fputs(msg, out);
    for (pad = (int)strlen(msg); pad < 35; ++pad) {
        fputc(' ', out);
    }
    fputc('=', out);

    /* Build the number from the back of the buffer: terminator and
     * newline first, then digits least-significant first, inserting a
     * comma after every third digit while more digits remain. */
    pos = 22;
    field[pos--] = '\0';
    field[pos--] = '\n';
    int group = 3;
    do {
        size_t rest = value / 10;
        unsigned int digit = (unsigned int)(value - rest * 10);
        value = rest;
        field[pos--] = (char)(digit + '0');
        if (--group == 0 && value && pos >= 0) {
            group = 3;
            field[pos--] = ',';
        }
    } while (value && pos >= 0);

    /* Left-pad with blanks so the number is right-aligned. */
    while (pos >= 0) {
        field[pos--] = ' ';
    }
    fputs(field, out);

    return result;
}
2864
/* Emit one summary line (via printone()) to `out` describing an
 * allocator's use of `num_blocks` fixed-size blocks called `block_name`,
 * each `sizeof_block` bytes, followed by the total byte count.
 */
void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char describe[128];   /* "N <name>s * M bytes each" */
    char padded[128];     /* description right-justified for printone() */

    PyOS_snprintf(describe, sizeof(describe),
                  "%d %ss * %zd bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(padded, sizeof(padded),
                  "%48s ", describe);
    (void)printone(out, padded, num_blocks * sizeof_block);
}
2878
Victor Stinner34be8072016-03-14 12:04:26 +01002879
David Malcolm49526f42012-06-22 14:55:41 -04002880#ifdef WITH_PYMALLOC
2881
#ifdef Py_DEBUG
/* Membership test for a pool list threaded through the nextpool
 * pointers.  The list may be NULL-terminated or circular.  Returns 1 if
 * `target` occurs in `list`, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    assert(target != NULL);
    for (poolp cursor = list; cursor != NULL; cursor = cursor->nextpool) {
        if (cursor == target) {
            return 1;
        }
        if (cursor->nextpool == list) {
            /* Circular list is about to wrap to the head: all seen. */
            break;
        }
    }
    return 0;
}
#endif
2902
/* Print summary info to "out" about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 *
 * Return 0 if the memory debug hooks are not installed or no statistics was
 * written into out, return 1 otherwise.
 */
int
_PyObject_DebugMallocStats(FILE *out)
{
    /* Nothing to report unless pymalloc is the active allocator. */
    if (!_PyMem_PymallocEnabled()) {
        return 0;
    }

    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    /* Zero the per-size-class counters before the sweep below. */
    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint j;
        uintptr_t base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena; pool_address marks the end of
         * the pools carved out of this arena so far */
        assert(base <= (uintptr_t) arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            /* NOTE(review): usedpools appears to be indexed in pairs, so
             * 2*sz selects the list head for size class sz -- confirm
             * against the usedpools declaration. */
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    /* Per-size-class table. */
    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            /* An unused size class must have no blocks at all. */
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u %11zu %15zu %13zu\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
#ifdef PYMEM_DEBUG_SERIALNO
    /* serialno counts debug malloc/realloc calls; only meaningful when
     * the debug hooks are installed. */
    if (_PyMem_DebugEnabled()) {
        (void)printone(out, "# times object malloc called", serialno);
    }
#endif
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);
#ifdef USE_INTERIOR_NODES
    /* Radix-tree arena map node counts (mid/bot levels). */
    (void)printone(out, "# arena map mid nodes", arena_map_mid_count);
    (void)printone(out, "# arena map bot nodes", arena_map_bot_count);
    fputc('\n', out);
#endif


    PyOS_snprintf(buf, sizeof(buf),
                  "%zu arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    /* Overall accounting: blocks plus the various overheads should add
     * up to narenas * ARENA_SIZE. */
    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
#ifdef WITH_PYMALLOC_RADIX_TREE
    total += printone(out, "# bytes lost to arena map root", sizeof(arena_map_root));
#endif
#ifdef USE_INTERIOR_NODES
    total += printone(out, "# bytes lost to arena map mid",
                      sizeof(arena_map_mid_t) * arena_map_mid_count);
    total += printone(out, "# bytes lost to arena map bot",
                      sizeof(arena_map_bot_t) * arena_map_bot_count);
#endif
    (void)printone(out, "Total", total);
    return 1;
}
3066
David Malcolm49526f42012-06-22 14:55:41 -04003067#endif /* #ifdef WITH_PYMALLOC */