#include "Python.h"
#include "internal/mem.h"

#include <stdbool.h>


/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


/* Python's malloc wrappers (see pymem.h) */

#undef uint
#define uint    unsigned int    /* assuming >= 16 bits */

/* Forward declaration */
static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugRawFree(void *ctx, void *ptr);

static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);

static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain);

#if defined(__has_feature)  /* Clang */
 #if __has_feature(address_sanitizer)  /* is ASAN enabled? */
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
 #else
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#else
 #if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
 #else
  #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif

/* bpo-35053: Declare the tracemalloc configuration here rather than in
   Modules/_tracemalloc.c, because _tracemalloc can be compiled as a dynamic
   library whereas _Py_NewReference() requires it. */
struct _PyTraceMalloc_Config _Py_tracemalloc_config = _PyTraceMalloc_Config_INIT;


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
       for calloc(0, 0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (nelem == 0 || elsize == 0) {
        nelem = 1;
        elsize = 1;
    }
    return calloc(nelem, elsize);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif

#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
#ifdef WITH_PYMALLOC
#  define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
#endif

#define PYRAW_ALLOC MALLOC_ALLOC
#ifdef WITH_PYMALLOC
#  define PYOBJ_ALLOC PYMALLOC_ALLOC
#else
#  define PYOBJ_ALLOC MALLOC_ALLOC
#endif
#define PYMEM_ALLOC PYOBJ_ALLOC

typedef struct {
    /* We tag each block with an API ID in order to tag API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', PYRAW_ALLOC},
    {'m', PYMEM_ALLOC},
    {'o', PYOBJ_ALLOC}
    };

#define PYDBGRAW_ALLOC \
    {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC \
    {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC \
    {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}

#ifdef Py_DEBUG
static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC;
#else
static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC;
#endif


static int
pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug,
                            PyMemAllocatorEx *old_alloc)
{
    if (old_alloc != NULL) {
        PyMem_GetAllocator(domain, old_alloc);
    }

    PyMemAllocatorEx new_alloc;
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW:
        new_alloc = (PyMemAllocatorEx)PYRAW_ALLOC;
        break;
    case PYMEM_DOMAIN_MEM:
        new_alloc = (PyMemAllocatorEx)PYMEM_ALLOC;
        break;
    case PYMEM_DOMAIN_OBJ:
        new_alloc = (PyMemAllocatorEx)PYOBJ_ALLOC;
        break;
    default:
        /* unknown domain */
        return -1;
    }
    PyMem_SetAllocator(domain, &new_alloc);
    if (debug) {
        _PyMem_SetupDebugHooksDomain(domain);
    }
    return 0;
}


int
_PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
                           PyMemAllocatorEx *old_alloc)
{
#ifdef Py_DEBUG
    const int debug = 1;
#else
    const int debug = 0;
#endif
    return pymem_set_default_allocator(domain, debug, old_alloc);
}


int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is empty or is not set, or is ignored (-E/-I command
           line options): use the default memory allocators */
        opt = "default";
    }

    if (strcmp(opt, "default") == 0) {
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_MEM, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_OBJ, NULL);
    }
    else if (strcmp(opt, "debug") == 0) {
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_RAW, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_MEM, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_OBJ, 1, NULL);
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0 || strcmp(opt, "pymalloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);

        PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &pymalloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &pymalloc);

        if (strcmp(opt, "pymalloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
#endif
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &malloc_alloc);

        if (strcmp(opt, "malloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}
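
/* Usage note (editorial): the strings accepted above are the values of the
   PYTHONMALLOC environment variable, e.g.

       PYTHONMALLOC=malloc python3 script.py    # force the C malloc everywhere
       PYTHONMALLOC=debug python3 script.py     # default allocators + debug hooks

   "pymalloc_debug" and "malloc_debug" similarly add the debug hooks on top of
   the named allocator. A --with-pydebug build installs the debug hooks by
   default (see the Py_DEBUG blocks above). */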


static int
pymemallocator_eq(PyMemAllocatorEx *a, PyMemAllocatorEx *b)
{
    return (memcmp(a, b, sizeof(PyMemAllocatorEx)) == 0);
}


const char*
_PyMem_GetAllocatorsName(void)
{
    PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
#ifdef WITH_PYMALLOC
    PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
#endif

    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &malloc_alloc) &&
        pymemallocator_eq(&_PyObject, &malloc_alloc))
    {
        return "malloc";
    }
#ifdef WITH_PYMALLOC
    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &pymalloc) &&
        pymemallocator_eq(&_PyObject, &pymalloc))
    {
        return "pymalloc";
    }
#endif

    PyMemAllocatorEx dbg_raw = PYDBGRAW_ALLOC;
    PyMemAllocatorEx dbg_mem = PYDBGMEM_ALLOC;
    PyMemAllocatorEx dbg_obj = PYDBGOBJ_ALLOC;

    if (pymemallocator_eq(&_PyMem_Raw, &dbg_raw) &&
        pymemallocator_eq(&_PyMem, &dbg_mem) &&
        pymemallocator_eq(&_PyObject, &dbg_obj))
    {
        /* Debug hooks installed */
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &malloc_alloc))
        {
            return "malloc_debug";
        }
#ifdef WITH_PYMALLOC
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &pymalloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &pymalloc))
        {
            return "pymalloc_debug";
        }
#endif
    }
    return NULL;
}


#undef MALLOC_ALLOC
#undef PYMALLOC_ALLOC
#undef PYRAW_ALLOC
#undef PYMEM_ALLOC
#undef PYOBJ_ALLOC
#undef PYDBGRAW_ALLOC
#undef PYDBGMEM_ALLOC
#undef PYDBGOBJ_ALLOC


static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };

#ifdef WITH_PYMALLOC
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

static int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif


static void
_PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain)
{
    PyMemAllocatorEx alloc;

    if (domain == PYMEM_DOMAIN_RAW) {
        if (_PyMem_Raw.malloc == _PyMem_DebugRawMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        alloc.ctx = &_PyMem_Debug.raw;
        alloc.malloc = _PyMem_DebugRawMalloc;
        alloc.calloc = _PyMem_DebugRawCalloc;
        alloc.realloc = _PyMem_DebugRawRealloc;
        alloc.free = _PyMem_DebugRawFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_MEM) {
        if (_PyMem.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        alloc.ctx = &_PyMem_Debug.mem;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_OBJ) {
        if (_PyObject.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        alloc.ctx = &_PyMem_Debug.obj;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}


void
PyMem_SetupDebugHooks(void)
{
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_RAW);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_MEM);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_OBJ);
}
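
/* Example (editorial sketch, not part of CPython): an embedder can layer its
   own hook over a domain the same way the debug hooks above are installed --
   save the current allocator somewhere reachable, then install functions
   that delegate to it. All names below are hypothetical.

       static PyMemAllocatorEx prev_raw;
       static size_t nallocs = 0;

       static void *
       counting_malloc(void *ctx, size_t size)
       {
           nallocs++;
           return prev_raw.malloc(prev_raw.ctx, size);
       }

       // counting_calloc/counting_realloc/counting_free delegate likewise.

       PyMemAllocatorEx hook = {NULL, counting_malloc, counting_calloc,
                                counting_realloc, counting_free};
       PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &prev_raw);
       PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);

   Note that PYMEM_DOMAIN_RAW allocators must be thread-safe: the GIL is not
   held when they are called. */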

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most Python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}
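
/* Worked example (editorial): the guard above avoids computing nelem * elsize,
   whose product could exceed PY_SSIZE_T_MAX or wrap around. On a 64-bit build
   PY_SSIZE_T_MAX is 2**63 - 1, so with elsize == 16 any nelem greater than
   (2**63 - 1) / 16 is rejected before calloc() ever sees the request. */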

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}


void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}


wchar_t*
_PyMem_RawWcsdup(const wchar_t *str)
{
    assert(str != NULL);

    size_t len = wcslen(str);
    if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
        return NULL;
    }

    size_t size = (len + 1) * sizeof(wchar_t);
    wchar_t *str2 = PyMem_RawMalloc(size);
    if (str2 == NULL) {
        return NULL;
    }

    memcpy(str2, str, size);
    return str2;
}

char *
_PyMem_RawStrdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_RawMalloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_Malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
     _______________________________       |                           |
    [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
     ______________________________________________________________   |
    [          Python's raw memory allocator (PyMem_ API)          ]  |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
     __________________________________________________________________
    [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
     __________________________________   __________________________________
    [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */     /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists. This technique is quite
 * efficient for memory-intensive programs which allocate mainly small-sized
 * blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */
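
/* Worked example (editorial): a request of 20 bytes falls in the 17-24 row
   above, so its size class index is (20 - 1) >> ALIGNMENT_SHIFT == 2 and the
   block handed out is INDEX2SIZE(2) == (2 + 1) << 3 == 24 bytes; the 4
   trailing bytes are alignment padding, not usable capacity. */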

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-byte alignment works
 * on most current architectures (with 32-bit or 64-bit address buses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough to use the preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that Python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory". It's an addressable
 * range wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK
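
/* Worked example (editorial): with ARENA_SIZE == 256 KiB and POOL_SIZE ==
   4 KiB, an arena holds ARENA_SIZE / POOL_SIZE == 64 pools -- one fewer when
   malloc() returns an address that is not pool-aligned, since new_arena()
   below sacrifices the leading partial pool to restore alignment. */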

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
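
/* Worked example (editorial, assuming a typical 64-bit layout): sizeof(struct
   pool_header) is 48 bytes (8 for the ref union, 8 for freeblock, 16 for the
   two pool pointers, 16 for the four uints), already a multiple of ALIGNMENT,
   so POOL_OVERHEAD == 48 and the smallest size class packs
   NUMBLOCKS(0) == (4096 - 48) / 8 == 506 eight-byte blocks into a pool. */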

/*==========================================================================*/

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's then no longer linked to from anything, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member. The prevpool member has no meaning in
    this case. Empty pools have no inherent size class: the next time a
    malloc finds an empty list in usedpools[], it takes the first pool off
    of freepools. If the size class needed happens to be the same as the
    size class the pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with how
pymalloc strives at all levels (arena, pool, and block) never to touch a piece
of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)
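
/* Worked example (editorial, assuming 64-bit pointers): PTA(2) takes the
   address of usedpools[4] and steps back 2*sizeof(block *) == 16 bytes --
   exactly the size of the ref union plus freeblock -- so the nextpool and
   prevpool fields of the fake pool_header land on usedpools[4] and
   usedpools[5]. PT(2) initializes both slots to PTA(2), so an empty header
   satisfies pool->nextpool == pool->prevpool == pool, which is exactly the
   emptiness test pymalloc_alloc() performs below. */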

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES >  8 */
};

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    if (debug_stats == -1) {
        const char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}


/*
address_in_range(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because address_in_range is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated. `arenas` is still NULL in that
case. We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address. In addition, AO.address is not 0 (no arena can start at address 0
(NULL)). Therefore address_in_range correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash. If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5. Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers. The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls. Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/

Benjamin Peterson3924f932016-09-18 19:12:48 -07001304static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
1305address_in_range(void *p, poolp pool)
1306{
1307 // Since address_in_range may be reading from memory which was not allocated
1308 // by Python, it is important that pool->arenaindex is read only once, as
1309 // another thread may be concurrently modifying the value without holding
1310 // the GIL. The following dance forces the compiler to read pool->arenaindex
1311 // only once.
1312 uint arenaindex = *((volatile uint *)&pool->arenaindex);
Victor Stinner9e87e772017-11-24 12:09:24 +01001313 return arenaindex < maxarenas &&
1314 (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
1315 arenas[arenaindex].address != 0;
Benjamin Peterson3924f932016-09-18 19:12:48 -07001316}
Tim Peters338e0102002-04-01 19:23:44 +00001317
Victor Stinner9ed83c42017-10-31 12:18:10 -07001318
Neil Schemenauera35c6882001-02-27 04:45:05 +00001319/*==========================================================================*/
1320
Victor Stinner9ed83c42017-10-31 12:18:10 -07001321/* pymalloc allocator
Neil Schemenauera35c6882001-02-27 04:45:05 +00001322
Victor Stinner9ed83c42017-10-31 12:18:10 -07001323 The basic blocks are ordered by decreasing execution frequency,
1324 which minimizes the number of jumps in the most common cases,
1325 improves branch prediction and instruction scheduling (small
1326 block allocations typically result in a couple of instructions).
1327 That holds unless the optimizer gets too smart and reorders everything...
Neil Schemenauera35c6882001-02-27 04:45:05 +00001328
Victor Stinner9ed83c42017-10-31 12:18:10 -07001329 Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.
1330
1331 Return 0 if pymalloc failed to allocate the memory block: on bigger
1332 requests, on error in the code below (as a last chance to serve the request)
1333 or when the max memory limit has been reached. */
1334static int
1335pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001336{
Victor Stinner9e87e772017-11-24 12:09:24 +01001337 block *bp;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001338 poolp pool;
1339 poolp next;
1340 uint size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001341
Benjamin Peterson05159c42009-12-03 03:01:27 +00001342#ifdef WITH_VALGRIND
Victor Stinner9ed83c42017-10-31 12:18:10 -07001343 if (UNLIKELY(running_on_valgrind == -1)) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001344 running_on_valgrind = RUNNING_ON_VALGRIND;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001345 }
1346 if (UNLIKELY(running_on_valgrind)) {
1347 return 0;
1348 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001349#endif
1350
Victor Stinner9ed83c42017-10-31 12:18:10 -07001351 if (nbytes == 0) {
1352 return 0;
1353 }
1354 if (nbytes > SMALL_REQUEST_THRESHOLD) {
1355 return 0;
1356 }
T. Wouters06bb4872017-03-31 10:10:19 -07001357
Victor Stinner9ed83c42017-10-31 12:18:10 -07001358 /*
1359 * Most frequent paths first
1360 */
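    /* Illustrative note (assuming the usual ALIGNMENT == 8, so
     * ALIGNMENT_SHIFT == 3): a request of nbytes == 13 maps to
     * size == (13-1) >> 3 == 1, the 16-byte class (INDEX2SIZE(1) == 16).
     */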
1361 size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
Victor Stinner9e87e772017-11-24 12:09:24 +01001362 pool = usedpools[size + size];
Victor Stinner9ed83c42017-10-31 12:18:10 -07001363 if (pool != pool->nextpool) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001364 /*
Victor Stinner9ed83c42017-10-31 12:18:10 -07001365 * There is a used pool for this size class.
1366 * Pick up the head block of its free list.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001367 */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001368 ++pool->ref.count;
1369 bp = pool->freeblock;
1370 assert(bp != NULL);
Victor Stinner9e87e772017-11-24 12:09:24 +01001371 if ((pool->freeblock = *(block **)bp) != NULL) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001372 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001373 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001374
Victor Stinner9ed83c42017-10-31 12:18:10 -07001375 /*
1376 * Reached the end of the free list, try to extend it.
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001377 */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001378 if (pool->nextoffset <= pool->maxnextoffset) {
1379 /* There is room for another block. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001380 pool->freeblock = (block*)pool +
Victor Stinner9ed83c42017-10-31 12:18:10 -07001381 pool->nextoffset;
1382 pool->nextoffset += INDEX2SIZE(size);
Victor Stinner9e87e772017-11-24 12:09:24 +01001383 *(block **)(pool->freeblock) = NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001384 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001385 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001386
Victor Stinner9ed83c42017-10-31 12:18:10 -07001387 /* Pool is full, unlink from used pools. */
1388 next = pool->nextpool;
1389 pool = pool->prevpool;
1390 next->prevpool = pool;
1391 pool->nextpool = next;
1392 goto success;
1393 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001394
Victor Stinner9ed83c42017-10-31 12:18:10 -07001395 /* There isn't a pool of the right size class immediately
1396 * available: use a free pool.
1397 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001398 if (usable_arenas == NULL) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001399 /* No arena has a free pool: allocate a new arena. */
1400#ifdef WITH_MEMORY_LIMITS
Victor Stinner9e87e772017-11-24 12:09:24 +01001401 if (narenas_currently_allocated >= MAX_ARENAS) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001402 goto failed;
1403 }
1404#endif
Victor Stinner9e87e772017-11-24 12:09:24 +01001405 usable_arenas = new_arena();
1406 if (usable_arenas == NULL) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001407 goto failed;
1408 }
Victor Stinner9e87e772017-11-24 12:09:24 +01001409 usable_arenas->nextarena =
1410 usable_arenas->prevarena = NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001411 }
Victor Stinner9e87e772017-11-24 12:09:24 +01001412 assert(usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001413
1414 /* Try to get a cached free pool. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001415 pool = usable_arenas->freepools;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001416 if (pool != NULL) {
1417 /* Unlink from cached pools. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001418 usable_arenas->freepools = pool->nextpool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001419
1420 /* This arena already had the smallest nfreepools
1421 * value, so decreasing nfreepools doesn't change
1422 * that, and we don't need to rearrange the
1423 * usable_arenas list. However, if the arena has
1424 * become wholly allocated, we need to remove its
1425 * arena_object from usable_arenas.
1426 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001427 --usable_arenas->nfreepools;
1428 if (usable_arenas->nfreepools == 0) {
Victor Stinner9ed83c42017-10-31 12:18:10 -07001429 /* Wholly allocated: remove. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001430 assert(usable_arenas->freepools == NULL);
1431 assert(usable_arenas->nextarena == NULL ||
1432 usable_arenas->nextarena->prevarena ==
1433 usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001434
Victor Stinner9e87e772017-11-24 12:09:24 +01001435 usable_arenas = usable_arenas->nextarena;
1436 if (usable_arenas != NULL) {
1437 usable_arenas->prevarena = NULL;
1438 assert(usable_arenas->address != 0);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001439 }
1440 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07001441 else {
1442 /* nfreepools > 0: it must be that freepools
1443 * isn't NULL, or that we haven't yet carved
1444 * off all the arena's pools for the first
1445 * time.
1446 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001447 assert(usable_arenas->freepools != NULL ||
1448 usable_arenas->pool_address <=
1449 (block*)usable_arenas->address +
Victor Stinner9ed83c42017-10-31 12:18:10 -07001450 ARENA_SIZE - POOL_SIZE);
1451 }
Thomas Woutersa9773292006-04-21 09:43:23 +00001452
Victor Stinner9ed83c42017-10-31 12:18:10 -07001453 init_pool:
1454 /* Frontlink to used pools. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001455 next = usedpools[size + size]; /* == prev */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001456 pool->nextpool = next;
1457 pool->prevpool = next;
1458 next->nextpool = pool;
1459 next->prevpool = pool;
1460 pool->ref.count = 1;
1461 if (pool->szidx == size) {
1462 /* Luckily, this pool last contained blocks
1463 * of the same size class, so its header
1464 * and free list are already initialized.
1465 */
1466 bp = pool->freeblock;
1467 assert(bp != NULL);
Victor Stinner9e87e772017-11-24 12:09:24 +01001468 pool->freeblock = *(block **)bp;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001469 goto success;
1470 }
1471 /*
1472 * Initialize the pool header, set up the free list to
1473 * contain just the second block, and return the first
1474 * block.
1475 */
1476 pool->szidx = size;
1477 size = INDEX2SIZE(size);
Victor Stinner9e87e772017-11-24 12:09:24 +01001478 bp = (block *)pool + POOL_OVERHEAD;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001479 pool->nextoffset = POOL_OVERHEAD + (size << 1);
1480 pool->maxnextoffset = POOL_SIZE - size;
1481 pool->freeblock = bp + size;
Victor Stinner9e87e772017-11-24 12:09:24 +01001482 *(block **)(pool->freeblock) = NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001483 goto success;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001484 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001485
Victor Stinner9ed83c42017-10-31 12:18:10 -07001486 /* Carve off a new pool. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001487 assert(usable_arenas->nfreepools > 0);
1488 assert(usable_arenas->freepools == NULL);
1489 pool = (poolp)usable_arenas->pool_address;
1490 assert((block*)pool <= (block*)usable_arenas->address +
Victor Stinner9ed83c42017-10-31 12:18:10 -07001491 ARENA_SIZE - POOL_SIZE);
Victor Stinner9e87e772017-11-24 12:09:24 +01001492 pool->arenaindex = (uint)(usable_arenas - arenas);
1493 assert(&arenas[pool->arenaindex] == usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001494 pool->szidx = DUMMY_SIZE_IDX;
Victor Stinner9e87e772017-11-24 12:09:24 +01001495 usable_arenas->pool_address += POOL_SIZE;
1496 --usable_arenas->nfreepools;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001497
Victor Stinner9e87e772017-11-24 12:09:24 +01001498 if (usable_arenas->nfreepools == 0) {
1499 assert(usable_arenas->nextarena == NULL ||
1500 usable_arenas->nextarena->prevarena ==
1501 usable_arenas);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001502 /* Unlink the arena: it is completely allocated. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001503 usable_arenas = usable_arenas->nextarena;
1504 if (usable_arenas != NULL) {
1505 usable_arenas->prevarena = NULL;
1506 assert(usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001507 }
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001508 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07001509
1510 goto init_pool;
1511
1512success:
Victor Stinner9ed83c42017-10-31 12:18:10 -07001513 assert(bp != NULL);
1514 *ptr_p = (void *)bp;
1515 return 1;
1516
1517failed:
Victor Stinner9ed83c42017-10-31 12:18:10 -07001518 return 0;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001519}
1520
Victor Stinner9ed83c42017-10-31 12:18:10 -07001521
Victor Stinnerdb067af2014-05-02 22:31:14 +02001522static void *
1523_PyObject_Malloc(void *ctx, size_t nbytes)
1524{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001525 void* ptr;
1526 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001527 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001528 return ptr;
1529 }
1530
1531 ptr = PyMem_RawMalloc(nbytes);
1532 if (ptr != NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001533 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001534 }
1535 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001536}
1537
Victor Stinner9ed83c42017-10-31 12:18:10 -07001538
Victor Stinnerdb067af2014-05-02 22:31:14 +02001539static void *
1540_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
1541{
Victor Stinner9ed83c42017-10-31 12:18:10 -07001542 void* ptr;
1543
1544 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
1545 size_t nbytes = nelem * elsize;
1546
1547 if (pymalloc_alloc(ctx, &ptr, nbytes)) {
1548 memset(ptr, 0, nbytes);
Victor Stinner9e87e772017-11-24 12:09:24 +01001549 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001550 return ptr;
1551 }
1552
1553 ptr = PyMem_RawCalloc(nelem, elsize);
1554 if (ptr != NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001555 _Py_AllocatedBlocks++;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001556 }
1557 return ptr;
Victor Stinnerdb067af2014-05-02 22:31:14 +02001558}
1559
Neil Schemenauera35c6882001-02-27 04:45:05 +00001560
Victor Stinner9ed83c42017-10-31 12:18:10 -07001561/* Free a memory block allocated by pymalloc_alloc().
1562 Return 1 if it was freed.
1563 Return 0 if the block was not allocated by pymalloc_alloc(). */
1564static int
1565pymalloc_free(void *ctx, void *p)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001566{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001567 poolp pool;
Victor Stinner9e87e772017-11-24 12:09:24 +01001568 block *lastfree;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001569 poolp next, prev;
1570 uint size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001571
Victor Stinner9ed83c42017-10-31 12:18:10 -07001572 assert(p != NULL);
Antoine Pitrouf9d0b122012-12-09 14:28:26 +01001573
Benjamin Peterson05159c42009-12-03 03:01:27 +00001574#ifdef WITH_VALGRIND
Victor Stinner9ed83c42017-10-31 12:18:10 -07001575 if (UNLIKELY(running_on_valgrind > 0)) {
1576 return 0;
1577 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001578#endif
1579
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001580 pool = POOL_ADDR(p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001581 if (!address_in_range(p, pool)) {
1582 return 0;
1583 }
1584 /* We allocated this address. */
Thomas Woutersa9773292006-04-21 09:43:23 +00001585
Victor Stinner9ed83c42017-10-31 12:18:10 -07001586 /* Link p to the start of the pool's freeblock list. Since
1587 * the pool had at least the p block outstanding, the pool
1588 * wasn't empty (so it's already in a usedpools[] list, or
1589 * was full and is in no list -- it's not in the freeblocks
1590 * list in any case).
1591 */
1592 assert(pool->ref.count > 0); /* else it was empty */
Victor Stinner9e87e772017-11-24 12:09:24 +01001593 *(block **)p = lastfree = pool->freeblock;
1594 pool->freeblock = (block *)p;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001595 if (!lastfree) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001596 /* Pool was full, so doesn't currently live in any list:
1597 * link it to the front of the appropriate usedpools[] list.
1598 * This mimics LRU pool usage for new allocations and
1599 * targets optimal filling when several pools contain
1600 * blocks of the same size class.
1601 */
1602 --pool->ref.count;
1603 assert(pool->ref.count > 0); /* else the pool is empty */
1604 size = pool->szidx;
Victor Stinner9e87e772017-11-24 12:09:24 +01001605 next = usedpools[size + size];
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001606 prev = next->prevpool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001607
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001608 /* insert pool before next: prev <-> pool <-> next */
1609 pool->nextpool = next;
1610 pool->prevpool = prev;
1611 next->prevpool = pool;
1612 prev->nextpool = pool;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001613 goto success;
1614 }
1615
1616 struct arena_object* ao;
1617 uint nf; /* ao->nfreepools */
1618
1619 /* freeblock wasn't NULL, so the pool wasn't full,
1620 * and the pool is in a usedpools[] list.
1621 */
1622 if (--pool->ref.count != 0) {
1623 /* pool isn't empty: leave it in usedpools */
1624 goto success;
1625 }
1626 /* Pool is now empty: unlink from usedpools, and
1627 * link to the front of freepools. This ensures that
1628 * previously freed pools will be allocated later
1629 * (being unreferenced, they may have been paged out).
1630 */
1631 next = pool->nextpool;
1632 prev = pool->prevpool;
1633 next->prevpool = prev;
1634 prev->nextpool = next;
1635
1636 /* Link the pool to freepools. This is a singly-linked
1637 * list, and pool->prevpool isn't used there.
1638 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001639 ao = &arenas[pool->arenaindex];
Victor Stinner9ed83c42017-10-31 12:18:10 -07001640 pool->nextpool = ao->freepools;
1641 ao->freepools = pool;
1642 nf = ++ao->nfreepools;
1643
1644 /* All the rest is arena management. We just freed
1645 * a pool, and there are 4 cases for arena mgmt:
1646 * 1. If all the pools are free, return the arena to
1647 * the system free().
1648 * 2. If this is the only free pool in the arena,
1649 * add the arena back to the `usable_arenas` list.
1650 * 3. If the "next" arena has a smaller count of free
1651 * pools, we have to "slide this arena right" to
1652 * restore that usable_arenas is sorted in order of
1653 * nfreepools.
1654 * 4. Else there's nothing more to do.
1655 */
1656 if (nf == ao->ntotalpools) {
1657 /* Case 1. First unlink ao from usable_arenas.
1658 */
1659 assert(ao->prevarena == NULL ||
1660 ao->prevarena->address != 0);
1661 assert(ao ->nextarena == NULL ||
1662 ao->nextarena->address != 0);
1663
1664 /* Fix the pointer in the prevarena, or the
1665 * usable_arenas pointer.
1666 */
1667 if (ao->prevarena == NULL) {
Victor Stinner9e87e772017-11-24 12:09:24 +01001668 usable_arenas = ao->nextarena;
1669 assert(usable_arenas == NULL ||
1670 usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001671 }
1672 else {
1673 assert(ao->prevarena->nextarena == ao);
1674 ao->prevarena->nextarena =
1675 ao->nextarena;
1676 }
1677 /* Fix the pointer in the nextarena. */
1678 if (ao->nextarena != NULL) {
1679 assert(ao->nextarena->prevarena == ao);
1680 ao->nextarena->prevarena =
1681 ao->prevarena;
1682 }
1683 /* Record that this arena_object slot is
1684 * available to be reused.
1685 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001686 ao->nextarena = unused_arena_objects;
1687 unused_arena_objects = ao;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001688
1689 /* Free the entire arena. */
Victor Stinner9e87e772017-11-24 12:09:24 +01001690 _PyObject_Arena.free(_PyObject_Arena.ctx,
Victor Stinner9ed83c42017-10-31 12:18:10 -07001691 (void *)ao->address, ARENA_SIZE);
1692 ao->address = 0; /* mark unassociated */
Victor Stinner9e87e772017-11-24 12:09:24 +01001693 --narenas_currently_allocated;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001694
1695 goto success;
1696 }
1697
1698 if (nf == 1) {
1699 /* Case 2. Put ao at the head of
1700 * usable_arenas. Note that because
1701 * ao->nfreepools was 0 before, ao isn't
1702 * currently on the usable_arenas list.
1703 */
Victor Stinner9e87e772017-11-24 12:09:24 +01001704 ao->nextarena = usable_arenas;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001705 ao->prevarena = NULL;
Victor Stinner9e87e772017-11-24 12:09:24 +01001706 if (usable_arenas)
1707 usable_arenas->prevarena = ao;
1708 usable_arenas = ao;
1709 assert(usable_arenas->address != 0);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001710
1711 goto success;
1712 }
1713
1714 /* If this arena is now out of order, we need to keep
1715 * the list sorted. The list is kept sorted so that
1716 * the "most full" arenas are used first, which allows
1717 * the nearly empty arenas to be completely freed. In
1718 * a few un-scientific tests, it seems like this
1719 * approach allowed a lot more memory to be freed.
1720 */
1721 if (ao->nextarena == NULL ||
1722 nf <= ao->nextarena->nfreepools) {
1723 /* Case 4. Nothing to do. */
1724 goto success;
1725 }
1726 /* Case 3: We have to move the arena towards the end
1727 * of the list, because it has more free pools than
1728 * the arena to its right.
1729 * First unlink ao from usable_arenas.
1730 */
1731 if (ao->prevarena != NULL) {
1732 /* ao isn't at the head of the list */
1733 assert(ao->prevarena->nextarena == ao);
1734 ao->prevarena->nextarena = ao->nextarena;
1735 }
1736 else {
1737 /* ao is at the head of the list */
Victor Stinner9e87e772017-11-24 12:09:24 +01001738 assert(usable_arenas == ao);
1739 usable_arenas = ao->nextarena;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001740 }
1741 ao->nextarena->prevarena = ao->prevarena;
1742
1743 /* Locate the new insertion point by iterating over
1744 * the list, using our nextarena pointer.
1745 */
1746 while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
1747 ao->prevarena = ao->nextarena;
1748 ao->nextarena = ao->nextarena->nextarena;
1749 }
1750
1751 /* Insert ao at this point. */
1752 assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
1753 assert(ao->prevarena->nextarena == ao->nextarena);
1754
1755 ao->prevarena->nextarena = ao;
1756 if (ao->nextarena != NULL) {
1757 ao->nextarena->prevarena = ao;
1758 }
1759
1760 /* Verify that the swaps worked. */
1761 assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
1762 assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
1763 assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
Victor Stinner9e87e772017-11-24 12:09:24 +01001764 assert((usable_arenas == ao && ao->prevarena == NULL)
Victor Stinner9ed83c42017-10-31 12:18:10 -07001765 || ao->prevarena->nextarena == ao);
1766
1767 goto success;
1768
1769success:
Victor Stinner9ed83c42017-10-31 12:18:10 -07001770 return 1;
1771}
1772
1773
1774static void
1775_PyObject_Free(void *ctx, void *p)
1776{
1777 /* PyObject_Free(NULL) has no effect */
1778 if (p == NULL) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001779 return;
1780 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001781
Victor Stinner9e87e772017-11-24 12:09:24 +01001782 _Py_AllocatedBlocks--;
Victor Stinner9ed83c42017-10-31 12:18:10 -07001783 if (!pymalloc_free(ctx, p)) {
1784 /* pymalloc didn't allocate this address */
1785 PyMem_RawFree(p);
1786 }
Neil Schemenauera35c6882001-02-27 04:45:05 +00001787}
1788
Neil Schemenauera35c6882001-02-27 04:45:05 +00001789
Victor Stinner9ed83c42017-10-31 12:18:10 -07001790/* pymalloc realloc.
1791
1792 If nbytes==0, then as the Python docs promise, we do not treat this like
1793 free(p), and return a non-NULL result.
1794
1795 Return 1 if pymalloc reallocated memory and wrote the new pointer into
1796 newptr_p.
1797
1798 Return 0 if pymalloc did not allocate p. */
1799static int
1800pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
Neil Schemenauera35c6882001-02-27 04:45:05 +00001801{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001802 void *bp;
1803 poolp pool;
1804 size_t size;
Neil Schemenauera35c6882001-02-27 04:45:05 +00001805
Victor Stinner9ed83c42017-10-31 12:18:10 -07001806 assert(p != NULL);
Georg Brandld492ad82008-07-23 16:13:07 +00001807
Benjamin Peterson05159c42009-12-03 03:01:27 +00001808#ifdef WITH_VALGRIND
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001809 /* Treat running_on_valgrind == -1 the same as 0 */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001810 if (UNLIKELY(running_on_valgrind > 0)) {
1811 return 0;
1812 }
Benjamin Peterson05159c42009-12-03 03:01:27 +00001813#endif
1814
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001815 pool = POOL_ADDR(p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07001816 if (!address_in_range(p, pool)) {
1817 /* pymalloc is not managing this block.
1818
1819 If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
1820 over this block. However, if we do, we need to copy the valid data
1821 from the C-managed block to one of our blocks, and there's no
1822 portable way to know how much of the memory space starting at p is
1823 valid.
1824
1825 As bug 1185883 pointed out the hard way, it's possible that the
1826 C-managed block is "at the end" of allocated VM space, so that a
1827 memory fault can occur if we try to copy nbytes bytes starting at p.
1828 Instead we punt: let C continue to manage this block. */
1829 return 0;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001830 }
Victor Stinner9ed83c42017-10-31 12:18:10 -07001831
1832 /* pymalloc is in charge of this block */
1833 size = INDEX2SIZE(pool->szidx);
1834 if (nbytes <= size) {
1835 /* The block is staying the same or shrinking.
1836
1837 If it's shrinking, there's a tradeoff: it costs cycles to copy the
1838 block to a smaller size class, but it wastes memory not to copy it.
1839
1840 The compromise here is to copy on shrink only if at least 25% of
1841 size can be shaved off. */
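        /* Worked example (illustrative): with size == 512 and nbytes == 400,
         * 4*400 == 1600 > 3*512 == 1536, so the block stays where it is;
         * with nbytes == 380, 1520 <= 1536, so it is copied into a smaller
         * size class below. */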
1842 if (4 * nbytes > 3 * size) {
1843 /* It's the same, or shrinking and new/old > 3/4. */
1844 *newptr_p = p;
1845 return 1;
1846 }
1847 size = nbytes;
1848 }
1849
1850 bp = _PyObject_Malloc(ctx, nbytes);
1851 if (bp != NULL) {
1852 memcpy(bp, p, size);
1853 _PyObject_Free(ctx, p);
1854 }
1855 *newptr_p = bp;
1856 return 1;
1857}
1858
1859
1860static void *
1861_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
1862{
1863 void *ptr2;
1864
1865 if (ptr == NULL) {
1866 return _PyObject_Malloc(ctx, nbytes);
1867 }
1868
1869 if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
1870 return ptr2;
1871 }
1872
1873 return PyMem_RawRealloc(ptr, nbytes);
Neil Schemenauera35c6882001-02-27 04:45:05 +00001874}
1875
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001876#else /* ! WITH_PYMALLOC */
Tim Petersddea2082002-03-23 10:03:50 +00001877
1878/*==========================================================================*/
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00001879/* pymalloc not enabled: Redirect the entry points to malloc. These will
1880 * only be used by extensions that are compiled with pymalloc enabled. */
Tim Peters62c06ba2002-03-23 22:28:18 +00001881
Antoine Pitrou92840532012-12-17 23:05:59 +01001882Py_ssize_t
1883_Py_GetAllocatedBlocks(void)
1884{
1885 return 0;
1886}
1887
Tim Peters1221c0a2002-03-23 00:20:15 +00001888#endif /* WITH_PYMALLOC */
1889
Victor Stinner34be807c2016-03-14 12:04:26 +01001890
Tim Petersddea2082002-03-23 10:03:50 +00001891/*==========================================================================*/
Tim Peters62c06ba2002-03-23 22:28:18 +00001892/* A cross-platform debugging allocator. This doesn't manage memory directly,
1893 * it wraps a real allocator, adding extra debugging info to the memory blocks.
1894 */
Tim Petersddea2082002-03-23 10:03:50 +00001895
Tim Petersf6fb5012002-04-12 07:38:53 +00001896/* Special bytes broadcast into debug memory blocks at appropriate times.
1897 * Strings of these are unlikely to be valid addresses, floats, ints or
1898 * 7-bit ASCII.
1899 */
1900#undef CLEANBYTE
1901#undef DEADBYTE
1902#undef FORBIDDENBYTE
1903#define CLEANBYTE 0xCB /* clean (newly allocated) memory */
Tim Peters889f61d2002-07-10 19:29:49 +00001904#define DEADBYTE 0xDB /* dead (newly freed) memory */
Tim Petersf6fb5012002-04-12 07:38:53 +00001905#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */
Tim Petersddea2082002-03-23 10:03:50 +00001906
Victor Stinner9e87e772017-11-24 12:09:24 +01001907static size_t serialno = 0; /* incremented on each debug {m,re}alloc */
1908
Tim Peterse0850172002-03-24 00:34:21 +00001909/* serialno is always incremented via calling this routine. The point is
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001910 * to supply a single place to set a breakpoint.
1911 */
Tim Peterse0850172002-03-24 00:34:21 +00001912static void
Neil Schemenauerbd02b142002-03-28 21:05:38 +00001913bumpserialno(void)
Tim Peterse0850172002-03-24 00:34:21 +00001914{
Victor Stinner9e87e772017-11-24 12:09:24 +01001915 ++serialno;
Tim Peterse0850172002-03-24 00:34:21 +00001916}
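
/* Debugging sketch (illustrative, not part of the build): because every
 * debug allocation passes through bumpserialno(), a conditional breakpoint
 * can catch the exact allocation reported by _PyObject_DebugDumpAddress,
 * e.g. in gdb:
 *
 *     (gdb) break bumpserialno if serialno == 4722
 *
 * stops on the call that is about to assign serial number 4723 (the
 * condition is evaluated at function entry, before the increment); 4723
 * stands in for the serial number taken from the dump.
 */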
1917
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001918#define SST SIZEOF_SIZE_T
Tim Peterse0850172002-03-24 00:34:21 +00001919
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001920/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
1921static size_t
1922read_size_t(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00001923{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001924 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001925 size_t result = *q++;
1926 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001927
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001928 for (i = SST; --i > 0; ++q)
1929 result = (result << 8) | *q;
1930 return result;
Tim Petersddea2082002-03-23 10:03:50 +00001931}
1932
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001933/* Write n as a big-endian size_t, MSB at address p, LSB at
1934 * p + sizeof(size_t) - 1.
1935 */
Tim Petersddea2082002-03-23 10:03:50 +00001936static void
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001937write_size_t(void *p, size_t n)
Tim Petersddea2082002-03-23 10:03:50 +00001938{
Benjamin Peterson19517e42016-09-18 19:22:22 -07001939 uint8_t *q = (uint8_t *)p + SST - 1;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001940 int i;
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001941
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001942 for (i = SST; --i >= 0; --q) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07001943 *q = (uint8_t)(n & 0xff);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00001944 n >>= 8;
1945 }
Tim Petersddea2082002-03-23 10:03:50 +00001946}
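
/* Round-trip sketch (illustrative, not compiled): the pair stores a size_t
 * most-significant byte first, independent of host endianness:
 *
 *     uint8_t buf[SST];
 *     write_size_t(buf, (size_t)0x0102);
 *     assert(buf[SST-2] == 0x01 && buf[SST-1] == 0x02);
 *     assert(read_size_t(buf) == (size_t)0x0102);
 */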
1947
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001948/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and
1949 fills them with useful stuff, here calling the underlying malloc's result p:
Tim Petersddea2082002-03-23 10:03:50 +00001950
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001951p[0: S]
1952 Number of bytes originally asked for. This is a size_t, big-endian (easier
1953 to read in a memory dump).
Georg Brandl7cba5fd2013-09-25 09:04:23 +02001954p[S]
Tim Petersdf099f52013-09-19 21:06:37 -05001955 API ID. See PEP 445. This is a character, but seems undocumented.
1956p[S+1: 2*S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001957 Copies of FORBIDDENBYTE. Used to catch under- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001958p[2*S: 2*S+n]
Tim Petersf6fb5012002-04-12 07:38:53 +00001959 The requested memory, filled with copies of CLEANBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00001960 Used to catch reference to uninitialized memory.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001961 &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc
Tim Petersddea2082002-03-23 10:03:50 +00001962 handled the request itself.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001963p[2*S+n: 2*S+n+S]
Tim Petersf6fb5012002-04-12 07:38:53 +00001964 Copies of FORBIDDENBYTE. Used to catch over- writes and reads.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001965p[2*S+n+S: 2*S+n+2*S]
Victor Stinner0507bf52013-07-07 02:05:46 +02001966 A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
1967 and _PyMem_DebugRealloc.
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00001968 This is a big-endian size_t.
Tim Petersddea2082002-03-23 10:03:50 +00001969 If "bad memory" is detected later, the serial number gives an
1970 excellent way to set a breakpoint on the next run, to capture the
1971 instant at which this block was passed out.
1972*/
1973
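/* Worked example (illustrative, assuming SST == 8): a debug malloc of
   nbytes == 4 asks the underlying allocator for total == 4 + 4*8 == 36
   bytes.  With p the base address:
       p[0:8]    the requested size 4, big-endian
       p[8]      the API id character (e.g. 'o' for the object domain)
       p[9:16]   seven FORBIDDENBYTEs (0xFB)
       p[16:20]  the caller's 4 bytes, filled with CLEANBYTE (0xCB) by the
                 debug malloc (calloc leaves them zeroed)
       p[20:28]  eight FORBIDDENBYTEs (0xFB)
       p[28:36]  the serial number, big-endian
   and &p[16] is what the caller receives.
*/
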
Victor Stinner0507bf52013-07-07 02:05:46 +02001974static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01001975_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00001976{
Victor Stinner0507bf52013-07-07 02:05:46 +02001977 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02001978 uint8_t *p; /* base address of malloc'ed pad block */
Victor Stinner9ed83c42017-10-31 12:18:10 -07001979 uint8_t *data; /* p + 2*SST == pointer to data bytes */
1980 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */
1981 size_t total; /* 2 * SST + nbytes + 2 * SST */
1982
1983 if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
1984 /* integer overflow: can't represent total as a Py_ssize_t */
1985 return NULL;
1986 }
1987 total = nbytes + 4 * SST;
1988
1989 /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
1990 * ^--- p ^--- data ^--- tail
1991 S: nbytes stored as size_t
1992 I: API identifier (1 byte)
1993 F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
1994 C: Clean bytes used later to store actual data
1995 N: Serial number stored as size_t */
1996
1997 if (use_calloc) {
1998 p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
1999 }
2000 else {
2001 p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
2002 }
2003 if (p == NULL) {
2004 return NULL;
2005 }
2006 data = p + 2*SST;
Tim Petersddea2082002-03-23 10:03:50 +00002007
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002008 bumpserialno();
Tim Petersddea2082002-03-23 10:03:50 +00002009
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002010 /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
2011 write_size_t(p, nbytes);
Benjamin Peterson19517e42016-09-18 19:22:22 -07002012 p[SST] = (uint8_t)api->api_id;
Victor Stinner0507bf52013-07-07 02:05:46 +02002013 memset(p + SST + 1, FORBIDDENBYTE, SST-1);
Tim Petersddea2082002-03-23 10:03:50 +00002014
Victor Stinner9ed83c42017-10-31 12:18:10 -07002015 if (nbytes > 0 && !use_calloc) {
2016 memset(data, CLEANBYTE, nbytes);
2017 }
Tim Petersddea2082002-03-23 10:03:50 +00002018
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002019 /* at tail, write pad (SST bytes) and serialno (SST bytes) */
Victor Stinner9ed83c42017-10-31 12:18:10 -07002020 tail = data + nbytes;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002021 memset(tail, FORBIDDENBYTE, SST);
Victor Stinner9e87e772017-11-24 12:09:24 +01002022 write_size_t(tail + SST, serialno);
Tim Petersddea2082002-03-23 10:03:50 +00002023
Victor Stinner9ed83c42017-10-31 12:18:10 -07002024 return data;
Tim Petersddea2082002-03-23 10:03:50 +00002025}
2026
Victor Stinnerdb067af2014-05-02 22:31:14 +02002027static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01002028_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
Victor Stinnerdb067af2014-05-02 22:31:14 +02002029{
Victor Stinnerc4aec362016-03-14 22:26:53 +01002030 return _PyMem_DebugRawAlloc(0, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02002031}
2032
2033static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01002034_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
Victor Stinnerdb067af2014-05-02 22:31:14 +02002035{
2036 size_t nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002037 assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
Victor Stinnerdb067af2014-05-02 22:31:14 +02002038 nbytes = nelem * elsize;
Victor Stinnerc4aec362016-03-14 22:26:53 +01002039 return _PyMem_DebugRawAlloc(1, ctx, nbytes);
Victor Stinnerdb067af2014-05-02 22:31:14 +02002040}
2041
Victor Stinner9ed83c42017-10-31 12:18:10 -07002042
Victor Stinner82af0b62018-10-23 17:39:40 +02002043/* Heuristic check to guess whether memory has been freed. It relies on the
2044 debug hooks on Python's memory allocators, which fill memory with
2045 DEADBYTE (0xDB) when it is deallocated. */
2046int
2047_PyMem_IsFreed(void *ptr, size_t size)
2048{
2049 unsigned char *bytes = ptr;
2050 for (size_t i=0; i < size; i++) {
2051 if (bytes[i] != DEADBYTE) {
2052 return 0;
2053 }
2054 }
2055 return 1;
2056}
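
/* Usage sketch (illustrative; 'op' is a hypothetical pointer): a debug
 * check may ask whether some memory looks like it was already released
 * by the debug allocator:
 *
 *     if (_PyMem_IsFreed(op, sizeof(*op))) {
 *         ... probably operating on freed memory ...
 *     }
 *
 * This is only a heuristic: live memory that happens to contain DEADBYTE
 * in every byte checked would be misreported as freed.
 */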
2057
2058
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002059/* The debug free first checks the 2*SST bytes on each end for sanity (in
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002060 particular, that the FORBIDDENBYTEs with the api ID are still intact).
Tim Petersf6fb5012002-04-12 07:38:53 +00002061 Then fills the original bytes with DEADBYTE.
Tim Petersddea2082002-03-23 10:03:50 +00002062 Then calls the underlying free.
2063*/
Victor Stinner0507bf52013-07-07 02:05:46 +02002064static void
Victor Stinnerc4aec362016-03-14 22:26:53 +01002065_PyMem_DebugRawFree(void *ctx, void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002066{
Victor Stinner9ed83c42017-10-31 12:18:10 -07002067 /* PyMem_Free(NULL) has no effect */
2068 if (p == NULL) {
2069 return;
2070 }
2071
Victor Stinner0507bf52013-07-07 02:05:46 +02002072 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Benjamin Peterson19517e42016-09-18 19:22:22 -07002073 uint8_t *q = (uint8_t *)p - 2*SST; /* address returned from malloc */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002074 size_t nbytes;
Tim Petersddea2082002-03-23 10:03:50 +00002075
Victor Stinner0507bf52013-07-07 02:05:46 +02002076 _PyMem_DebugCheckAddress(api->api_id, p);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002077 nbytes = read_size_t(q);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002078 nbytes += 4 * SST;
2079 memset(q, DEADBYTE, nbytes);
Victor Stinner0507bf52013-07-07 02:05:46 +02002080 api->alloc.free(api->alloc.ctx, q);
Tim Petersddea2082002-03-23 10:03:50 +00002081}
2082
Victor Stinner9ed83c42017-10-31 12:18:10 -07002083
Victor Stinner0507bf52013-07-07 02:05:46 +02002084static void *
Victor Stinnerc4aec362016-03-14 22:26:53 +01002085_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
Tim Petersddea2082002-03-23 10:03:50 +00002086{
Victor Stinner9ed83c42017-10-31 12:18:10 -07002087 if (p == NULL) {
2088 return _PyMem_DebugRawAlloc(0, ctx, nbytes);
2089 }
2090
Victor Stinner0507bf52013-07-07 02:05:46 +02002091 debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002092 uint8_t *head; /* base address of malloc'ed pad block */
2093 uint8_t *data; /* pointer to data bytes */
2094 uint8_t *r;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002095 uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */
2096 size_t total; /* 2 * SST + nbytes + 2 * SST */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002097 size_t original_nbytes;
Victor Stinner9e87e772017-11-24 12:09:24 +01002098 size_t block_serialno;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002099#define ERASED_SIZE 64
2100 uint8_t save[2*ERASED_SIZE]; /* A copy of erased bytes. */
Tim Petersddea2082002-03-23 10:03:50 +00002101
Victor Stinner0507bf52013-07-07 02:05:46 +02002102 _PyMem_DebugCheckAddress(api->api_id, p);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002103
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002104 data = (uint8_t *)p;
2105 head = data - 2*SST;
2106 original_nbytes = read_size_t(head);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002107 if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
2108 /* integer overflow: can't represent total as a Py_ssize_t */
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002109 return NULL;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002110 }
2111 total = nbytes + 4*SST;
Tim Petersddea2082002-03-23 10:03:50 +00002112
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002113 tail = data + original_nbytes;
Victor Stinner9e87e772017-11-24 12:09:24 +01002114 block_serialno = read_size_t(tail + SST);
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002115 /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning and
2116 ERASED_SIZE bytes at the end as dead, and save a copy of the erased bytes.
2117 */
2118 if (original_nbytes <= sizeof(save)) {
2119 memcpy(save, data, original_nbytes);
2120 memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
2121 }
2122 else {
2123 memcpy(save, data, ERASED_SIZE);
2124 memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
2125 memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
2126 memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
Victor Stinner9ed83c42017-10-31 12:18:10 -07002127 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002128
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002129 /* Resize and add decorations. */
2130 r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
2131 if (r == NULL) {
2132 nbytes = original_nbytes;
Victor Stinner9ed83c42017-10-31 12:18:10 -07002133 }
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002134 else {
2135 head = r;
2136 bumpserialno();
Victor Stinner9e87e772017-11-24 12:09:24 +01002137 block_serialno = serialno;
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002138 }
2139
2140 write_size_t(head, nbytes);
2141 head[SST] = (uint8_t)api->api_id;
2142 memset(head + SST + 1, FORBIDDENBYTE, SST-1);
2143 data = head + 2*SST;
Victor Stinnerc4266362013-07-09 00:44:43 +02002144
Victor Stinner9ed83c42017-10-31 12:18:10 -07002145 tail = data + nbytes;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002146 memset(tail, FORBIDDENBYTE, SST);
Victor Stinner9e87e772017-11-24 12:09:24 +01002147 write_size_t(tail + SST, block_serialno);
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002148
2149 /* Restore saved bytes. */
2150 if (original_nbytes <= sizeof(save)) {
2151 memcpy(data, save, Py_MIN(nbytes, original_nbytes));
2152 }
2153 else {
2154 size_t i = original_nbytes - ERASED_SIZE;
2155 memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
2156 if (nbytes > i) {
2157 memcpy(data + i, &save[ERASED_SIZE],
2158 Py_MIN(nbytes - i, ERASED_SIZE));
2159 }
2160 }
2161
2162 if (r == NULL) {
2163 return NULL;
2164 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002165
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002166 if (nbytes > original_nbytes) {
2167 /* growing: mark new extra memory clean */
Serhiy Storchaka3cc4c532017-11-07 12:46:42 +02002168 memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002169 }
Tim Peters85cc1c42002-04-12 08:52:50 +00002170
Victor Stinner9ed83c42017-10-31 12:18:10 -07002171 return data;
Tim Petersddea2082002-03-23 10:03:50 +00002172}
2173
Victor Stinnerc4aec362016-03-14 22:26:53 +01002174static void
2175_PyMem_DebugCheckGIL(void)
2176{
Victor Stinnerc4aec362016-03-14 22:26:53 +01002177 if (!PyGILState_Check())
2178 Py_FatalError("Python memory allocator called "
2179 "without holding the GIL");
Victor Stinnerc4aec362016-03-14 22:26:53 +01002180}
2181
2182static void *
2183_PyMem_DebugMalloc(void *ctx, size_t nbytes)
2184{
2185 _PyMem_DebugCheckGIL();
2186 return _PyMem_DebugRawMalloc(ctx, nbytes);
2187}
2188
2189static void *
2190_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
2191{
2192 _PyMem_DebugCheckGIL();
2193 return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
2194}
2195
Victor Stinner9ed83c42017-10-31 12:18:10 -07002196
Victor Stinnerc4aec362016-03-14 22:26:53 +01002197static void
2198_PyMem_DebugFree(void *ctx, void *ptr)
2199{
2200 _PyMem_DebugCheckGIL();
Victor Stinner0aed3a42016-03-23 11:30:43 +01002201 _PyMem_DebugRawFree(ctx, ptr);
Victor Stinnerc4aec362016-03-14 22:26:53 +01002202}
2203
Victor Stinner9ed83c42017-10-31 12:18:10 -07002204
Victor Stinnerc4aec362016-03-14 22:26:53 +01002205static void *
2206_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
2207{
2208 _PyMem_DebugCheckGIL();
2209 return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
2210}
2211
Tim Peters7ccfadf2002-04-01 06:04:21 +00002212/* Check the forbidden bytes on both ends of the memory allocated for p.
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00002213 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
Tim Peters7ccfadf2002-04-01 06:04:21 +00002214 * and call Py_FatalError to kill the program.
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002215 * The API id is also checked.
Tim Peters7ccfadf2002-04-01 06:04:21 +00002216 */
Victor Stinner0507bf52013-07-07 02:05:46 +02002217static void
2218_PyMem_DebugCheckAddress(char api, const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002219{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002220 const uint8_t *q = (const uint8_t *)p;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002221 char msgbuf[64];
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02002222 const char *msg;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002223 size_t nbytes;
Benjamin Peterson19517e42016-09-18 19:22:22 -07002224 const uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002225 int i;
2226 char id;
Tim Petersddea2082002-03-23 10:03:50 +00002227
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002228 if (p == NULL) {
2229 msg = "didn't expect a NULL pointer";
2230 goto error;
2231 }
Tim Petersddea2082002-03-23 10:03:50 +00002232
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002233 /* Check the API id */
2234 id = (char)q[-SST];
2235 if (id != api) {
2236 msg = msgbuf;
Serhiy Storchakae2f92de2017-11-11 13:06:26 +02002237 snprintf(msgbuf, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002238 msgbuf[sizeof(msgbuf)-1] = 0;
2239 goto error;
2240 }
Kristján Valur Jónssonae4cfb12009-09-28 13:45:02 +00002241
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002242 /* Check the stuff at the start of p first: if there's underwrite
2243 * corruption, the number-of-bytes field may be nuts, and checking
2244 * the tail could lead to a segfault then.
2245 */
2246 for (i = SST-1; i >= 1; --i) {
2247 if (*(q-i) != FORBIDDENBYTE) {
2248 msg = "bad leading pad byte";
2249 goto error;
2250 }
2251 }
Tim Petersddea2082002-03-23 10:03:50 +00002252
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002253 nbytes = read_size_t(q - 2*SST);
2254 tail = q + nbytes;
2255 for (i = 0; i < SST; ++i) {
2256 if (tail[i] != FORBIDDENBYTE) {
2257 msg = "bad trailing pad byte";
2258 goto error;
2259 }
2260 }
Tim Petersddea2082002-03-23 10:03:50 +00002261
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002262 return;
Tim Petersd1139e02002-03-28 07:32:11 +00002263
2264error:
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002265 _PyObject_DebugDumpAddress(p);
2266 Py_FatalError(msg);
Tim Petersddea2082002-03-23 10:03:50 +00002267}
2268
Tim Peters7ccfadf2002-04-01 06:04:21 +00002269/* Display info to stderr about the memory block at p. */
Victor Stinner0507bf52013-07-07 02:05:46 +02002270static void
Neil Schemenauerd2560cd2002-04-12 03:10:20 +00002271_PyObject_DebugDumpAddress(const void *p)
Tim Petersddea2082002-03-23 10:03:50 +00002272{
Benjamin Peterson19517e42016-09-18 19:22:22 -07002273 const uint8_t *q = (const uint8_t *)p;
2274 const uint8_t *tail;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002275 size_t nbytes, serial;
2276 int i;
2277 int ok;
2278 char id;
Tim Petersddea2082002-03-23 10:03:50 +00002279
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002280 fprintf(stderr, "Debug memory block at address p=%p:", p);
2281 if (p == NULL) {
2282 fprintf(stderr, "\n");
2283 return;
2284 }
2285 id = (char)q[-SST];
2286 fprintf(stderr, " API '%c'\n", id);
Tim Petersddea2082002-03-23 10:03:50 +00002287
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002288 nbytes = read_size_t(q - 2*SST);
2289 fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "
2290 "requested\n", nbytes);
Tim Petersddea2082002-03-23 10:03:50 +00002291
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002292 /* In case this is nuts, check the leading pad bytes first. */
2293 fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);
2294 ok = 1;
2295 for (i = 1; i <= SST-1; ++i) {
2296 if (*(q-i) != FORBIDDENBYTE) {
2297 ok = 0;
2298 break;
2299 }
2300 }
2301 if (ok)
2302 fputs("FORBIDDENBYTE, as expected.\n", stderr);
2303 else {
2304 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
2305 FORBIDDENBYTE);
2306 for (i = SST-1; i >= 1; --i) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07002307 const uint8_t byte = *(q-i);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002308 fprintf(stderr, " at p-%d: 0x%02x", i, byte);
2309 if (byte != FORBIDDENBYTE)
2310 fputs(" *** OUCH", stderr);
2311 fputc('\n', stderr);
2312 }
Tim Peters449b5a82002-04-28 06:14:45 +00002313
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002314 fputs(" Because memory is corrupted at the start, the "
2315 "count of bytes requested\n"
2316 " may be bogus, and checking the trailing pad "
2317 "bytes may segfault.\n", stderr);
2318 }
Tim Petersddea2082002-03-23 10:03:50 +00002319
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002320 tail = q + nbytes;
2321 fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);
2322 ok = 1;
2323 for (i = 0; i < SST; ++i) {
2324 if (tail[i] != FORBIDDENBYTE) {
2325 ok = 0;
2326 break;
2327 }
2328 }
2329 if (ok)
2330 fputs("FORBIDDENBYTE, as expected.\n", stderr);
2331 else {
2332 fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002333 FORBIDDENBYTE);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002334 for (i = 0; i < SST; ++i) {
Benjamin Peterson19517e42016-09-18 19:22:22 -07002335 const uint8_t byte = tail[i];
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002336 fprintf(stderr, " at tail+%d: 0x%02x",
Stefan Krah735bb122010-11-26 10:54:09 +00002337 i, byte);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002338 if (byte != FORBIDDENBYTE)
2339 fputs(" *** OUCH", stderr);
2340 fputc('\n', stderr);
2341 }
2342 }
Tim Petersddea2082002-03-23 10:03:50 +00002343
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002344 serial = read_size_t(tail + SST);
2345 fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T
2346 "u to debug malloc/realloc.\n", serial);
Tim Petersddea2082002-03-23 10:03:50 +00002347
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002348 if (nbytes > 0) {
2349 i = 0;
2350 fputs(" Data at p:", stderr);
2351 /* print up to 8 bytes at the start */
2352 while (q < tail && i < 8) {
2353 fprintf(stderr, " %02x", *q);
2354 ++i;
2355 ++q;
2356 }
2357 /* and up to 8 at the end */
2358 if (q < tail) {
2359 if (tail - q > 8) {
2360 fputs(" ...", stderr);
2361 q = tail - 8;
2362 }
2363 while (q < tail) {
2364 fprintf(stderr, " %02x", *q);
2365 ++q;
2366 }
2367 }
2368 fputc('\n', stderr);
2369 }
Victor Stinner0611c262016-03-15 22:22:13 +01002370 fputc('\n', stderr);
2371
2372 fflush(stderr);
2373 _PyMem_DumpTraceback(fileno(stderr), p);
Tim Petersddea2082002-03-23 10:03:50 +00002374}
2375
David Malcolm49526f42012-06-22 14:55:41 -04002376
Thomas Wouters73e5a5b2006-06-08 15:35:45 +00002377static size_t
David Malcolm49526f42012-06-22 14:55:41 -04002378printone(FILE *out, const char* msg, size_t value)
Tim Peters16bcb6b2002-04-05 05:45:31 +00002379{
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002380 int i, k;
2381 char buf[100];
2382 size_t origvalue = value;
Tim Peters16bcb6b2002-04-05 05:45:31 +00002383
David Malcolm49526f42012-06-22 14:55:41 -04002384 fputs(msg, out);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002385 for (i = (int)strlen(msg); i < 35; ++i)
David Malcolm49526f42012-06-22 14:55:41 -04002386 fputc(' ', out);
2387 fputc('=', out);
Tim Peters49f26812002-04-06 01:45:35 +00002388
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002389 /* Write the value with commas. */
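    /* e.g. a value of 1234567 is rendered as "1,234,567" (illustrative). */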
2390 i = 22;
2391 buf[i--] = '\0';
2392 buf[i--] = '\n';
2393 k = 3;
2394 do {
2395 size_t nextvalue = value / 10;
Benjamin Peterson2dba1ee2013-02-20 16:54:30 -05002396 unsigned int digit = (unsigned int)(value - nextvalue * 10);
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002397 value = nextvalue;
2398 buf[i--] = (char)(digit + '0');
2399 --k;
2400 if (k == 0 && value && i >= 0) {
2401 k = 3;
2402 buf[i--] = ',';
2403 }
2404 } while (value && i >= 0);
Tim Peters49f26812002-04-06 01:45:35 +00002405
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002406 while (i >= 0)
2407 buf[i--] = ' ';
David Malcolm49526f42012-06-22 14:55:41 -04002408 fputs(buf, out);
Tim Peters49f26812002-04-06 01:45:35 +00002409
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002410 return origvalue;
Tim Peters16bcb6b2002-04-05 05:45:31 +00002411}
2412
David Malcolm49526f42012-06-22 14:55:41 -04002413void
2414_PyDebugAllocatorStats(FILE *out,
2415 const char *block_name, int num_blocks, size_t sizeof_block)
2416{
2417 char buf1[128];
2418 char buf2[128];
2419 PyOS_snprintf(buf1, sizeof(buf1),
Tim Peterseaa3bcc2013-09-05 22:57:04 -05002420 "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
David Malcolm49526f42012-06-22 14:55:41 -04002421 num_blocks, block_name, sizeof_block);
2422 PyOS_snprintf(buf2, sizeof(buf2),
2423 "%48s ", buf1);
2424 (void)printone(out, buf2, num_blocks * sizeof_block);
2425}
2426
Victor Stinner34be807c2016-03-14 12:04:26 +01002427
David Malcolm49526f42012-06-22 14:55:41 -04002428#ifdef WITH_PYMALLOC
2429
Victor Stinner34be807c2016-03-14 12:04:26 +01002430#ifdef Py_DEBUG
2431/* Is target in the list? The list is traversed via the nextpool pointers.
2432 * The list may be NULL-terminated, or circular. Return 1 if target is in
2433 * list, else 0.
2434 */
2435static int
2436pool_is_in_list(const poolp target, poolp list)
2437{
2438 poolp origlist = list;
2439 assert(target != NULL);
2440 if (list == NULL)
2441 return 0;
2442 do {
2443 if (target == list)
2444 return 1;
2445 list = list->nextpool;
2446 } while (list != NULL && list != origlist);
2447 return 0;
2448}
2449#endif
2450
David Malcolm49526f42012-06-22 14:55:41 -04002451/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00002452 * In Py_DEBUG mode, also perform some expensive internal consistency
2453 * checks.
Victor Stinner6bf992a2017-12-06 17:26:10 +01002454 *
2455 * Return 0 if the memory debug hooks are not installed or no statistics were
Leo Ariasc3d95082018-02-03 18:36:10 -06002456 * written into out, return 1 otherwise.
Tim Peters08d82152002-04-18 22:25:03 +00002457 */
Victor Stinner6bf992a2017-12-06 17:26:10 +01002458int
David Malcolm49526f42012-06-22 14:55:41 -04002459_PyObject_DebugMallocStats(FILE *out)
Tim Peters7ccfadf2002-04-01 06:04:21 +00002460{
Victor Stinner6bf992a2017-12-06 17:26:10 +01002461 if (!_PyMem_PymallocEnabled()) {
2462 return 0;
2463 }
2464
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002465 uint i;
2466 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
2467 /* # of pools, allocated blocks, and free blocks per class index */
2468 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2469 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2470 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2471 /* total # of allocated bytes in used and full pools */
2472 size_t allocated_bytes = 0;
2473 /* total # of available bytes in used pools */
2474 size_t available_bytes = 0;
2475 /* # of free pools + pools not yet carved out of current arena */
2476 uint numfreepools = 0;
2477 /* # of bytes for arena alignment padding */
2478 size_t arena_alignment = 0;
2479 /* # of bytes in used and full pools used for pool_headers */
2480 size_t pool_header_bytes = 0;
2481 /* # of bytes in used and full pools wasted due to quantization,
2482 * i.e. the necessarily leftover space at the ends of used and
2483 * full pools.
2484 */
2485 size_t quantization = 0;
2486 /* # of arenas actually allocated. */
2487 size_t narenas = 0;
2488 /* running total -- should equal narenas * ARENA_SIZE */
2489 size_t total;
2490 char buf[128];
Tim Peters7ccfadf2002-04-01 06:04:21 +00002491
David Malcolm49526f42012-06-22 14:55:41 -04002492 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002493 SMALL_REQUEST_THRESHOLD, numclasses);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002494
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002495 for (i = 0; i < numclasses; ++i)
2496 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002497
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002498 /* Because full pools aren't linked to from anything, it's easiest
2499 * to march over all the arenas. If we're lucky, most of the memory
2500 * will be living in full pools -- would be a shame to miss them.
2501 */
Victor Stinner9e87e772017-11-24 12:09:24 +01002502 for (i = 0; i < maxarenas; ++i) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002503 uint j;
Victor Stinner9e87e772017-11-24 12:09:24 +01002504 uintptr_t base = arenas[i].address;
Thomas Woutersa9773292006-04-21 09:43:23 +00002505
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002506 /* Skip arenas which are not allocated. */
Victor Stinner9e87e772017-11-24 12:09:24 +01002507 if (arenas[i].address == (uintptr_t)NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002508 continue;
2509 narenas += 1;
Thomas Woutersa9773292006-04-21 09:43:23 +00002510
Victor Stinner9e87e772017-11-24 12:09:24 +01002511 numfreepools += arenas[i].nfreepools;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002512
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002513 /* round up to pool alignment */
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002514 if (base & (uintptr_t)POOL_SIZE_MASK) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002515 arena_alignment += POOL_SIZE;
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002516 base &= ~(uintptr_t)POOL_SIZE_MASK;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002517 base += POOL_SIZE;
2518 }
Tim Peters7ccfadf2002-04-01 06:04:21 +00002519
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002520 /* visit every pool in the arena */
Victor Stinner9e87e772017-11-24 12:09:24 +01002521 assert(base <= (uintptr_t) arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
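            /* NUMBLOCKS(sz) is the capacity of a pool in this size class,
             * so the difference below is the number of blocks currently
             * available in this pool (freed or never yet handed out).
             */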
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
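        /* Each pool strands (POOL_SIZE - POOL_OVERHEAD) % size bytes that
         * are too small to hold one more block.  For example, on a typical
         * 64-bit build (4 KiB pools, 48-byte rounded pool header), the
         * 64-byte class wastes (4096 - 48) % 64 == 16 bytes per pool.
         */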
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
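    /* serialno is bumped by the debug hooks on each debug allocation, so
     * the figure below is only meaningful when those hooks are installed.
     */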
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", serialno);
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
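    /* Together, the categories above should account for every byte of
     * every allocated arena, so Total should match the "arenas *
     * bytes/arena" figure printed earlier.
     */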
    return 1;
}
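
/* The report above is reachable from Python as sys._debugmallocstats();
 * setting the PYTHONMALLOCSTATS environment variable makes pymalloc emit
 * it to stderr whenever a new arena is allocated.
 */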

#endif /* #ifdef WITH_PYMALLOC */