#include "Python.h"

#include <stdbool.h>


/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


/* Python's malloc wrappers (see pymem.h) */

#undef  uint
#define uint    unsigned int    /* assuming >= 16 bits */

/* Forward declaration */
static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugRawFree(void *ctx, void *ptr);

static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);

static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain);

#if defined(__has_feature)  /* Clang */
#  if __has_feature(address_sanitizer)  /* is ASAN enabled? */
#    define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
         __attribute__((no_address_safety_analysis))
#  else
#    define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
#  endif
#else
#  if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
#    define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
         __attribute__((no_address_safety_analysis))
#  else
#    define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
#  endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
       for calloc(0, 0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (nelem == 0 || elsize == 0) {
        nelem = 1;
        elsize = 1;
    }
    return calloc(nelem, elsize);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif

#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
#ifdef WITH_PYMALLOC
#  define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
#endif

#define PYRAW_ALLOC MALLOC_ALLOC
#ifdef WITH_PYMALLOC
#  define PYOBJ_ALLOC PYMALLOC_ALLOC
#else
#  define PYOBJ_ALLOC MALLOC_ALLOC
#endif
#define PYMEM_ALLOC PYOBJ_ALLOC

typedef struct {
    /* We tag each block with an API ID in order to detect API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', PYRAW_ALLOC},
    {'m', PYMEM_ALLOC},
    {'o', PYOBJ_ALLOC}
    };

#define PYDBGRAW_ALLOC \
    {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC \
    {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC \
    {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}

#ifdef Py_DEBUG
static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC;
#else
static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC;
#endif


static int
pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug,
                            PyMemAllocatorEx *old_alloc)
{
    if (old_alloc != NULL) {
        PyMem_GetAllocator(domain, old_alloc);
    }

    PyMemAllocatorEx new_alloc;
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW:
        new_alloc = (PyMemAllocatorEx)PYRAW_ALLOC;
        break;
    case PYMEM_DOMAIN_MEM:
        new_alloc = (PyMemAllocatorEx)PYMEM_ALLOC;
        break;
    case PYMEM_DOMAIN_OBJ:
        new_alloc = (PyMemAllocatorEx)PYOBJ_ALLOC;
        break;
    default:
        /* unknown domain */
        return -1;
    }
    PyMem_SetAllocator(domain, &new_alloc);
    if (debug) {
        _PyMem_SetupDebugHooksDomain(domain);
    }
    return 0;
}


int
_PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
                           PyMemAllocatorEx *old_alloc)
{
#ifdef Py_DEBUG
    const int debug = 1;
#else
    const int debug = 0;
#endif
    return pymem_set_default_allocator(domain, debug, old_alloc);
}


int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is unset, empty, or ignored (-E/-I command line
           options): use the default memory allocators */
        opt = "default";
    }

    if (strcmp(opt, "default") == 0) {
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_MEM, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_OBJ, NULL);
    }
    else if (strcmp(opt, "debug") == 0) {
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_RAW, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_MEM, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_OBJ, 1, NULL);
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0 || strcmp(opt, "pymalloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);

        PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &pymalloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &pymalloc);

        if (strcmp(opt, "pymalloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
#endif
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &malloc_alloc);

        if (strcmp(opt, "malloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}
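
/* Editorial note: the option strings handled above correspond to the
   PYTHONMALLOC environment variable.  For example (illustrative shell
   usage, not part of this file):

       PYTHONMALLOC=debug python3 script.py           # debug hooks on default allocators
       PYTHONMALLOC=malloc python3 script.py          # force the C library malloc
       PYTHONMALLOC=pymalloc_debug python3 script.py  # pymalloc + debug hooks

   Unknown values make _PyMem_SetupAllocators() return -1, as coded above. */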


static int
pymemallocator_eq(PyMemAllocatorEx *a, PyMemAllocatorEx *b)
{
    return (memcmp(a, b, sizeof(PyMemAllocatorEx)) == 0);
}


const char*
_PyMem_GetAllocatorsName(void)
{
    PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
#ifdef WITH_PYMALLOC
    PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
#endif

    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &malloc_alloc) &&
        pymemallocator_eq(&_PyObject, &malloc_alloc))
    {
        return "malloc";
    }
#ifdef WITH_PYMALLOC
    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &pymalloc) &&
        pymemallocator_eq(&_PyObject, &pymalloc))
    {
        return "pymalloc";
    }
#endif

    PyMemAllocatorEx dbg_raw = PYDBGRAW_ALLOC;
    PyMemAllocatorEx dbg_mem = PYDBGMEM_ALLOC;
    PyMemAllocatorEx dbg_obj = PYDBGOBJ_ALLOC;

    if (pymemallocator_eq(&_PyMem_Raw, &dbg_raw) &&
        pymemallocator_eq(&_PyMem, &dbg_mem) &&
        pymemallocator_eq(&_PyObject, &dbg_obj))
    {
        /* Debug hooks installed */
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &malloc_alloc))
        {
            return "malloc_debug";
        }
#ifdef WITH_PYMALLOC
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &pymalloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &pymalloc))
        {
            return "pymalloc_debug";
        }
#endif
    }
    return NULL;
}


#undef MALLOC_ALLOC
#undef PYMALLOC_ALLOC
#undef PYRAW_ALLOC
#undef PYMEM_ALLOC
#undef PYOBJ_ALLOC
#undef PYDBGRAW_ALLOC
#undef PYDBGMEM_ALLOC
#undef PYDBGOBJ_ALLOC


static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };

#ifdef WITH_PYMALLOC
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

static int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif


static void
_PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain)
{
    PyMemAllocatorEx alloc;

    if (domain == PYMEM_DOMAIN_RAW) {
        if (_PyMem_Raw.malloc == _PyMem_DebugRawMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        alloc.ctx = &_PyMem_Debug.raw;
        alloc.malloc = _PyMem_DebugRawMalloc;
        alloc.calloc = _PyMem_DebugRawCalloc;
        alloc.realloc = _PyMem_DebugRawRealloc;
        alloc.free = _PyMem_DebugRawFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_MEM) {
        if (_PyMem.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        alloc.ctx = &_PyMem_Debug.mem;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_OBJ) {
        if (_PyObject.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        alloc.ctx = &_PyMem_Debug.obj;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}


void
PyMem_SetupDebugHooks(void)
{
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_RAW);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_MEM);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_OBJ);
}

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
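
/* Editorial sketch: how third-party code can interpose on a domain using
   only PyMem_GetAllocator()/PyMem_SetAllocator() above.  The hook_* names
   are hypothetical; the forwarding pattern mirrors how the debug hooks in
   this file wrap the previous allocator. */
#if 0   /* illustration only -- not compiled */
static PyMemAllocatorEx hook_orig_raw;

static void *
hook_malloc(void *ctx, size_t size)
{
    /* a real hook could count or trace here before forwarding */
    return hook_orig_raw.malloc(hook_orig_raw.ctx, size);
}

static void *
hook_calloc(void *ctx, size_t nelem, size_t elsize)
{
    return hook_orig_raw.calloc(hook_orig_raw.ctx, nelem, elsize);
}

static void *
hook_realloc(void *ctx, void *ptr, size_t size)
{
    return hook_orig_raw.realloc(hook_orig_raw.ctx, ptr, size);
}

static void
hook_free(void *ctx, void *ptr)
{
    hook_orig_raw.free(hook_orig_raw.ctx, ptr);
}

static void
hook_install(void)
{
    /* member order matches the MALLOC_ALLOC initializers above */
    PyMemAllocatorEx alloc = {NULL, hook_malloc, hook_calloc,
                              hook_realloc, hook_free};
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &hook_orig_raw);
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
}
#endif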

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}
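
/* Editorial note: the guard in PyMem_RawCalloc() above rejects products
   that would overflow without ever multiplying.  With a 64-bit Py_ssize_t,
   PY_SSIZE_T_MAX is 2**63 - 1, so for elsize == 16 any nelem greater than
   (2**63 - 1) / 16 returns NULL before nelem * elsize can wrap around. */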

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}


void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}


wchar_t*
_PyMem_RawWcsdup(const wchar_t *str)
{
    assert(str != NULL);

    size_t len = wcslen(str);
    if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
        return NULL;
    }

    size_t size = (len + 1) * sizeof(wchar_t);
    wchar_t *str2 = PyMem_RawMalloc(size);
    if (str2 == NULL) {
        return NULL;
    }

    memcpy(str2, str, size);
    return str2;
}

char *
_PyMem_RawStrdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_RawMalloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_Malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}

#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif

/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2).  It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list).  This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
        _______________________________          |                        |
       [   Python's object allocator   ]         |                        |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
        ______________________________________________________________    |
       [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
     __________________________________________________________________
    [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
     _______________________________________________________________________
    [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
     __________________________________   __________________________________
    [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists. This technique is quite
 * efficient for memory-intensive programs which allocate mainly
 * small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 * allocator.
 */

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)
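
/* Editorial note, mirroring the size class table above: a request of
   nbytes == 20 maps to index (20 - 1) >> ALIGNMENT_SHIFT == 2, and
   INDEX2SIZE(2) == (2 + 1) << 3 == 24, i.e. a 24-byte block -- the same
   computation pymalloc_alloc() performs below. */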

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory"; it only wastes
 * addressable range.
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base addr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc.  Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena:  free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use.  `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
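
/* Editorial note: with POOL_SIZE == 4096, POOL_ADDR() just masks off the
   low 12 bits of a block address, so any block maps back to its
   pool_header with a single AND.  Assuming a 48-byte pool_header (a
   typical 64-bit layout), NUMBLOCKS(2) == (4096 - 48) / 24 == 168 blocks
   of the 24-byte size class per pool. */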

/*==========================================================================*/

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved.  For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on:  index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed.  Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above).  It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state.  If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's no longer linked to from anything, and its nextpool and prevpool
    members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member.  The prevpool member has no meaning in this case.
    Empty pools have no inherent size class:  the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed.  pool->freeblock points to
the start of a singly-linked list of free blocks within the pool.  When a
block is freed, it's inserted at the front of its pool's freeblock list.  Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized.  Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block.  This is consistent with the
fact that pymalloc strives at all levels (arena, pool, and block) never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL.  If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks.  The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized.  All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity:  While the usedpools vector is declared to have poolp
entries, it doesn't really.  It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header.  The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members.  The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes.  So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted.  It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header).  OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
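
/* Editorial sketch: what the fudged-up headers above buy us.  A header is
   its own nextpool exactly when its list is empty, which is the test
   pymalloc_alloc() performs below (example_class_has_used_pool is a
   hypothetical helper). */
#if 0   /* illustration only -- not compiled */
static int
example_class_has_used_pool(uint size)
{
    poolp pool = usedpools[size + size];    /* list header for this class */
    return pool != pool->nextpool;          /* self-link means "empty list" */
}
#endif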

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects.  It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena).  Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed.  Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them).  Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty.  Key invariant:  an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available.  These pools are either waiting to be reused,
    or have not been used before.  The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    if (debug_stats == -1) {
        const char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

/*
address_in_range(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because address_in_range is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky:  Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure:  A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on the fact that maxarenas is also 0 in that case, so
that (POOL)->arenaindex < maxarenas must be false, saving us from trying to
index into a NULL arenas.

Details:  given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore address_in_range correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result:  AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note:  The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/
Thomas Woutersa9773292006-04-21 09:43:23 +00001296
Benjamin Peterson3924f932016-09-18 19:12:48 -07001297static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
1298address_in_range(void *p, poolp pool)
1299{
1300 // Since address_in_range may be reading from memory which was not allocated
1301 // by Python, it is important that pool->arenaindex is read only once, as
1302 // another thread may be concurrently modifying the value without holding
1303 // the GIL. The following dance forces the compiler to read pool->arenaindex
1304 // only once.
1305 uint arenaindex = *((volatile uint *)&pool->arenaindex);
Victor Stinner9e87e772017-11-24 12:09:24 +01001306 return arenaindex < maxarenas &&
1307 (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
1308 arenas[arenaindex].address != 0;
Benjamin Peterson3924f932016-09-18 19:12:48 -07001309}
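
/* A minimal usage sketch (illustrative only, not part of the allocator):
 * the free/realloc entry points below combine POOL_ADDR() with
 * address_in_range() like this to decide who owns an arbitrary pointer p:
 *
 *     poolp pool = POOL_ADDR(p);        // mask p down to its pool base
 *     if (address_in_range(p, pool)) {
 *         // pymalloc owns p: operate on the pool's free list
 *     }
 *     else {
 *         // p came from the system allocator: fall back to it
 *     }
 */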


/*==========================================================================*/

/* pymalloc allocator

   The basic blocks are ordered by decreasing execution frequency,
   which minimizes the number of jumps in the most common cases,
   improves branch prediction and instruction scheduling (small
   block allocations typically result in a couple of instructions).
   Unless the optimizer reorders everything, being too smart...

   Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.

   Return 0 if pymalloc failed to allocate the memory block: on requests
   larger than SMALL_REQUEST_THRESHOLD, on error in the code below (as a
   last chance to serve the request), or when the max memory limit has been
   reached. */
static int
pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return 0;
    }
#endif

    if (nbytes == 0) {
        return 0;
    }
    if (nbytes > SMALL_REQUEST_THRESHOLD) {
        return 0;
    }

    /*
     * Most frequent paths first
     */
    size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    pool = usedpools[size + size];
    if (pool != pool->nextpool) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);
        if ((pool->freeblock = *(block **)bp) != NULL) {
            goto success;
        }

        /*
         * Reached the end of the free list, try to extend it.
         */
        if (pool->nextoffset <= pool->maxnextoffset) {
            /* There is room for another block. */
            pool->freeblock = (block*)pool +
                              pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(block **)(pool->freeblock) = NULL;
            goto success;
        }

        /* Pool is full, unlink from used pools. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        goto success;
    }

    /* There isn't a pool of the right size class immediately
     * available:  use a free pool.
     */
    if (usable_arenas == NULL) {
        /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (narenas_currently_allocated >= MAX_ARENAS) {
            goto failed;
        }
#endif
        usable_arenas = new_arena();
        if (usable_arenas == NULL) {
            goto failed;
        }
        usable_arenas->nextarena =
            usable_arenas->prevarena = NULL;
    }
    assert(usable_arenas->address != 0);

    /* Try to get a cached free pool. */
    pool = usable_arenas->freepools;
    if (pool != NULL) {
        /* Unlink from cached pools. */
        usable_arenas->freepools = pool->nextpool;

        /* This arena already had the smallest nfreepools
         * value, so decreasing nfreepools doesn't change
         * that, and we don't need to rearrange the
         * usable_arenas list.  However, if the arena has
         * become wholly allocated, we need to remove its
         * arena_object from usable_arenas.
         */
        --usable_arenas->nfreepools;
        if (usable_arenas->nfreepools == 0) {
            /* Wholly allocated:  remove. */
            assert(usable_arenas->freepools == NULL);
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);

            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(usable_arenas->freepools != NULL ||
                   usable_arenas->pool_address <=
                   (block*)usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
        }

    init_pool:
        /* Frontlink to used pools. */
        next = usedpools[size + size]; /* == prev */
        pool->nextpool = next;
        pool->prevpool = next;
        next->nextpool = pool;
        next->prevpool = pool;
        pool->ref.count = 1;
        if (pool->szidx == size) {
            /* Luckily, this pool last contained blocks
             * of the same size class, so its header
             * and free list are already initialized.
             */
            bp = pool->freeblock;
            assert(bp != NULL);
            pool->freeblock = *(block **)bp;
            goto success;
        }
        /*
         * Initialize the pool header, set up the free list to
         * contain just the second block, and return the first
         * block.
         */
        pool->szidx = size;
        size = INDEX2SIZE(size);
        bp = (block *)pool + POOL_OVERHEAD;
        pool->nextoffset = POOL_OVERHEAD + (size << 1);
        pool->maxnextoffset = POOL_SIZE - size;
        pool->freeblock = bp + size;
        *(block **)(pool->freeblock) = NULL;
        goto success;
    }

    /* Carve off a new pool. */
    assert(usable_arenas->nfreepools > 0);
    assert(usable_arenas->freepools == NULL);
    pool = (poolp)usable_arenas->pool_address;
    assert((block*)pool <= (block*)usable_arenas->address +
                             ARENA_SIZE - POOL_SIZE);
    pool->arenaindex = (uint)(usable_arenas - arenas);
    assert(&arenas[pool->arenaindex] == usable_arenas);
    pool->szidx = DUMMY_SIZE_IDX;
    usable_arenas->pool_address += POOL_SIZE;
    --usable_arenas->nfreepools;

    if (usable_arenas->nfreepools == 0) {
        assert(usable_arenas->nextarena == NULL ||
               usable_arenas->nextarena->prevarena ==
               usable_arenas);
        /* Unlink the arena:  it is completely allocated. */
        usable_arenas = usable_arenas->nextarena;
        if (usable_arenas != NULL) {
            usable_arenas->prevarena = NULL;
            assert(usable_arenas->address != 0);
        }
    }

    goto init_pool;

success:
    assert(bp != NULL);
    *ptr_p = (void *)bp;
    return 1;

failed:
    return 0;
}
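
/* Worked example for the size-class computation above (illustrative,
 * assuming ALIGNMENT == 8 and ALIGNMENT_SHIFT == 3): a request of
 * nbytes == 21 yields size = (21 - 1) >> 3 == 2, i.e. size class index 2,
 * whose blocks hold INDEX2SIZE(2) == (2 + 1) << 3 == 24 bytes -- the
 * request is rounded up to the next multiple of the 8-byte alignment.
 */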


static void *
_PyObject_Malloc(void *ctx, size_t nbytes)
{
    void* ptr;
    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawMalloc(nbytes);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}


static void *
_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
{
    void* ptr;

    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    size_t nbytes = nelem * elsize;

    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        memset(ptr, 0, nbytes);
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawCalloc(nelem, elsize);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}


/* Free a memory block allocated by pymalloc_alloc().
   Return 1 if it was freed.
   Return 0 if the block was not allocated by pymalloc_alloc(). */
static int
pymalloc_free(void *ctx, void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        return 0;
    }
    /* We allocated this address. */

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    *(block **)p = lastfree = pool->freeblock;
    pool->freeblock = (block *)p;
    if (!lastfree) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);        /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;

        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        goto success;
    }

    struct arena_object* ao;
    uint nf;  /* ao->nfreepools */

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (--pool->ref.count != 0) {
        /* pool isn't empty:  leave it in usedpools */
        goto success;
    }
    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (being not referenced, they are perhaps paged out).
     */
    next = pool->nextpool;
    prev = pool->prevpool;
    next->prevpool = prev;
    prev->nextpool = next;

    /* Link the pool to freepools.  This is a singly-linked
     * list, and pool->prevpool isn't used there.
     */
    ao = &arenas[pool->arenaindex];
    pool->nextpool = ao->freepools;
    ao->freepools = pool;
    nf = ++ao->nfreepools;

    /* All the rest is arena management.  We just freed
     * a pool, and there are 4 cases for arena mgmt:
     * 1. If all the pools are free, return the arena to
     *    the system free().
     * 2. If this is the only free pool in the arena,
     *    add the arena back to the `usable_arenas` list.
     * 3. If the "next" arena has a smaller count of free
     *    pools, we have to "slide this arena right" to
     *    restore that usable_arenas is sorted in order of
     *    nfreepools.
     * 4. Else there's nothing more to do.
     */
    if (nf == ao->ntotalpools) {
        /* Case 1.  First unlink ao from usable_arenas.
         */
        assert(ao->prevarena == NULL ||
               ao->prevarena->address != 0);
        assert(ao->nextarena == NULL ||
               ao->nextarena->address != 0);

        /* Fix the pointer in the prevarena, or the
         * usable_arenas pointer.
         */
        if (ao->prevarena == NULL) {
            usable_arenas = ao->nextarena;
            assert(usable_arenas == NULL ||
                   usable_arenas->address != 0);
        }
        else {
            assert(ao->prevarena->nextarena == ao);
            ao->prevarena->nextarena =
                ao->nextarena;
        }
        /* Fix the pointer in the nextarena. */
        if (ao->nextarena != NULL) {
            assert(ao->nextarena->prevarena == ao);
            ao->nextarena->prevarena =
                ao->prevarena;
        }
        /* Record that this arena_object slot is
         * available to be reused.
         */
        ao->nextarena = unused_arena_objects;
        unused_arena_objects = ao;

        /* Free the entire arena. */
        _PyObject_Arena.free(_PyObject_Arena.ctx,
                             (void *)ao->address, ARENA_SIZE);
        ao->address = 0;                    /* mark unassociated */
        --narenas_currently_allocated;

        goto success;
    }

    if (nf == 1) {
        /* Case 2.  Put ao at the head of
         * usable_arenas.  Note that because
         * ao->nfreepools was 0 before, ao isn't
         * currently on the usable_arenas list.
         */
        ao->nextarena = usable_arenas;
        ao->prevarena = NULL;
        if (usable_arenas)
            usable_arenas->prevarena = ao;
        usable_arenas = ao;
        assert(usable_arenas->address != 0);

        goto success;
    }

    /* If this arena is now out of order, we need to keep
     * the list sorted.  The list is kept sorted so that
     * the "most full" arenas are used first, which allows
     * the nearly empty arenas to be completely freed.  In
     * a few un-scientific tests, it seems like this
     * approach allowed a lot more memory to be freed.
     */
    if (ao->nextarena == NULL ||
        nf <= ao->nextarena->nfreepools) {
        /* Case 4.  Nothing to do. */
        goto success;
    }
    /* Case 3:  We have to move the arena towards the end
     * of the list, because it has more free pools than
     * the arena to its right.
     * First unlink ao from usable_arenas.
     */
    if (ao->prevarena != NULL) {
        /* ao isn't at the head of the list */
        assert(ao->prevarena->nextarena == ao);
        ao->prevarena->nextarena = ao->nextarena;
    }
    else {
        /* ao is at the head of the list */
        assert(usable_arenas == ao);
        usable_arenas = ao->nextarena;
    }
    ao->nextarena->prevarena = ao->prevarena;

    /* Locate the new insertion point by iterating over
     * the list, using our nextarena pointer.
     */
    while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
        ao->prevarena = ao->nextarena;
        ao->nextarena = ao->nextarena->nextarena;
    }

    /* Insert ao at this point. */
    assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
    assert(ao->prevarena->nextarena == ao->nextarena);

    ao->prevarena->nextarena = ao;
    if (ao->nextarena != NULL) {
        ao->nextarena->prevarena = ao;
    }

    /* Verify that the swaps worked. */
    assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
    assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
    assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
    assert((usable_arenas == ao && ao->prevarena == NULL)
           || ao->prevarena->nextarena == ao);

    goto success;

success:
    return 1;
}
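
/* Illustrative trace of the four cases above (assuming the standard 256 KiB
 * arenas and 4 KiB pools, i.e. up to 64 pools per arena): freeing the last
 * live block of a pool pushes the pool onto ao->freepools.  If that makes
 * nf == ao->ntotalpools, the whole arena goes back to the OS (case 1); if
 * nf == 1, the arena rejoins usable_arenas at its head (case 2); if the
 * arena now has more free pools than its right-hand neighbor, it slides
 * right to keep the list sorted by nfreepools (case 3); otherwise nothing
 * else happens (case 4).
 */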


static void
_PyObject_Free(void *ctx, void *p)
{
    /* PyObject_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    _Py_AllocatedBlocks--;
    if (!pymalloc_free(ctx, p)) {
        /* pymalloc didn't allocate this address */
        PyMem_RawFree(p);
    }
}


/* pymalloc realloc.

   If nbytes==0, then as the Python docs promise, we do not treat this like
   free(p), and return a non-NULL result.

   Return 1 if pymalloc reallocated memory and wrote the new pointer into
   newptr_p.

   Return 0 if pymalloc didn't allocate p. */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt:  let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff: it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        size = nbytes;
    }

    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    *newptr_p = bp;
    return 1;
}
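
/* Worked example for the copy-on-shrink rule above (illustrative): suppose
 * p lives in the 512-byte size class.  Shrinking to nbytes == 400 leaves
 * the block in place, since 4*400 > 3*512.  Shrinking to nbytes == 384 or
 * less copies it into a smaller size class, because at least 25% of the
 * space can then be reclaimed.
 */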


static void *
_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
{
    void *ptr2;

    if (ptr == NULL) {
        return _PyObject_Malloc(ctx, nbytes);
    }

    if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
        return ptr2;
    }

    return PyMem_RawRealloc(ptr, nbytes);
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}

#endif /* WITH_PYMALLOC */


/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uint8_t *q = (uint8_t *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uint8_t)(n & 0xff);
        n >>= 8;
    }
}
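
/* Illustrative example (assuming SST == 8): write_size_t(p, 0x1122) stores
 * the bytes 00 00 00 00 00 00 11 22 at p[0]..p[7], most significant byte
 * first, and read_size_t(p) reassembles them into 0x1122.  Big-endian order
 * makes the stored size easy to read in a hex memory dump.
 */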

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S]
    API ID.  See PEP 445.  This is a character, but seems undocumented.
p[S+1: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
    and _PyMem_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
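
/* Worked example of the layout above (illustrative, assuming S == 8): a
 * debug malloc of n == 10 bytes requests 10 + 4*8 == 42 bytes from the
 * underlying allocator.  Offsets 0..7 hold the requested size, offset 8 the
 * API ID, offsets 9..15 and 26..33 the FORBIDDENBYTE guards, offsets 16..25
 * the caller's 10 bytes (&p[16] is returned), and offsets 34..41 the serial
 * number.
 */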

static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of malloc'ed pad block */
    uint8_t *data;        /* p + 2*SST == pointer to data bytes */
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */

    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4 * SST;

    /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
     *          ^--- p    ^--- data   ^--- tail
       S: nbytes stored as size_t
       I: API identifier (1 byte)
       F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
       C: Clean bytes used later to store actual data
       N: Serial number stored as size_t */

    if (use_calloc) {
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    }
    else {
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    }
    if (p == NULL) {
        return NULL;
    }
    data = p + 2*SST;

    bumpserialno();

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    if (nbytes > 0 && !use_calloc) {
        memset(data, CLEANBYTE, nbytes);
    }

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    return data;
}

static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}

static void *
_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    size_t nbytes;
    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    nbytes = nelem * elsize;
    return _PyMem_DebugRawAlloc(1, ctx, nbytes);
}


/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs with the api ID are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
static void
_PyMem_DebugRawFree(void *ctx, void *p)
{
    /* PyMem_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *q = (uint8_t *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    _PyMem_DebugCheckAddress(api->api_id, p);
    nbytes = read_size_t(q);
    nbytes += 4 * SST;
    memset(q, DEADBYTE, nbytes);
    api->alloc.free(api->alloc.ctx, q);
}


static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *head;        /* base address of malloc'ed pad block */
    uint8_t *data;        /* pointer to data bytes */
    uint8_t *r;
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
    size_t block_serialno;
#define ERASED_SIZE 64
    uint8_t save[2*ERASED_SIZE];  /* A copy of erased bytes. */

    _PyMem_DebugCheckAddress(api->api_id, p);

    data = (uint8_t *)p;
    head = data - 2*SST;
    original_nbytes = read_size_t(head);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4*SST;

    tail = data + original_nbytes;
    block_serialno = read_size_t(tail + SST);
    /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning and
       ERASED_SIZE bytes at the end as dead, and save a copy of the erased
       bytes.
     */
    if (original_nbytes <= sizeof(save)) {
        memcpy(save, data, original_nbytes);
        memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
    }
    else {
        memcpy(save, data, ERASED_SIZE);
        memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
        memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
        memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
    }

    /* Resize and add decorations. */
    r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
    if (r == NULL) {
        nbytes = original_nbytes;
    }
    else {
        head = r;
        bumpserialno();
        block_serialno = serialno;
    }

    write_size_t(head, nbytes);
    head[SST] = (uint8_t)api->api_id;
    memset(head + SST + 1, FORBIDDENBYTE, SST-1);
    data = head + 2*SST;

    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, block_serialno);

    /* Restore saved bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(data, save, Py_MIN(nbytes, original_nbytes));
    }
    else {
        size_t i = original_nbytes - ERASED_SIZE;
        memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
        if (nbytes > i) {
            memcpy(data + i, &save[ERASED_SIZE],
                   Py_MIN(nbytes - i, ERASED_SIZE));
        }
    }

    if (r == NULL) {
        return NULL;
    }

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
    }

    return data;
}

static void
_PyMem_DebugCheckGIL(void)
{
    if (!PyGILState_Check())
        Py_FatalError("Python memory allocator called "
                      "without holding the GIL");
}

static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawMalloc(ctx, nbytes);
}

static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
}


static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}


static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    char msgbuf[64];
    const char *msg;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msgbuf, sizeof(msgbuf),
                 "bad ID: Allocated using API '%c', verified using API '%c'",
                 id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
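
/* Illustrative scenario: with the debug hooks installed (e.g. under
 * PYTHONMALLOC=debug or in a Py_DEBUG build), code that writes one byte
 * past the end of its allocation, such as
 *
 *     char *s = (char *)PyMem_Malloc(4);
 *     s[4] = '\0';            // clobbers a trailing FORBIDDENBYTE guard
 *     PyMem_Free(s);          // fatal: "bad trailing pad byte"
 *
 * is caught by the check above, which aborts with a report from
 * _PyObject_DebugDumpAddress() instead of letting the corruption go silent.
 */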

/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "       may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}


static size_t
printone(FILE *out, const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, out);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', out);
    fputc('=', out);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        unsigned int digit = (unsigned int)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, out);

    return origvalue;
}
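
/* Example of the output format (illustrative): a call such as
 *
 *     printone(out, "# arenas allocated total", 1234567);
 *
 * writes the message padded to 35 columns, an '=', and the comma-grouped
 * value right-justified in a 21-character field:
 *
 *     # arenas allocated total           =            1,234,567
 */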

void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char buf1[128];
    char buf2[128];
    PyOS_snprintf(buf1, sizeof(buf1),
                  "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(buf2, sizeof(buf2),
                  "%48s ", buf1);
    (void)printone(out, buf2, num_blocks * sizeof_block);
}


#ifdef WITH_PYMALLOC

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}
#endif

/* Print summary info to "out" about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 *
 * Return 0 if the memory debug hooks are not installed or no statistics
 * were written into out; return 1 otherwise.
 */
int
_PyObject_DebugMallocStats(FILE *out)
{
    if (!_PyMem_PymallocEnabled()) {
        return 0;
    }

    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint j;
        uintptr_t base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", serialno);
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
    return 1;
}

#endif /* #ifdef WITH_PYMALLOC */