#include "Python.h"

#include <stdbool.h>


/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


/* Python's malloc wrappers (see pymem.h) */

#undef uint
#define uint unsigned int /* assuming >= 16 bits */

/* Forward declarations */
static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugRawFree(void *ctx, void *ptr);

static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);

static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain);

#if defined(__has_feature)  /* Clang */
 #if __has_feature(address_sanitizer)  /* is ASAN enabled? */
 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
 #else
 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#else
 #if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
 #else
 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
 #endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declarations */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
       for calloc(0, 0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (nelem == 0 || elsize == 0) {
        nelem = 1;
        elsize = 1;
    }
    return calloc(nelem, elsize);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif

#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
#ifdef WITH_PYMALLOC
#  define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
#endif

#define PYRAW_ALLOC MALLOC_ALLOC
#ifdef WITH_PYMALLOC
#  define PYOBJ_ALLOC PYMALLOC_ALLOC
#else
#  define PYOBJ_ALLOC MALLOC_ALLOC
#endif
#define PYMEM_ALLOC PYOBJ_ALLOC

typedef struct {
    /* We tag each block with an API ID in order to detect API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', PYRAW_ALLOC},
    {'m', PYMEM_ALLOC},
    {'o', PYOBJ_ALLOC}
    };

#define PYDBGRAW_ALLOC \
    {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC \
    {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC \
    {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}

#ifdef Py_DEBUG
static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC;
#else
static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC;
#endif


static int
pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug,
                            PyMemAllocatorEx *old_alloc)
{
    if (old_alloc != NULL) {
        PyMem_GetAllocator(domain, old_alloc);
    }

    PyMemAllocatorEx new_alloc;
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW:
        new_alloc = (PyMemAllocatorEx)PYRAW_ALLOC;
        break;
    case PYMEM_DOMAIN_MEM:
        new_alloc = (PyMemAllocatorEx)PYMEM_ALLOC;
        break;
    case PYMEM_DOMAIN_OBJ:
        new_alloc = (PyMemAllocatorEx)PYOBJ_ALLOC;
        break;
    default:
        /* unknown domain */
        return -1;
    }
    PyMem_SetAllocator(domain, &new_alloc);
    if (debug) {
        _PyMem_SetupDebugHooksDomain(domain);
    }
    return 0;
}


int
_PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
                           PyMemAllocatorEx *old_alloc)
{
#ifdef Py_DEBUG
    const int debug = 1;
#else
    const int debug = 0;
#endif
    return pymem_set_default_allocator(domain, debug, old_alloc);
}


int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is empty, not set, or ignored (-E/-I command line
           options): use the default memory allocators */
        opt = "default";
    }

    if (strcmp(opt, "default") == 0) {
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_MEM, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_OBJ, NULL);
    }
    else if (strcmp(opt, "debug") == 0) {
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_RAW, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_MEM, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_OBJ, 1, NULL);
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0 || strcmp(opt, "pymalloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);

        PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &pymalloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &pymalloc);

        if (strcmp(opt, "pymalloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
#endif
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &malloc_alloc);

        if (strcmp(opt, "malloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}


static int
pymemallocator_eq(PyMemAllocatorEx *a, PyMemAllocatorEx *b)
{
    return (memcmp(a, b, sizeof(PyMemAllocatorEx)) == 0);
}


const char*
_PyMem_GetAllocatorsName(void)
{
    PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
#ifdef WITH_PYMALLOC
    PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
#endif

    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &malloc_alloc) &&
        pymemallocator_eq(&_PyObject, &malloc_alloc))
    {
        return "malloc";
    }
#ifdef WITH_PYMALLOC
    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &pymalloc) &&
        pymemallocator_eq(&_PyObject, &pymalloc))
    {
        return "pymalloc";
    }
#endif

    PyMemAllocatorEx dbg_raw = PYDBGRAW_ALLOC;
    PyMemAllocatorEx dbg_mem = PYDBGMEM_ALLOC;
    PyMemAllocatorEx dbg_obj = PYDBGOBJ_ALLOC;

    if (pymemallocator_eq(&_PyMem_Raw, &dbg_raw) &&
        pymemallocator_eq(&_PyMem, &dbg_mem) &&
        pymemallocator_eq(&_PyObject, &dbg_obj))
    {
        /* Debug hooks installed */
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &malloc_alloc))
        {
            return "malloc_debug";
        }
#ifdef WITH_PYMALLOC
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &pymalloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &pymalloc))
        {
            return "pymalloc_debug";
        }
#endif
    }
    return NULL;
}


#undef MALLOC_ALLOC
#undef PYMALLOC_ALLOC
#undef PYRAW_ALLOC
#undef PYMEM_ALLOC
#undef PYOBJ_ALLOC
#undef PYDBGRAW_ALLOC
#undef PYDBGMEM_ALLOC
#undef PYDBGOBJ_ALLOC


static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
    };

#ifdef WITH_PYMALLOC
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

static int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif

static void
_PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain)
{
    PyMemAllocatorEx alloc;

    if (domain == PYMEM_DOMAIN_RAW) {
        if (_PyMem_Raw.malloc == _PyMem_DebugRawMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        alloc.ctx = &_PyMem_Debug.raw;
        alloc.malloc = _PyMem_DebugRawMalloc;
        alloc.calloc = _PyMem_DebugRawCalloc;
        alloc.realloc = _PyMem_DebugRawRealloc;
        alloc.free = _PyMem_DebugRawFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_MEM) {
        if (_PyMem.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        alloc.ctx = &_PyMem_Debug.mem;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_OBJ) {
        if (_PyObject.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        alloc.ctx = &_PyMem_Debug.obj;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}


void
PyMem_SetupDebugHooks(void)
{
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_RAW);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_MEM);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_OBJ);
}

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
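
/* Illustrative sketch (not part of this file): a custom hook can be layered
   on top of the current allocator with the two functions above, exactly as
   the debug hooks below do.  `hook_malloc`, `saved` and `count` are
   hypothetical names used only for this example.

       static PyMemAllocatorEx saved;   // previous allocator, used as ctx
       static size_t count = 0;         // number of malloc calls seen

       static void *
       hook_malloc(void *ctx, size_t size)
       {
           PyMemAllocatorEx *prev = (PyMemAllocatorEx *)ctx;
           count++;
           return prev->malloc(prev->ctx, size);  // chain to the saved hook
       }

   To install: save the current allocator with
   PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &saved), fill a PyMemAllocatorEx
   whose ctx is &saved and whose malloc is hook_malloc (calloc/realloc/free
   wrapped the same way), and pass it to PyMem_SetAllocator(). */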

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}
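
/* Why the guard above divides instead of multiplying: nelem * elsize can
   wrap around in size_t arithmetic, so the product is never formed.  A
   worked example, assuming a 64-bit build (PY_SSIZE_T_MAX == 2**63 - 1):
   with elsize == 8, the guard rejects any nelem > (2**63 - 1) / 8, i.e.
   exactly the requests whose true product would exceed PY_SSIZE_T_MAX,
   while nelem == 0 or elsize == 0 passes through to the allocator. */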

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}


void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}


wchar_t*
_PyMem_RawWcsdup(const wchar_t *str)
{
    assert(str != NULL);

    size_t len = wcslen(str);
    if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
        return NULL;
    }

    size_t size = (len + 1) * sizeof(wchar_t);
    wchar_t *str2 = PyMem_RawMalloc(size);
    if (str2 == NULL) {
        return NULL;
    }

    memcpy(str2, str, size);
    return str2;
}

char *
_PyMem_RawStrdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_RawMalloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_Malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
        _____   ______   ______       ________
       [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
        _______________________________       |                           |
       [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
        ______________________________________________________________    |
       [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
        __________________________________________________________________
       [   Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
        _______________________________________________________________________
       [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
        __________________________________   __________________________________
       [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *    "Dynamic Storage Allocation: A Survey and Critical Review",
 *    in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators,
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which degenerates over time. To avoid
 * this, we partition each free list in pools and we share dynamically the
 * reserved space between all free lists. This technique is quite efficient
 * for memory intensive programs which allocate mainly small-sized blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */
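
/* Worked example of the table above: the size class index for a request of
   nbytes bytes is (nbytes - 1) >> ALIGNMENT_SHIFT.  For a 20-byte request,
   (20 - 1) >> 3 == 2, so it is served from the 24-byte class, matching the
   17-24 row of the table. */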

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation
 * violation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). This in no way
 * means that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages within
 * this space are referenced subsequently. So malloc'ing big blocks and not
 * using them does not mean "wasting memory". It's an addressable range
 * wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */
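
/* With the defaults above, one 256 KB arena carved into 4 KB pools yields
   ARENA_SIZE / POOL_SIZE == 64 pools per arena (one pool may be lost to
   alignment; see new_arena() below). */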

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive,
 * however, whether it's worth switching to such a locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
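
/* Worked example, assuming a 64-bit build where sizeof(struct pool_header)
   is 48 bytes (so POOL_OVERHEAD == 48): a 4 KB pool of the smallest size
   class holds NUMBLOCKS(0) == (4096 - 48) / 8 == 506 eight-byte blocks,
   while one of the largest class holds NUMBLOCKS(63) == (4096 - 48) / 512
   == 7 blocks of 512 bytes. */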

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools list,
    via its nextpool member. The prevpool member has no meaning in this case.
    Empty pools have no inherent size class: the next time a malloc finds
    an empty list in usedpools[], it takes the first pool off of freepools.
    If the size class needed happens to be the same as the size class the pool
    last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with the
fact that pymalloc strives, at all levels (arena, pool, and block), never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.
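
Illustrative sketch of the free-list operations just described (the real
allocation and deallocation code inlines these few lines): popping a block
from a pool's free list is

    bp = pool->freeblock;
    pool->freeblock = *(block **)bp;

since the first bytes of a free block hold the address of the next free
block (or NULL at the end of the list); freeing pushes in the reverse way:

    *(block **)p = pool->freeblock;
    pool->freeblock = (block *)p;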


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consumes sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */
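
/* Worked example of the trick above, assuming a 64-bit build where
   sizeof(block *) == 8: PTA(0) == (uint8_t *)&usedpools[0] - 16, so
   reading PTA(0)->nextpool (at offset 16 in a pool_header, right after
   `ref` and `freeblock`) reads the pointer stored at usedpools[0], which
   is PTA(0) itself -- p->nextpool == p->prevpool == p, an empty circular
   list. */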

#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should be less than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};

/*==========================================================================
Arena management.

`arenas` is a vector of arena_objects. It contains maxarenas entries, some of
which may not be currently used (== they're arena_objects that aren't
currently associated with an allocated arena). Note that arenas proper are
separately malloc'ed.

Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,
we do try to free() arenas, and use some mild heuristic strategies to increase
the likelihood that arenas eventually can be freed.

unused_arena_objects

    This is a singly-linked list of the arena_objects that are currently not
    being used (no arena is associated with them). Objects are taken off the
    head of the list in new_arena(), and are pushed on the head of the list in
    PyObject_Free() when the arena is empty. Key invariant: an arena_object
    is on this list if and only if its .address member is 0.

usable_arenas

    This is a doubly-linked list of the arena_objects associated with arenas
    that have pools available. These pools are either waiting to be reused,
    or have not been used before. The list is sorted to have the most-
    allocated arenas first (ascending order based on the nfreepools member).
    This means that the next allocation will come from a heavily used arena,
    which gives the nearly empty arenas a chance to be returned to the system.
    In my unscientific tests this dramatically improved the number of arenas
    that could be freed.

Note that an arena_object associated with an arena all of whose pools are
currently in use isn't on either list.
*/

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked, NULL-terminated at each end, list of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}


/* Allocate a new arena. If we run out of memory, return NULL. Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena. It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    if (debug_stats == -1) {
        const char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied. However,
         * new_arena only gets called when all the pages in the
         * previous arenas are full. Thus, there are *no* pointers
         * into the old array. Thus, we don't have to worry about
         * invalid pointers. Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                   &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}


Thomas Woutersa9773292006-04-21 09:43:23 +00001255/*
address_in_range(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because the macro expands POOL more than
once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a
variable and pass the latter to the macro; because address_in_range is
called on every alloc/realloc/free, micro-efficiency is important here).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on maxarenas also being 0 in that case, so that
(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore address_in_range correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't
control P.

Else arenaindex is < maxarenas, and AO is read up.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the macro correctly reports that P is not
controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the macro reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the macro
correctly reports that P is not controlled by obmalloc (despite the fact that
P lies in slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'ed, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading up possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in a small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/

static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL. The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    return arenaindex < maxarenas &&
        (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
        arenas[arenaindex].address != 0;
}
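
/* A minimal illustration (not compiled in): the single unsigned comparison
 * in address_in_range folds both bounds checks of B <= P < B + ARENA_SIZE.
 * demo_in_arena is a hypothetical stand-alone version of the same trick: if
 * p < base, the unsigned subtraction wraps to a huge value and the comparison
 * fails, so no separate "p >= base" test is needed.
 */
#if 0
static bool
demo_in_arena(uintptr_t base, const void *p)
{
    /* equivalent to: base <= (uintptr_t)p && (uintptr_t)p < base + ARENA_SIZE */
    return (uintptr_t)p - base < ARENA_SIZE;
}
#endif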


/*==========================================================================*/

/* pymalloc allocator

   The basic blocks are ordered by decreasing execution frequency,
   which minimizes the number of jumps in the most common cases,
   improves branching prediction and instruction scheduling (small
   block allocations typically result in a couple of instructions),
   at least unless the optimizer is too smart and reorders everything.

   Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.

   Return 0 if pymalloc failed to allocate the memory block: on zero-byte
   requests, on requests larger than SMALL_REQUEST_THRESHOLD, on error in the
   code below (as a last chance to serve the request) or when the max memory
   limit has been reached. */
static int
pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return 0;
    }
#endif

    if (nbytes == 0) {
        return 0;
    }
    if (nbytes > SMALL_REQUEST_THRESHOLD) {
        return 0;
    }

    LOCK();
    /*
     * Most frequent paths first
     */
    size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    pool = usedpools[size + size];
    if (pool != pool->nextpool) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);
        if ((pool->freeblock = *(block **)bp) != NULL) {
            goto success;
        }

        /*
         * Reached the end of the free list, try to extend it.
         */
        if (pool->nextoffset <= pool->maxnextoffset) {
            /* There is room for another block. */
            pool->freeblock = (block*)pool +
                              pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(block **)(pool->freeblock) = NULL;
            goto success;
        }

        /* Pool is full, unlink from used pools. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        goto success;
    }

    /* There isn't a pool of the right size class immediately
     * available: use a free pool.
     */
    if (usable_arenas == NULL) {
        /* No arena has a free pool: allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (narenas_currently_allocated >= MAX_ARENAS) {
            goto failed;
        }
#endif
        usable_arenas = new_arena();
        if (usable_arenas == NULL) {
            goto failed;
        }
        usable_arenas->nextarena =
            usable_arenas->prevarena = NULL;
    }
    assert(usable_arenas->address != 0);

    /* Try to get a cached free pool. */
    pool = usable_arenas->freepools;
    if (pool != NULL) {
        /* Unlink from cached pools. */
        usable_arenas->freepools = pool->nextpool;

        /* This arena already had the smallest nfreepools
         * value, so decreasing nfreepools doesn't change
         * that, and we don't need to rearrange the
         * usable_arenas list.  However, if the arena has
         * become wholly allocated, we need to remove its
         * arena_object from usable_arenas.
         */
        --usable_arenas->nfreepools;
        if (usable_arenas->nfreepools == 0) {
            /* Wholly allocated:  remove. */
            assert(usable_arenas->freepools == NULL);
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);

            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(usable_arenas->freepools != NULL ||
                   usable_arenas->pool_address <=
                   (block*)usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
        }

    init_pool:
        /* Frontlink to used pools. */
        next = usedpools[size + size]; /* == prev */
        pool->nextpool = next;
        pool->prevpool = next;
        next->nextpool = pool;
        next->prevpool = pool;
        pool->ref.count = 1;
        if (pool->szidx == size) {
            /* Luckily, this pool last contained blocks
             * of the same size class, so its header
             * and free list are already initialized.
             */
            bp = pool->freeblock;
            assert(bp != NULL);
            pool->freeblock = *(block **)bp;
            goto success;
        }
        /*
         * Initialize the pool header, set up the free list to
         * contain just the second block, and return the first
         * block.
         */
        pool->szidx = size;
        size = INDEX2SIZE(size);
        bp = (block *)pool + POOL_OVERHEAD;
        pool->nextoffset = POOL_OVERHEAD + (size << 1);
        pool->maxnextoffset = POOL_SIZE - size;
        pool->freeblock = bp + size;
        *(block **)(pool->freeblock) = NULL;
        goto success;
    }

    /* Carve off a new pool. */
    assert(usable_arenas->nfreepools > 0);
    assert(usable_arenas->freepools == NULL);
    pool = (poolp)usable_arenas->pool_address;
    assert((block*)pool <= (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
    pool->arenaindex = (uint)(usable_arenas - arenas);
    assert(&arenas[pool->arenaindex] == usable_arenas);
    pool->szidx = DUMMY_SIZE_IDX;
    usable_arenas->pool_address += POOL_SIZE;
    --usable_arenas->nfreepools;

    if (usable_arenas->nfreepools == 0) {
        assert(usable_arenas->nextarena == NULL ||
               usable_arenas->nextarena->prevarena ==
               usable_arenas);
        /* Unlink the arena:  it is completely allocated. */
        usable_arenas = usable_arenas->nextarena;
        if (usable_arenas != NULL) {
            usable_arenas->prevarena = NULL;
            assert(usable_arenas->address != 0);
        }
    }

    goto init_pool;

success:
    UNLOCK();
    assert(bp != NULL);
    *ptr_p = (void *)bp;
    return 1;

failed:
    UNLOCK();
    return 0;
}
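
/* A small sketch (not compiled in) of the size-class arithmetic at the top
 * of pymalloc_alloc.  Assuming the usual 8-byte ALIGNMENT (so
 * ALIGNMENT_SHIFT == 3): requests of 1..8 bytes map to index 0, 9..16 to
 * index 1, and so on, and usedpools[index + index] is the list header for
 * that class.  demo_size_class is a hypothetical helper, not obmalloc API.
 */
#if 0
static uint
demo_size_class(size_t nbytes)
{
    assert(0 < nbytes && nbytes <= SMALL_REQUEST_THRESHOLD);
    return (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;   /* e.g. 20 -> (19 >> 3) == 2 */
}
#endif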


static void *
_PyObject_Malloc(void *ctx, size_t nbytes)
{
    void* ptr;
    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawMalloc(nbytes);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}


static void *
_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
{
    void* ptr;

    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    size_t nbytes = nelem * elsize;

    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        memset(ptr, 0, nbytes);
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawCalloc(nelem, elsize);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}
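
/* A sketch (not compiled in) of the overflow precondition asserted above:
 * callers must already have rejected nelem * elsize overflow.  A
 * division-based guard in the same spirit, needing no wider integer type
 * (demo_calloc_args_ok is a hypothetical helper, not obmalloc API):
 */
#if 0
static int
demo_calloc_args_ok(size_t nelem, size_t elsize)
{
    /* true iff nelem * elsize fits in a Py_ssize_t */
    return elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize;
}
#endif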


/* Free a memory block allocated by pymalloc_alloc().
   Return 1 if it was freed.
   Return 0 if the block was not allocated by pymalloc_alloc(). */
static int
pymalloc_free(void *ctx, void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        return 0;
    }
    /* We allocated this address. */

    LOCK();

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    *(block **)p = lastfree = pool->freeblock;
    pool->freeblock = (block *)p;
    if (!lastfree) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);        /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;

        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        goto success;
    }

    struct arena_object* ao;
    uint nf;  /* ao->nfreepools */

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (--pool->ref.count != 0) {
        /* pool isn't empty:  leave it in usedpools */
        goto success;
    }
    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (not being referenced, they are perhaps paged out).
     */
    next = pool->nextpool;
    prev = pool->prevpool;
    next->prevpool = prev;
    prev->nextpool = next;

    /* Link the pool to freepools.  This is a singly-linked
     * list, and pool->prevpool isn't used there.
     */
    ao = &arenas[pool->arenaindex];
    pool->nextpool = ao->freepools;
    ao->freepools = pool;
    nf = ++ao->nfreepools;

    /* All the rest is arena management.  We just freed
     * a pool, and there are 4 cases for arena mgmt:
     * 1. If all the pools are free, return the arena to
     *    the system free().
     * 2. If this is the only free pool in the arena,
     *    add the arena back to the `usable_arenas` list.
     * 3. If the "next" arena has a smaller count of free
     *    pools, we have to "slide this arena right" to
     *    restore the invariant that usable_arenas is sorted
     *    in order of nfreepools.
     * 4. Else there's nothing more to do.
     */
    if (nf == ao->ntotalpools) {
        /* Case 1.  First unlink ao from usable_arenas.
         */
        assert(ao->prevarena == NULL ||
               ao->prevarena->address != 0);
        assert(ao->nextarena == NULL ||
               ao->nextarena->address != 0);

        /* Fix the pointer in the prevarena, or the
         * usable_arenas pointer.
         */
        if (ao->prevarena == NULL) {
            usable_arenas = ao->nextarena;
            assert(usable_arenas == NULL ||
                   usable_arenas->address != 0);
        }
        else {
            assert(ao->prevarena->nextarena == ao);
            ao->prevarena->nextarena =
                ao->nextarena;
        }
        /* Fix the pointer in the nextarena. */
        if (ao->nextarena != NULL) {
            assert(ao->nextarena->prevarena == ao);
            ao->nextarena->prevarena =
                ao->prevarena;
        }
        /* Record that this arena_object slot is
         * available to be reused.
         */
        ao->nextarena = unused_arena_objects;
        unused_arena_objects = ao;

        /* Free the entire arena. */
        _PyObject_Arena.free(_PyObject_Arena.ctx,
                             (void *)ao->address, ARENA_SIZE);
        ao->address = 0;                    /* mark unassociated */
        --narenas_currently_allocated;

        goto success;
    }

    if (nf == 1) {
        /* Case 2.  Put ao at the head of
         * usable_arenas.  Note that because
         * ao->nfreepools was 0 before, ao isn't
         * currently on the usable_arenas list.
         */
        ao->nextarena = usable_arenas;
        ao->prevarena = NULL;
        if (usable_arenas)
            usable_arenas->prevarena = ao;
        usable_arenas = ao;
        assert(usable_arenas->address != 0);

        goto success;
    }

    /* If this arena is now out of order, we need to keep
     * the list sorted.  The list is kept sorted so that
     * the "most full" arenas are used first, which allows
     * the nearly empty arenas to be completely freed.  In
     * a few unscientific tests, it seems like this
     * approach allowed a lot more memory to be freed.
     */
    if (ao->nextarena == NULL ||
            nf <= ao->nextarena->nfreepools) {
        /* Case 4.  Nothing to do. */
        goto success;
    }
    /* Case 3:  We have to move the arena towards the end
     * of the list, because it has more free pools than
     * the arena to its right.
     * First unlink ao from usable_arenas.
     */
    if (ao->prevarena != NULL) {
        /* ao isn't at the head of the list */
        assert(ao->prevarena->nextarena == ao);
        ao->prevarena->nextarena = ao->nextarena;
    }
    else {
        /* ao is at the head of the list */
        assert(usable_arenas == ao);
        usable_arenas = ao->nextarena;
    }
    ao->nextarena->prevarena = ao->prevarena;

    /* Locate the new insertion point by iterating over
     * the list, using our nextarena pointer.
     */
    while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
        ao->prevarena = ao->nextarena;
        ao->nextarena = ao->nextarena->nextarena;
    }

    /* Insert ao at this point. */
    assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
    assert(ao->prevarena->nextarena == ao->nextarena);

    ao->prevarena->nextarena = ao;
    if (ao->nextarena != NULL) {
        ao->nextarena->prevarena = ao;
    }

    /* Verify that the swaps worked. */
    assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
    assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
    assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
    assert((usable_arenas == ao && ao->prevarena == NULL)
           || ao->prevarena->nextarena == ao);

    goto success;

success:
    UNLOCK();
    return 1;
}
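
/* A debug-only sketch (not compiled in) of the ordering invariant that
 * cases 2 and 3 above maintain: usable_arenas stays sorted by nondecreasing
 * nfreepools, so the "most full" arenas are preferred and nearly empty ones
 * drift toward the tail until case 1 frees them.  demo_check_sorted is a
 * hypothetical checker, not part of obmalloc.
 */
#if 0
static void
demo_check_sorted(void)
{
    struct arena_object *a = usable_arenas;
    for (; a != NULL && a->nextarena != NULL; a = a->nextarena) {
        assert(a->nfreepools <= a->nextarena->nfreepools);
        assert(a->nextarena->prevarena == a);
    }
}
#endif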


static void
_PyObject_Free(void *ctx, void *p)
{
    /* PyObject_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    _Py_AllocatedBlocks--;
    if (!pymalloc_free(ctx, p)) {
        /* pymalloc didn't allocate this address */
        PyMem_RawFree(p);
    }
}


/* pymalloc realloc.

   If nbytes==0, then as the Python docs promise, we do not treat this like
   free(p), and return a non-NULL result.

   Return 1 if pymalloc reallocated memory and wrote the new pointer into
   newptr_p.

   Return 0 if pymalloc didn't allocate p. */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt:  let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff: it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        size = nbytes;
    }

    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    *newptr_p = bp;
    return 1;
}
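
/* A worked instance (not compiled in) of the 25% shrink rule above.  For a
 * 64-byte block (size == 64): nbytes == 49 gives 4*49 == 196 > 192, so the
 * block is kept in place; nbytes == 48 gives 192, which is not > 192, so at
 * least a quarter can be shaved off and the data is copied to a smaller
 * size class.  demo_keep_in_place is a hypothetical helper.
 */
#if 0
static int
demo_keep_in_place(size_t nbytes, size_t size)
{
    return 4 * nbytes > 3 * size;
}
#endif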


static void *
_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
{
    void *ptr2;

    if (ptr == NULL) {
        return _PyObject_Malloc(ctx, nbytes);
    }

    if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
        return ptr2;
    }

    return PyMem_RawRealloc(ptr, nbytes);
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}

#endif /* WITH_PYMALLOC */


/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly,
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uint8_t *q = (uint8_t *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uint8_t)(n & 0xff);
        n >>= 8;
    }
}
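
/* A round-trip sketch (not compiled in): the big-endian layout is chosen so
 * the byte count reads naturally in a memory dump.  On a 64-bit build,
 * n == 258 (0x0102) is stored as 00 00 00 00 00 00 01 02.
 */
#if 0
static void
demo_size_t_roundtrip(void)
{
    uint8_t buf[SST];
    write_size_t(buf, 258);             /* MSB first */
    assert(read_size_t(buf) == 258);
}
#endif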

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S]
    API ID.  See PEP 445.  This is a character, but seems undocumented.
p[S+1: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under- writes and reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch reference to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over- writes and reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
    and _PyMem_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/
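
/* A worked instance of the layout above, assuming a 64-bit build (S == 8)
   and a 4-byte request, so the pad block is 4 + 4*8 == 36 bytes:

     p[0:8]     big-endian size_t 4 (the byte count requested)
     p[8]       the API id, e.g. 'o' for the object domain (see PEP 445)
     p[9:16]    7 bytes of 0xFB
     p[16:20]   the caller's 4 bytes, initially 0xCB 0xCB 0xCB 0xCB
     p[20:28]   8 bytes of 0xFB
     p[28:36]   big-endian serial number

   The pointer handed back to the caller is p + 16.
*/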

static void *
_PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes)
{
    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *p;           /* base address of malloc'ed pad block */
    uint8_t *data;        /* p + 2*SST == pointer to data bytes */
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */

    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4 * SST;

    /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN]
               ^--- p    ^--- data   ^--- tail
       S: nbytes stored as size_t
       I: API identifier (1 byte)
       F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after)
       C: Clean bytes used later to store actual data
       N: Serial number stored as size_t */

    if (use_calloc) {
        p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total);
    }
    else {
        p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total);
    }
    if (p == NULL) {
        return NULL;
    }
    data = p + 2*SST;

    bumpserialno();

    /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */
    write_size_t(p, nbytes);
    p[SST] = (uint8_t)api->api_id;
    memset(p + SST + 1, FORBIDDENBYTE, SST-1);

    if (nbytes > 0 && !use_calloc) {
        memset(data, CLEANBYTE, nbytes);
    }

    /* at tail, write pad (SST bytes) and serialno (SST bytes) */
    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, serialno);

    return data;
}

static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}

static void *
_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    size_t nbytes;
    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    nbytes = nelem * elsize;
    return _PyMem_DebugRawAlloc(1, ctx, nbytes);
}

/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs with the api ID are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
static void
_PyMem_DebugRawFree(void *ctx, void *p)
{
    /* PyMem_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *q = (uint8_t *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    _PyMem_DebugCheckAddress(api->api_id, p);
    nbytes = read_size_t(q);
    nbytes += 4 * SST;
    memset(q, DEADBYTE, nbytes);
    api->alloc.free(api->alloc.ctx, q);
}


static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *head;        /* base address of malloc'ed pad block */
    uint8_t *data;        /* pointer to data bytes */
    uint8_t *r;
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
    size_t block_serialno;
#define ERASED_SIZE 64
    uint8_t save[2*ERASED_SIZE];  /* A copy of erased bytes. */

    _PyMem_DebugCheckAddress(api->api_id, p);

    data = (uint8_t *)p;
    head = data - 2*SST;
    original_nbytes = read_size_t(head);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4*SST;

    tail = data + original_nbytes;
    block_serialno = read_size_t(tail + SST);
    /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning and
       ERASED_SIZE bytes at the end as dead, and save a copy of the erased
       bytes.
    */
    if (original_nbytes <= sizeof(save)) {
        memcpy(save, data, original_nbytes);
        memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
    }
    else {
        memcpy(save, data, ERASED_SIZE);
        memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
        memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
        memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
    }

    /* Resize and add decorations. */
    r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
    if (r == NULL) {
        nbytes = original_nbytes;
    }
    else {
        head = r;
        bumpserialno();
        block_serialno = serialno;
    }

    write_size_t(head, nbytes);
    head[SST] = (uint8_t)api->api_id;
    memset(head + SST + 1, FORBIDDENBYTE, SST-1);
    data = head + 2*SST;

    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, block_serialno);

    /* Restore saved bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(data, save, Py_MIN(nbytes, original_nbytes));
    }
    else {
        size_t i = original_nbytes - ERASED_SIZE;
        memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
        if (nbytes > i) {
            memcpy(data + i, &save[ERASED_SIZE],
                   Py_MIN(nbytes - i, ERASED_SIZE));
        }
    }

    if (r == NULL) {
        return NULL;
    }

    if (nbytes > original_nbytes) {
        /* growing:  mark new extra memory clean */
        memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
    }

    return data;
}

static void
_PyMem_DebugCheckGIL(void)
{
    if (!PyGILState_Check())
        Py_FatalError("Python memory allocator called "
                      "without holding the GIL");
}

static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawMalloc(ctx, nbytes);
}

static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
}


static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}


static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
}
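
/* A sketch (not compiled in) of the caller bug _PyMem_DebugCheckGIL exists
 * to catch: hypothetical extension code touching PyMem_Malloc after
 * releasing the GIL.  With the debug hooks installed this dies with a clear
 * fatal error instead of silently racing the allocator.
 */
#if 0
static void
buggy_extension_helper(void)
{
    Py_BEGIN_ALLOW_THREADS
    void *buf = PyMem_Malloc(64);   /* GIL not held: fatal error here */
    PyMem_Free(buf);
    Py_END_ALLOW_THREADS
}
#endif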

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    char msgbuf[64];
    const char *msg;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msgbuf, sizeof(msgbuf),
                 "bad ID: Allocated using API '%c', verified using API '%c'",
                 id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first:  if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
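
/* A sketch (not compiled in) of the class of bug the pad checks above catch:
 * a one-byte underwrite lands in the leading FORBIDDENBYTE pad, so the next
 * free of the block fails with "bad leading pad byte" rather than silently
 * corrupting the heap (hypothetical snippet, debug hooks assumed installed).
 */
#if 0
static void
demo_underwrite(void)
{
    uint8_t *buf = (uint8_t *)PyMem_Malloc(16);
    buf[-1] = 0;          /* stomps a FORBIDDENBYTE before the data */
    PyMem_Free(buf);      /* _PyMem_DebugCheckAddress aborts here */
}
#endif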

/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "       may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}


static size_t
printone(FILE *out, const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, out);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', out);
    fputc('=', out);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        unsigned int digit = (unsigned int)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, out);

    return origvalue;
}
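
/* A usage sketch: printone pads the label to column 35, writes '=', then the
 * value right-aligned in a 22-character field with commas every three
 * digits.  For example (spacing approximate):
 *
 *     printone(stderr, "# arenas allocated total", 1234567);
 *
 * prints:
 *
 *     # arenas allocated total           =            1,234,567
 */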

void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char buf1[128];
    char buf2[128];
    PyOS_snprintf(buf1, sizeof(buf1),
                  "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(buf2, sizeof(buf2),
                  "%48s ", buf1);
    (void)printone(out, buf2, num_blocks * sizeof_block);
}


#ifdef WITH_PYMALLOC

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}
#endif
2466
David Malcolm49526f42012-06-22 14:55:41 -04002467/* Print summary info to "out" about the state of pymalloc's structures.
Tim Peters08d82152002-04-18 22:25:03 +00002468 * In Py_DEBUG mode, also perform some expensive internal consistency
2469 * checks.
Victor Stinner6bf992a2017-12-06 17:26:10 +01002470 *
2471 * Return 0 if the memory debug hooks are not installed or no statistics was
Leo Ariasc3d95082018-02-03 18:36:10 -06002472 * written into out, return 1 otherwise.
Tim Peters08d82152002-04-18 22:25:03 +00002473 */
Victor Stinner6bf992a2017-12-06 17:26:10 +01002474int
David Malcolm49526f42012-06-22 14:55:41 -04002475_PyObject_DebugMallocStats(FILE *out)
Tim Peters7ccfadf2002-04-01 06:04:21 +00002476{
Victor Stinner6bf992a2017-12-06 17:26:10 +01002477 if (!_PyMem_PymallocEnabled()) {
2478 return 0;
2479 }
2480
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002481 uint i;
2482 const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
2483 /* # of pools, allocated blocks, and free blocks per class index */
2484 size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2485 size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2486 size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
2487 /* total # of allocated bytes in used and full pools */
2488 size_t allocated_bytes = 0;
2489 /* total # of available bytes in used pools */
2490 size_t available_bytes = 0;
2491 /* # of free pools + pools not yet carved out of current arena */
2492 uint numfreepools = 0;
2493 /* # of bytes for arena alignment padding */
2494 size_t arena_alignment = 0;
2495 /* # of bytes in used and full pools used for pool_headers */
2496 size_t pool_header_bytes = 0;
2497 /* # of bytes in used and full pools wasted due to quantization,
2498 * i.e. the necessarily leftover space at the ends of used and
2499 * full pools.
2500 */
2501 size_t quantization = 0;
2502 /* # of arenas actually allocated. */
2503 size_t narenas = 0;
2504 /* running total -- should equal narenas * ARENA_SIZE */
2505 size_t total;
2506 char buf[128];
Tim Peters7ccfadf2002-04-01 06:04:21 +00002507
David Malcolm49526f42012-06-22 14:55:41 -04002508 fprintf(out, "Small block threshold = %d, in %u size classes.\n",
Stefan Krah735bb122010-11-26 10:54:09 +00002509 SMALL_REQUEST_THRESHOLD, numclasses);
Tim Peters7ccfadf2002-04-01 06:04:21 +00002510
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002511 for (i = 0; i < numclasses; ++i)
2512 numpools[i] = numblocks[i] = numfreeblocks[i] = 0;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002513
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002514 /* Because full pools aren't linked to from anything, it's easiest
2515 * to march over all the arenas. If we're lucky, most of the memory
2516 * will be living in full pools -- would be a shame to miss them.
2517 */
Victor Stinner9e87e772017-11-24 12:09:24 +01002518 for (i = 0; i < maxarenas; ++i) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002519 uint j;
Victor Stinner9e87e772017-11-24 12:09:24 +01002520 uintptr_t base = arenas[i].address;
Thomas Woutersa9773292006-04-21 09:43:23 +00002521
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002522 /* Skip arenas which are not allocated. */
Victor Stinner9e87e772017-11-24 12:09:24 +01002523 if (arenas[i].address == (uintptr_t)NULL)
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002524 continue;
2525 narenas += 1;
Thomas Woutersa9773292006-04-21 09:43:23 +00002526
Victor Stinner9e87e772017-11-24 12:09:24 +01002527 numfreepools += arenas[i].nfreepools;
Tim Peters7ccfadf2002-04-01 06:04:21 +00002528
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002529 /* round up to pool alignment */
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002530 if (base & (uintptr_t)POOL_SIZE_MASK) {
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002531 arena_alignment += POOL_SIZE;
Benjamin Peterson5d4b09c2016-09-18 19:24:52 -07002532 base &= ~(uintptr_t)POOL_SIZE_MASK;
Antoine Pitrouf95a1b32010-05-09 15:52:27 +00002533 base += POOL_SIZE;
2534 }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) arenas[i].pool_address;
                ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
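            /* A pool with available blocks must be on the used-pool list
             * for its size class; usedpools[] is indexed by the doubled
             * class index (sz + sz), an artifact of the pool_header trick
             * that table is built on.
             */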
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);
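    /* Illustrative excerpt of the table this produces (actual values vary
     * with the build and the workload):
     *
     *     class   size   num pools   blocks in use  avail blocks
     *     -----   ----   ---------   -------------  ------------
     *         3     32          12            1489            23
     */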

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
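        /* Worked example (illustrative 64-bit values:  POOL_SIZE 4096,
         * POOL_OVERHEAD 48):  in the 56-byte class, each pool holds
         * (4096 - 48) / 56 == 72 blocks and (4096 - 48) % 56 == 16 bytes
         * are left over, so every such pool adds 16 to quantization.
         */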
    }
    fputc('\n', out);
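    /* serialno is bumped by the debug hooks on every allocator call, so it
     * is only meaningful (and only reported) when those hooks are active.
     */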
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", serialno);
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
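    /* Every byte of every allocated arena is accounted to exactly one of
     * the buckets above, so this Total should match the
     * "arenas * bytes/arena" line printed earlier.
     */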
    (void)printone(out, "Total", total);
    return 1;
}
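
/* A minimal usage sketch.  From C, any stdio stream works:
 *
 *     if (!_PyObject_DebugMallocStats(stderr)) {
 *         fprintf(stderr, "pymalloc is not in use\n");
 *     }
 *
 * From Python, the same report is written to stderr by the private helper
 * sys._debugmallocstats().
 */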

#endif /* #ifdef WITH_PYMALLOC */