#include "Python.h"

#include <stdbool.h>


/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


/* Python's malloc wrappers (see pymem.h) */

#undef uint
#define uint unsigned int       /* assuming >= 16 bits */

/* Forward declaration */
static void* _PyMem_DebugRawMalloc(void *ctx, size_t size);
static void* _PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRawRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugRawFree(void *ctx, void *ptr);

static void* _PyMem_DebugMalloc(void *ctx, size_t size);
static void* _PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize);
static void* _PyMem_DebugRealloc(void *ctx, void *ptr, size_t size);
static void _PyMem_DebugFree(void *ctx, void *p);

static void _PyObject_DebugDumpAddress(const void *p);
static void _PyMem_DebugCheckAddress(char api_id, const void *p);

static void _PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain);

#if defined(__has_feature)  /* Clang */
#if __has_feature(address_sanitizer)  /* is ASAN enabled? */
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
#else
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
#endif
#else
#if defined(__SANITIZE_ADDRESS__)  /* GCC 4.8.x, is ASAN enabled? */
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \
        __attribute__((no_address_safety_analysis))
#else
#define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
#endif
#endif

#ifdef WITH_PYMALLOC

#ifdef MS_WINDOWS
#  include <windows.h>
#elif defined(HAVE_MMAP)
#  include <sys/mman.h>
#  ifdef MAP_ANONYMOUS
#    define ARENAS_USE_MMAP
#  endif
#endif

/* Forward declaration */
static void* _PyObject_Malloc(void *ctx, size_t size);
static void* _PyObject_Calloc(void *ctx, size_t nelem, size_t elsize);
static void _PyObject_Free(void *ctx, void *p);
static void* _PyObject_Realloc(void *ctx, void *ptr, size_t size);
#endif


static void *
_PyMem_RawMalloc(void *ctx, size_t size)
{
    /* PyMem_RawMalloc(0) means malloc(1). Some systems would return NULL
       for malloc(0), which would be treated as an error. Some platforms would
       return a pointer with no memory behind it, which would break pymalloc.
       To solve these problems, allocate an extra byte. */
    if (size == 0)
        size = 1;
    return malloc(size);
}

static void *
_PyMem_RawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    /* PyMem_RawCalloc(0, 0) means calloc(1, 1). Some systems would return NULL
       for calloc(0, 0), which would be treated as an error. Some platforms
       would return a pointer with no memory behind it, which would break
       pymalloc. To solve these problems, allocate an extra byte. */
    if (nelem == 0 || elsize == 0) {
        nelem = 1;
        elsize = 1;
    }
    return calloc(nelem, elsize);
}

static void *
_PyMem_RawRealloc(void *ctx, void *ptr, size_t size)
{
    if (size == 0)
        size = 1;
    return realloc(ptr, size);
}

static void
_PyMem_RawFree(void *ctx, void *ptr)
{
    free(ptr);
}


#ifdef MS_WINDOWS
static void *
_PyObject_ArenaVirtualAlloc(void *ctx, size_t size)
{
    return VirtualAlloc(NULL, size,
                        MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
}

static void
_PyObject_ArenaVirtualFree(void *ctx, void *ptr, size_t size)
{
    VirtualFree(ptr, 0, MEM_RELEASE);
}

#elif defined(ARENAS_USE_MMAP)
static void *
_PyObject_ArenaMmap(void *ctx, size_t size)
{
    void *ptr;
    ptr = mmap(NULL, size, PROT_READ|PROT_WRITE,
               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED)
        return NULL;
    assert(ptr != NULL);
    return ptr;
}

static void
_PyObject_ArenaMunmap(void *ctx, void *ptr, size_t size)
{
    munmap(ptr, size);
}

#else
static void *
_PyObject_ArenaMalloc(void *ctx, size_t size)
{
    return malloc(size);
}

static void
_PyObject_ArenaFree(void *ctx, void *ptr, size_t size)
{
    free(ptr);
}
#endif

#define MALLOC_ALLOC {NULL, _PyMem_RawMalloc, _PyMem_RawCalloc, _PyMem_RawRealloc, _PyMem_RawFree}
#ifdef WITH_PYMALLOC
#  define PYMALLOC_ALLOC {NULL, _PyObject_Malloc, _PyObject_Calloc, _PyObject_Realloc, _PyObject_Free}
#endif

#define PYRAW_ALLOC MALLOC_ALLOC
#ifdef WITH_PYMALLOC
#  define PYOBJ_ALLOC PYMALLOC_ALLOC
#else
#  define PYOBJ_ALLOC MALLOC_ALLOC
#endif
#define PYMEM_ALLOC PYOBJ_ALLOC

typedef struct {
    /* We tag each block with an API ID in order to detect API violations */
    char api_id;
    PyMemAllocatorEx alloc;
} debug_alloc_api_t;
static struct {
    debug_alloc_api_t raw;
    debug_alloc_api_t mem;
    debug_alloc_api_t obj;
} _PyMem_Debug = {
    {'r', PYRAW_ALLOC},
    {'m', PYMEM_ALLOC},
    {'o', PYOBJ_ALLOC}
};

#define PYDBGRAW_ALLOC \
    {&_PyMem_Debug.raw, _PyMem_DebugRawMalloc, _PyMem_DebugRawCalloc, _PyMem_DebugRawRealloc, _PyMem_DebugRawFree}
#define PYDBGMEM_ALLOC \
    {&_PyMem_Debug.mem, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}
#define PYDBGOBJ_ALLOC \
    {&_PyMem_Debug.obj, _PyMem_DebugMalloc, _PyMem_DebugCalloc, _PyMem_DebugRealloc, _PyMem_DebugFree}

#ifdef Py_DEBUG
static PyMemAllocatorEx _PyMem_Raw = PYDBGRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYDBGMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYDBGOBJ_ALLOC;
#else
static PyMemAllocatorEx _PyMem_Raw = PYRAW_ALLOC;
static PyMemAllocatorEx _PyMem = PYMEM_ALLOC;
static PyMemAllocatorEx _PyObject = PYOBJ_ALLOC;
#endif


static int
pymem_set_default_allocator(PyMemAllocatorDomain domain, int debug,
                            PyMemAllocatorEx *old_alloc)
{
    if (old_alloc != NULL) {
        PyMem_GetAllocator(domain, old_alloc);
    }

    PyMemAllocatorEx new_alloc;
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW:
        new_alloc = (PyMemAllocatorEx)PYRAW_ALLOC;
        break;
    case PYMEM_DOMAIN_MEM:
        new_alloc = (PyMemAllocatorEx)PYMEM_ALLOC;
        break;
    case PYMEM_DOMAIN_OBJ:
        new_alloc = (PyMemAllocatorEx)PYOBJ_ALLOC;
        break;
    default:
        /* unknown domain */
        return -1;
    }
    PyMem_SetAllocator(domain, &new_alloc);
    if (debug) {
        _PyMem_SetupDebugHooksDomain(domain);
    }
    return 0;
}


int
_PyMem_SetDefaultAllocator(PyMemAllocatorDomain domain,
                           PyMemAllocatorEx *old_alloc)
{
#ifdef Py_DEBUG
    const int debug = 1;
#else
    const int debug = 0;
#endif
    return pymem_set_default_allocator(domain, debug, old_alloc);
}


int
_PyMem_SetupAllocators(const char *opt)
{
    if (opt == NULL || *opt == '\0') {
        /* PYTHONMALLOC is empty, unset, or ignored (-E/-I command line
           options): use the default memory allocators */
        opt = "default";
    }

    if (strcmp(opt, "default") == 0) {
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_MEM, NULL);
        (void)_PyMem_SetDefaultAllocator(PYMEM_DOMAIN_OBJ, NULL);
    }
    else if (strcmp(opt, "debug") == 0) {
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_RAW, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_MEM, 1, NULL);
        (void)pymem_set_default_allocator(PYMEM_DOMAIN_OBJ, 1, NULL);
    }
#ifdef WITH_PYMALLOC
    else if (strcmp(opt, "pymalloc") == 0 || strcmp(opt, "pymalloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);

        PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &pymalloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &pymalloc);

        if (strcmp(opt, "pymalloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
#endif
    else if (strcmp(opt, "malloc") == 0 || strcmp(opt, "malloc_debug") == 0) {
        PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &malloc_alloc);
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &malloc_alloc);

        if (strcmp(opt, "malloc_debug") == 0) {
            PyMem_SetupDebugHooks();
        }
    }
    else {
        /* unknown allocator */
        return -1;
    }
    return 0;
}
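/* Usage sketch (illustrative, not part of the build): the accepted option
 * strings mirror the PYTHONMALLOC environment variable, so an embedding
 * application could select allocators early at startup like this; the error
 * handling shown here is an assumption, not code from this file:
 *
 *     if (_PyMem_SetupAllocators("pymalloc_debug") < 0) {
 *         fprintf(stderr, "unknown PYTHONMALLOC value\n");
 *     }
 */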

static int
pymemallocator_eq(PyMemAllocatorEx *a, PyMemAllocatorEx *b)
{
    return (memcmp(a, b, sizeof(PyMemAllocatorEx)) == 0);
}


const char*
_PyMem_GetAllocatorsName(void)
{
    PyMemAllocatorEx malloc_alloc = MALLOC_ALLOC;
#ifdef WITH_PYMALLOC
    PyMemAllocatorEx pymalloc = PYMALLOC_ALLOC;
#endif

    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &malloc_alloc) &&
        pymemallocator_eq(&_PyObject, &malloc_alloc))
    {
        return "malloc";
    }
#ifdef WITH_PYMALLOC
    if (pymemallocator_eq(&_PyMem_Raw, &malloc_alloc) &&
        pymemallocator_eq(&_PyMem, &pymalloc) &&
        pymemallocator_eq(&_PyObject, &pymalloc))
    {
        return "pymalloc";
    }
#endif

    PyMemAllocatorEx dbg_raw = PYDBGRAW_ALLOC;
    PyMemAllocatorEx dbg_mem = PYDBGMEM_ALLOC;
    PyMemAllocatorEx dbg_obj = PYDBGOBJ_ALLOC;

    if (pymemallocator_eq(&_PyMem_Raw, &dbg_raw) &&
        pymemallocator_eq(&_PyMem, &dbg_mem) &&
        pymemallocator_eq(&_PyObject, &dbg_obj))
    {
        /* Debug hooks installed */
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &malloc_alloc))
        {
            return "malloc_debug";
        }
#ifdef WITH_PYMALLOC
        if (pymemallocator_eq(&_PyMem_Debug.raw.alloc, &malloc_alloc) &&
            pymemallocator_eq(&_PyMem_Debug.mem.alloc, &pymalloc) &&
            pymemallocator_eq(&_PyMem_Debug.obj.alloc, &pymalloc))
        {
            return "pymalloc_debug";
        }
#endif
    }
    return NULL;
}
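/* Usage sketch (hypothetical caller, not part of the build): every non-NULL
 * string returned here is one of the option names accepted by
 * _PyMem_SetupAllocators() above, so the two functions round-trip:
 *
 *     const char *name = _PyMem_GetAllocatorsName();
 *     if (name != NULL) {
 *         assert(_PyMem_SetupAllocators(name) == 0);
 *     }
 */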


#undef MALLOC_ALLOC
#undef PYMALLOC_ALLOC
#undef PYRAW_ALLOC
#undef PYMEM_ALLOC
#undef PYOBJ_ALLOC
#undef PYDBGRAW_ALLOC
#undef PYDBGMEM_ALLOC
#undef PYDBGOBJ_ALLOC


static PyObjectArenaAllocator _PyObject_Arena = {NULL,
#ifdef MS_WINDOWS
    _PyObject_ArenaVirtualAlloc, _PyObject_ArenaVirtualFree
#elif defined(ARENAS_USE_MMAP)
    _PyObject_ArenaMmap, _PyObject_ArenaMunmap
#else
    _PyObject_ArenaMalloc, _PyObject_ArenaFree
#endif
};

#ifdef WITH_PYMALLOC
static int
_PyMem_DebugEnabled(void)
{
    return (_PyObject.malloc == _PyMem_DebugMalloc);
}

static int
_PyMem_PymallocEnabled(void)
{
    if (_PyMem_DebugEnabled()) {
        return (_PyMem_Debug.obj.alloc.malloc == _PyObject_Malloc);
    }
    else {
        return (_PyObject.malloc == _PyObject_Malloc);
    }
}
#endif


static void
_PyMem_SetupDebugHooksDomain(PyMemAllocatorDomain domain)
{
    PyMemAllocatorEx alloc;

    if (domain == PYMEM_DOMAIN_RAW) {
        if (_PyMem_Raw.malloc == _PyMem_DebugRawMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &_PyMem_Debug.raw.alloc);
        alloc.ctx = &_PyMem_Debug.raw;
        alloc.malloc = _PyMem_DebugRawMalloc;
        alloc.calloc = _PyMem_DebugRawCalloc;
        alloc.realloc = _PyMem_DebugRawRealloc;
        alloc.free = _PyMem_DebugRawFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_MEM) {
        if (_PyMem.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &_PyMem_Debug.mem.alloc);
        alloc.ctx = &_PyMem_Debug.mem;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);
    }
    else if (domain == PYMEM_DOMAIN_OBJ) {
        if (_PyObject.malloc == _PyMem_DebugMalloc) {
            return;
        }

        PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &_PyMem_Debug.obj.alloc);
        alloc.ctx = &_PyMem_Debug.obj;
        alloc.malloc = _PyMem_DebugMalloc;
        alloc.calloc = _PyMem_DebugCalloc;
        alloc.realloc = _PyMem_DebugRealloc;
        alloc.free = _PyMem_DebugFree;
        PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);
    }
}


void
PyMem_SetupDebugHooks(void)
{
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_RAW);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_MEM);
    _PyMem_SetupDebugHooksDomain(PYMEM_DOMAIN_OBJ);
}
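/* Usage sketch (hypothetical embedder code, not part of the build): custom
 * allocators should be installed before the debug hooks wrap them, e.g.:
 *
 *     PyMemAllocatorEx my_alloc = {my_ctx, my_malloc, my_calloc,
 *                                  my_realloc, my_free};
 *     PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &my_alloc);
 *     PyMem_SetupDebugHooks();
 *
 * Calling PyMem_SetupDebugHooks() again later is harmless: each domain is
 * skipped once its malloc already points at the debug wrapper (see the
 * early returns in _PyMem_SetupDebugHooksDomain() above). */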

void
PyMem_GetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: *allocator = _PyMem_Raw; break;
    case PYMEM_DOMAIN_MEM: *allocator = _PyMem; break;
    case PYMEM_DOMAIN_OBJ: *allocator = _PyObject; break;
    default:
        /* unknown domain: set all attributes to NULL */
        allocator->ctx = NULL;
        allocator->malloc = NULL;
        allocator->calloc = NULL;
        allocator->realloc = NULL;
        allocator->free = NULL;
    }
}

void
PyMem_SetAllocator(PyMemAllocatorDomain domain, PyMemAllocatorEx *allocator)
{
    switch(domain)
    {
    case PYMEM_DOMAIN_RAW: _PyMem_Raw = *allocator; break;
    case PYMEM_DOMAIN_MEM: _PyMem = *allocator; break;
    case PYMEM_DOMAIN_OBJ: _PyObject = *allocator; break;
    /* ignore unknown domain */
    }
}
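/* Example (minimal sketch of the intended get/set pattern; the hook_* and
 * count_* names are hypothetical, not part of this file): a tool can wrap
 * the current allocator of a domain instead of replacing it, by saving the
 * old PyMemAllocatorEx and delegating to it:
 *
 *     static PyMemAllocatorEx hook_base;
 *     static size_t count_mallocs;
 *
 *     static void *
 *     hook_malloc(void *ctx, size_t size)
 *     {
 *         count_mallocs++;
 *         return hook_base.malloc(hook_base.ctx, size);
 *     }
 *
 *     void
 *     install_hook(void)
 *     {
 *         PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &hook_base);
 *         PyMemAllocatorEx hook = hook_base;
 *         hook.malloc = hook_malloc;
 *         PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &hook);
 *     }
 */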

void
PyObject_GetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    *allocator = _PyObject_Arena;
}

void
PyObject_SetArenaAllocator(PyObjectArenaAllocator *allocator)
{
    _PyObject_Arena = *allocator;
}

void *
PyMem_RawMalloc(size_t size)
{
    /*
     * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.
     * Most python internals blindly use a signed Py_ssize_t to track
     * things without checking for overflows or negatives.
     * As size_t is unsigned, checking for size < 0 is not required.
     */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.malloc(_PyMem_Raw.ctx, size);
}

void *
PyMem_RawCalloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem_Raw.calloc(_PyMem_Raw.ctx, nelem, elsize);
}
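/* Worked example of the overflow guard above (illustrative): on a 64-bit
 * build PY_SSIZE_T_MAX is 2**63 - 1, so for elsize == 16 the check rejects
 * any nelem greater than (2**63 - 1) / 16. This refuses every request whose
 * nelem * elsize product would overflow or exceed the signed limit, without
 * ever computing the product itself. */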

void*
PyMem_RawRealloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem_Raw.realloc(_PyMem_Raw.ctx, ptr, new_size);
}

void
PyMem_RawFree(void *ptr)
{
    _PyMem_Raw.free(_PyMem_Raw.ctx, ptr);
}


void *
PyMem_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.malloc(_PyMem.ctx, size);
}

void *
PyMem_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyMem.calloc(_PyMem.ctx, nelem, elsize);
}

void *
PyMem_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyMem.realloc(_PyMem.ctx, ptr, new_size);
}

void
PyMem_Free(void *ptr)
{
    _PyMem.free(_PyMem.ctx, ptr);
}


wchar_t*
_PyMem_RawWcsdup(const wchar_t *str)
{
    assert(str != NULL);

    size_t len = wcslen(str);
    if (len > (size_t)PY_SSIZE_T_MAX / sizeof(wchar_t) - 1) {
        return NULL;
    }

    size_t size = (len + 1) * sizeof(wchar_t);
    wchar_t *str2 = PyMem_RawMalloc(size);
    if (str2 == NULL) {
        return NULL;
    }

    memcpy(str2, str, size);
    return str2;
}

char *
_PyMem_RawStrdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_RawMalloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

char *
_PyMem_Strdup(const char *str)
{
    assert(str != NULL);
    size_t size = strlen(str) + 1;
    char *copy = PyMem_Malloc(size);
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, str, size);
    return copy;
}

void *
PyObject_Malloc(size_t size)
{
    /* see PyMem_RawMalloc() */
    if (size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.malloc(_PyObject.ctx, size);
}

void *
PyObject_Calloc(size_t nelem, size_t elsize)
{
    /* see PyMem_RawMalloc() */
    if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
        return NULL;
    return _PyObject.calloc(_PyObject.ctx, nelem, elsize);
}

void *
PyObject_Realloc(void *ptr, size_t new_size)
{
    /* see PyMem_RawMalloc() */
    if (new_size > (size_t)PY_SSIZE_T_MAX)
        return NULL;
    return _PyObject.realloc(_PyObject.ctx, ptr, new_size);
}

void
PyObject_Free(void *ptr)
{
    _PyObject.free(_PyObject.ctx, ptr);
}


#ifdef WITH_PYMALLOC

#ifdef WITH_VALGRIND
#include <valgrind/valgrind.h>

/* If we're using GCC, use __builtin_expect() to reduce overhead of
   the valgrind checks */
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)
#  define UNLIKELY(value) __builtin_expect((value), 0)
#else
#  define UNLIKELY(value) (value)
#endif

/* -1 indicates that we haven't checked that we're running on valgrind yet. */
static int running_on_valgrind = -1;
#endif


/* An object allocator for Python.

   Here is an introduction to the layers of the Python memory architecture,
   showing where the object allocator is actually used (layer +2). It is
   called for every object allocation and deallocation (PyObject_New/Del),
   unless the object-specific allocators implement a proprietary allocation
   scheme (ex.: ints use a simple free list). This is also the place where
   the cyclic garbage collector operates selectively on container objects.


    Object-specific allocators
    _____   ______   ______       ________
   [ int ] [ dict ] [ list ] ... [ string ]       Python core         |
+3 | <----- Object-specific memory -----> | <-- Non-object memory --> |
    _______________________________       |                           |
   [   Python's object allocator   ]      |                           |
+2 | ####### Object memory ####### | <------ Internal buffers ------> |
    ______________________________________________________________    |
   [          Python's raw memory allocator (PyMem_ API)          ]   |
+1 | <----- Python memory (under PyMem manager's control) ------> |   |
    __________________________________________________________________
   [    Underlying general-purpose allocator (ex: C library malloc)   ]
 0 | <------ Virtual memory allocated for the python process -------> |

   =========================================================================
    _______________________________________________________________________
   [                OS-specific Virtual Memory Manager (VMM)               ]
-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |
    __________________________________   __________________________________
   [                                  ] [                                  ]
-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |

*/
/*==========================================================================*/

/* A fast, special-purpose memory allocator for small blocks, to be used
   on top of a general-purpose malloc -- heavily based on previous art. */

/* Vladimir Marangozov -- August 2000 */

/*
 * "Memory management is where the rubber meets the road -- if we do the wrong
 * thing at any level, the results will not be good. And if we don't make the
 * levels work well together, we are in serious trouble." (1)
 *
 * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,
 *     "Dynamic Storage Allocation: A Survey and Critical Review",
 *     in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.
 */

/* #undef WITH_MEMORY_LIMITS */         /* disable mem limit checks  */

/*==========================================================================*/

/*
 * Allocation strategy abstract:
 *
 * For small requests, the allocator sub-allocates <Big> blocks of memory.
 * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the
 * system's allocator.
 *
 * Small requests are grouped in size classes spaced 8 bytes apart, due
 * to the required valid alignment of the returned address. Requests of
 * a particular size are serviced from memory pools of 4K (one VMM page).
 * Pools are fragmented on demand and contain free lists of blocks of one
 * particular size class. In other words, there is a fixed-size allocator
 * for each size class. Free pools are shared by the different allocators
 * thus minimizing the space reserved for a particular size class.
 *
 * This allocation strategy is a variant of what is known as "simple
 * segregated storage based on array of free lists". The main drawback of
 * simple segregated storage is that we might end up with a lot of reserved
 * memory for the different free lists, which can degenerate over time. To
 * avoid this, we partition each free list in pools and we share dynamically
 * the reserved space between all free lists. This technique is quite
 * efficient for memory-intensive programs which allocate mainly small-sized
 * blocks.
 *
 * For small requests we have the following table:
 *
 * Request in bytes     Size of allocated block      Size class idx
 * ----------------------------------------------------------------
 *        1-8                     8                       0
 *        9-16                   16                       1
 *       17-24                   24                       2
 *       25-32                   32                       3
 *       33-40                   40                       4
 *       41-48                   48                       5
 *       49-56                   56                       6
 *       57-64                   64                       7
 *       65-72                   72                       8
 *        ...                   ...                     ...
 *      497-504                 504                      62
 *      505-512                 512                      63
 *
 *      0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying
 *      allocator.
 */
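/* Illustrative restatement of the table above (a sketch, not code the
 * allocator executes at this point): with ALIGNMENT_SHIFT == 3, a small
 * request of nbytes in 1..SMALL_REQUEST_THRESHOLD maps to its size class
 * index as
 *
 *     size_class_idx = (nbytes - 1) >> ALIGNMENT_SHIFT;
 *
 * so a 20-byte request falls in class (20 - 1) >> 3 == 2 and is served from
 * a 24-byte block, matching the 17-24 row of the table. */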

/*==========================================================================*/

/*
 * -- Main tunable settings section --
 */

/*
 * Alignment of addresses returned to the user. 8-bytes alignment works
 * on most current architectures (with 32-bit or 64-bit address busses).
 * The alignment value is also used for grouping small requests in size
 * classes spaced ALIGNMENT bytes apart.
 *
 * You shouldn't change this unless you know what you are doing.
 */
#define ALIGNMENT               8               /* must be 2^N */
#define ALIGNMENT_SHIFT         3

/* Return the number of bytes in size class I, as a uint. */
#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)

/*
 * Max size threshold below which malloc requests are considered to be
 * small enough in order to use preallocated memory pools. You can tune
 * this value according to your application behaviour and memory needs.
 *
 * Note: a size threshold of 512 guarantees that newly created dictionaries
 * will be allocated from preallocated memory pools on 64-bit.
 *
 * The following invariants must hold:
 *      1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512
 *      2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT
 *
 * Although not required, for better performance and space efficiency,
 * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.
 */
#define SMALL_REQUEST_THRESHOLD 512
#define NB_SMALL_SIZE_CLASSES   (SMALL_REQUEST_THRESHOLD / ALIGNMENT)

/*
 * The system's VMM page size can be obtained on most unices with a
 * getpagesize() call or deduced from various header files. To make
 * things simpler, we assume that it is 4K, which is OK for most systems.
 * It is probably better if this is the native page size, but it doesn't
 * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page
 * size, then `POOL_ADDR(p)->arenaindex' could, on rare occasions, cause a
 * segmentation fault. 4K is apparently OK for all the platforms that python
 * currently targets.
 */
#define SYSTEM_PAGE_SIZE        (4 * 1024)
#define SYSTEM_PAGE_SIZE_MASK   (SYSTEM_PAGE_SIZE - 1)

/*
 * Maximum amount of memory managed by the allocator for small requests.
 */
#ifdef WITH_MEMORY_LIMITS
#ifndef SMALL_MEMORY_LIMIT
#define SMALL_MEMORY_LIMIT      (64 * 1024 * 1024)      /* 64 MB -- more? */
#endif
#endif

/*
 * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned
 * on a page boundary. This is a reserved virtual address space for the
 * current process (obtained through a malloc()/mmap() call). In no way does
 * this mean that the memory arenas will be used entirely. A malloc(<Big>) is
 * usually an address range reservation for <Big> bytes, unless all pages
 * within this space are referenced subsequently. So malloc'ing big blocks
 * and not using them does not mean "wasting memory". It's an addressable
 * range wastage...
 *
 * Arenas are allocated with mmap() on systems supporting anonymous memory
 * mappings to reduce heap fragmentation.
 */
#define ARENA_SIZE              (256 << 10)     /* 256KB */

#ifdef WITH_MEMORY_LIMITS
#define MAX_ARENAS              (SMALL_MEMORY_LIMIT / ARENA_SIZE)
#endif

/*
 * Size of the pools used for small blocks. Should be a power of 2,
 * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.
 */
#define POOL_SIZE               SYSTEM_PAGE_SIZE        /* must be 2^N */
#define POOL_SIZE_MASK          SYSTEM_PAGE_SIZE_MASK

/*
 * -- End of tunable settings section --
 */

/*==========================================================================*/

/*
 * Locking
 *
 * To reduce lock contention, it would probably be better to refine the
 * crude function locking with per size class locking. I'm not positive
 * however, whether it's worth switching to such locking policy because
 * of the performance penalty it might introduce.
 *
 * The following macros describe the simplest (should also be the fastest)
 * lock object on a particular platform and the init/fini/lock/unlock
 * operations on it. The locks defined here are not expected to be recursive
 * because it is assumed that they will always be called in the order:
 * INIT, [LOCK, UNLOCK]*, FINI.
 */

/*
 * Python's threads are serialized, so object malloc locking is disabled.
 */
#define SIMPLELOCK_DECL(lock)   /* simple lock declaration              */
#define SIMPLELOCK_INIT(lock)   /* allocate (if needed) and initialize  */
#define SIMPLELOCK_FINI(lock)   /* free/destroy an existing lock        */
#define SIMPLELOCK_LOCK(lock)   /* acquire released lock                */
#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock                */

/* When you say memory, my mind reasons in terms of (pointers to) blocks */
typedef uint8_t block;

/* Pool for small blocks. */
struct pool_header {
    union { block *_padding;
            uint count; } ref;          /* number of allocated blocks    */
    block *freeblock;                   /* pool's free list head         */
    struct pool_header *nextpool;       /* next pool of this size class  */
    struct pool_header *prevpool;       /* previous pool       ""        */
    uint arenaindex;                    /* index into arenas of base adr */
    uint szidx;                         /* block size class index        */
    uint nextoffset;                    /* bytes to virgin block         */
    uint maxnextoffset;                 /* largest valid nextoffset      */
};

typedef struct pool_header *poolp;

/* Record keeping for arenas. */
struct arena_object {
    /* The address of the arena, as returned by malloc. Note that 0
     * will never be returned by a successful malloc, and is used
     * here to mark an arena_object that doesn't correspond to an
     * allocated arena.
     */
    uintptr_t address;

    /* Pool-aligned pointer to the next pool to be carved off. */
    block* pool_address;

    /* The number of available pools in the arena: free pools + never-
     * allocated pools.
     */
    uint nfreepools;

    /* The total number of pools in the arena, whether or not available. */
    uint ntotalpools;

    /* Singly-linked list of available pools. */
    struct pool_header* freepools;

    /* Whenever this arena_object is not associated with an allocated
     * arena, the nextarena member is used to link all unassociated
     * arena_objects in the singly-linked `unused_arena_objects` list.
     * The prevarena member is unused in this case.
     *
     * When this arena_object is associated with an allocated arena
     * with at least one available pool, both members are used in the
     * doubly-linked `usable_arenas` list, which is maintained in
     * increasing order of `nfreepools` values.
     *
     * Else this arena_object is associated with an allocated arena
     * all of whose pools are in use. `nextarena` and `prevarena`
     * are both meaningless in this case.
     */
    struct arena_object* nextarena;
    struct arena_object* prevarena;
};

#define POOL_OVERHEAD   _Py_SIZE_ROUND_UP(sizeof(struct pool_header), ALIGNMENT)

#define DUMMY_SIZE_IDX  0xffff  /* size class of newly cached pools */

/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
#define POOL_ADDR(P) ((poolp)_Py_ALIGN_DOWN((P), POOL_SIZE))

/* Return total number of blocks in pool of size index I, as a uint. */
#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))
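/* Worked example (illustrative; assumes 4K pools, ALIGNMENT 8, and a 64-bit
 * build where sizeof(struct pool_header) is 48, so POOL_OVERHEAD is 48):
 * POOL_ADDR masks off the low 12 address bits, so a block at address
 * 0x7f2a00403a40 belongs to the pool header at 0x7f2a00403000, and
 * NUMBLOCKS(0) == (4096 - 48) / 8 == 506 blocks of the 8-byte size class
 * fit in one pool. */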

/*==========================================================================*/

/*
 * This malloc lock
 */
SIMPLELOCK_DECL(_malloc_lock)
#define LOCK()          SIMPLELOCK_LOCK(_malloc_lock)
#define UNLOCK()        SIMPLELOCK_UNLOCK(_malloc_lock)
#define LOCK_INIT()     SIMPLELOCK_INIT(_malloc_lock)
#define LOCK_FINI()     SIMPLELOCK_FINI(_malloc_lock)

/*
 * Pool table -- headed, circular, doubly-linked lists of partially used pools.

This is involved. For an index i, usedpools[i+i] is the header for a list of
all partially used pools holding small blocks with "size class idx" i. So
usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size
16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.

Pools are carved off an arena's highwater mark (an arena_object's pool_address
member) as needed. Once carved off, a pool is in one of three states forever
after:

used == partially used, neither empty nor full
    At least one block in the pool is currently allocated, and at least one
    block in the pool is not currently allocated (note this implies a pool
    has room for at least two blocks).
    This is a pool's initial state, as a pool is created only when malloc
    needs space.
    The pool holds blocks of a fixed size, and is in the circular list headed
    at usedpools[i] (see above). It's linked to the other used pools of the
    same size class via the pool_header's nextpool and prevpool members.
    If all but one block is currently allocated, a malloc can cause a
    transition to the full state. If all but one block is not currently
    allocated, a free can cause a transition to the empty state.

full == all the pool's blocks are currently allocated
    On transition to full, a pool is unlinked from its usedpools[] list.
    It's not linked to from anything then anymore, and its nextpool and
    prevpool members are meaningless until it transitions back to used.
    A free of a block in a full pool puts the pool back in the used state.
    Then it's linked in at the front of the appropriate usedpools[] list, so
    that the next allocation for its size class will reuse the freed block.

empty == all the pool's blocks are currently available for allocation
    On transition to empty, a pool is unlinked from its usedpools[] list,
    and linked to the front of its arena_object's singly-linked freepools
    list, via its nextpool member. The prevpool member has no meaning in
    this case. Empty pools have no inherent size class: the next time a
    malloc finds an empty list in usedpools[], it takes the first pool off
    of freepools. If the size class needed happens to be the same as the
    size class the pool last had, some pool initialization can be skipped.


Block Management

Blocks within pools are again carved out as needed. pool->freeblock points to
the start of a singly-linked list of free blocks within the pool. When a
block is freed, it's inserted at the front of its pool's freeblock list. Note
that the available blocks in a pool are *not* linked all together when a pool
is initialized. Instead only "the first two" (lowest addresses) blocks are
set up, returning the first such block, and setting pool->freeblock to a
one-block list holding the second such block. This is consistent with the
fact that pymalloc strives at all levels (arena, pool, and block) never to
touch a piece of memory until it's actually needed.

So long as a pool is in the used state, we're certain there *is* a block
available for allocating, and pool->freeblock is not NULL. If pool->freeblock
points to the end of the free list before we've carved the entire pool into
blocks, that means we simply haven't yet gotten to one of the higher-address
blocks. The offset from the pool_header to the start of "the next" virgin
block is stored in the pool_header nextoffset member, and the largest value
of nextoffset that makes sense is stored in the maxnextoffset member when a
pool is initialized. All the blocks in a pool have been passed out at least
once when and only when nextoffset > maxnextoffset.


Major obscurity: While the usedpools vector is declared to have poolp
entries, it doesn't really. It really contains two pointers per (conceptual)
poolp entry, the nextpool and prevpool members of a pool_header. The
excruciating initialization code below fools C so that

    usedpools[i+i]

"acts like" a genuine poolp, but only so long as you only reference its
nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is
compensating for the fact that a pool_header's nextpool and prevpool members
immediately follow a pool_header's first two members:

    union { block *_padding;
            uint count; } ref;
    block *freeblock;

each of which consume sizeof(block *) bytes. So what usedpools[i+i] really
contains is a fudged-up pointer p such that *if* C believes it's a poolp
pointer, then p->nextpool and p->prevpool are both p (meaning that the headed
circular list is empty).

It's unclear why the usedpools setup is so convoluted. It could be to
minimize the amount of cache required to hold this heavily-referenced table
(which only *needs* the two interpool pointer members of a pool_header). OTOH,
referencing code has to remember to "double the index" and doing so isn't
free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying
on the fact that C doesn't insert any padding anywhere in a pool_header at or
before the prevpool member.
**************************************************************************** */

#define PTA(x)  ((poolp )((uint8_t *)&(usedpools[2*(x)]) - 2*sizeof(block *)))
#define PT(x)   PTA(x), PTA(x)

static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {
    PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)
#if NB_SMALL_SIZE_CLASSES > 8
    , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)
#if NB_SMALL_SIZE_CLASSES > 16
    , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)
#if NB_SMALL_SIZE_CLASSES > 24
    , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)
#if NB_SMALL_SIZE_CLASSES > 32
    , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)
#if NB_SMALL_SIZE_CLASSES > 40
    , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)
#if NB_SMALL_SIZE_CLASSES > 48
    , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)
#if NB_SMALL_SIZE_CLASSES > 56
    , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)
#if NB_SMALL_SIZE_CLASSES > 64
#error "NB_SMALL_SIZE_CLASSES should not be greater than 64"
#endif /* NB_SMALL_SIZE_CLASSES > 64 */
#endif /* NB_SMALL_SIZE_CLASSES > 56 */
#endif /* NB_SMALL_SIZE_CLASSES > 48 */
#endif /* NB_SMALL_SIZE_CLASSES > 40 */
#endif /* NB_SMALL_SIZE_CLASSES > 32 */
#endif /* NB_SMALL_SIZE_CLASSES > 24 */
#endif /* NB_SMALL_SIZE_CLASSES > 16 */
#endif /* NB_SMALL_SIZE_CLASSES > 8 */
};
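/* Illustrative sketch of how the fudged-up headers are used (simplified from
 * the allocator's fast path further below in this file): for a size class
 * index `size`, the list is empty exactly when the header points at itself,
 * so the lookup is
 *
 *     poolp pool = usedpools[size + size];
 *     if (pool != pool->nextpool) {
 *         ... a partially used pool of this size class is available ...
 *     }
 */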
| 1082 | |
| 1083 | /*========================================================================== |
| 1084 | Arena management. |
| 1085 | |
| 1086 | `arenas` is a vector of arena_objects. It contains maxarenas entries, some of |
| 1087 | which may not be currently used (== they're arena_objects that aren't |
| 1088 | currently associated with an allocated arena). Note that arenas proper are |
| 1089 | separately malloc'ed. |
| 1090 | |
| 1091 | Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5, |
| 1092 | we do try to free() arenas, and use some mild heuristic strategies to increase |
| 1093 | the likelihood that arenas eventually can be freed. |
| 1094 | |
| 1095 | unused_arena_objects |
| 1096 | |
| 1097 | This is a singly-linked list of the arena_objects that are currently not |
| 1098 | being used (no arena is associated with them). Objects are taken off the |
| 1099 | head of the list in new_arena(), and are pushed on the head of the list in |
| 1100 | PyObject_Free() when the arena is empty. Key invariant: an arena_object |
| 1101 | is on this list if and only if its .address member is 0. |
| 1102 | |
| 1103 | usable_arenas |
| 1104 | |
| 1105 | This is a doubly-linked list of the arena_objects associated with arenas |
| 1106 | that have pools available. These pools are either waiting to be reused, |
| 1107 | or have not been used before. The list is sorted to have the most- |
| 1108 | allocated arenas first (ascending order based on the nfreepools member). |
| 1109 | This means that the next allocation will come from a heavily used arena, |
| 1110 | which gives the nearly empty arenas a chance to be returned to the system. |
| 1111 | In my unscientific tests this dramatically improved the number of arenas |
| 1112 | that could be freed. |
| 1113 | |
| 1114 | Note that an arena_object associated with an arena all of whose pools are |
| 1115 | currently in use isn't on either list. |
| 1116 | */ |

/* Array of objects used to track chunks of memory (arenas). */
static struct arena_object* arenas = NULL;
/* Number of slots currently allocated in the `arenas` vector. */
static uint maxarenas = 0;

/* The head of the singly-linked, NULL-terminated list of available
 * arena_objects.
 */
static struct arena_object* unused_arena_objects = NULL;

/* The head of the doubly-linked list (NULL-terminated at each end) of
 * arena_objects associated with arenas that have pools available.
 */
static struct arena_object* usable_arenas = NULL;

/* How many arena_objects do we initially allocate?
 * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the
 * `arenas` vector.
 */
#define INITIAL_ARENA_OBJECTS 16

/* Number of arenas allocated that haven't been free()'d. */
static size_t narenas_currently_allocated = 0;

/* Total number of times malloc() was called to allocate an arena. */
static size_t ntimes_arena_allocated = 0;
/* High water mark (max value ever seen) for narenas_currently_allocated. */
static size_t narenas_highwater = 0;

static Py_ssize_t _Py_AllocatedBlocks = 0;

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return _Py_AllocatedBlocks;
}

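/* Usage note (editorial, hedged): this counter backs the Python-level
 * sys.getallocatedblocks() call, assuming the usual sysmodule wiring:
 *
 *     >>> import sys
 *     >>> sys.getallocatedblocks()    # number of currently allocated blocks
 *     61428
 *
 * The value shown is illustrative only.
 */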

/* Allocate a new arena.  If we run out of memory, return NULL.  Else
 * allocate a new arena, and return the address of an arena_object
 * describing the new arena.  It's expected that the caller will set
 * `usable_arenas` to the return value.
 */
static struct arena_object*
new_arena(void)
{
    struct arena_object* arenaobj;
    uint excess;        /* number of bytes above pool alignment */
    void *address;
    static int debug_stats = -1;

    if (debug_stats == -1) {
        const char *opt = Py_GETENV("PYTHONMALLOCSTATS");
        debug_stats = (opt != NULL && *opt != '\0');
    }
    if (debug_stats)
        _PyObject_DebugMallocStats(stderr);

    if (unused_arena_objects == NULL) {
        uint i;
        uint numarenas;
        size_t nbytes;

        /* Double the number of arena objects on each allocation.
         * Note that it's possible for `numarenas` to overflow.
         */
        numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;
        if (numarenas <= maxarenas)
            return NULL;                /* overflow */
#if SIZEOF_SIZE_T <= SIZEOF_INT
        if (numarenas > SIZE_MAX / sizeof(*arenas))
            return NULL;                /* overflow */
#endif
        nbytes = numarenas * sizeof(*arenas);
        arenaobj = (struct arena_object *)PyMem_RawRealloc(arenas, nbytes);
        if (arenaobj == NULL)
            return NULL;
        arenas = arenaobj;

        /* We might need to fix pointers that were copied.  However,
         * new_arena only gets called when all the pools in the
         * previous arenas are full.  Thus, there are *no* pointers
         * into the old array.  Thus, we don't have to worry about
         * invalid pointers.  Just to be sure, some asserts:
         */
        assert(usable_arenas == NULL);
        assert(unused_arena_objects == NULL);

        /* Put the new arenas on the unused_arena_objects list. */
        for (i = maxarenas; i < numarenas; ++i) {
            arenas[i].address = 0;              /* mark as unassociated */
            arenas[i].nextarena = i < numarenas - 1 ?
                                  &arenas[i+1] : NULL;
        }

        /* Update globals. */
        unused_arena_objects = &arenas[maxarenas];
        maxarenas = numarenas;
    }

    /* Take the next available arena object off the head of the list. */
    assert(unused_arena_objects != NULL);
    arenaobj = unused_arena_objects;
    unused_arena_objects = arenaobj->nextarena;
    assert(arenaobj->address == 0);
    address = _PyObject_Arena.alloc(_PyObject_Arena.ctx, ARENA_SIZE);
    if (address == NULL) {
        /* The allocation failed: return NULL after putting the
         * arenaobj back.
         */
        arenaobj->nextarena = unused_arena_objects;
        unused_arena_objects = arenaobj;
        return NULL;
    }
    arenaobj->address = (uintptr_t)address;

    ++narenas_currently_allocated;
    ++ntimes_arena_allocated;
    if (narenas_currently_allocated > narenas_highwater)
        narenas_highwater = narenas_currently_allocated;
    arenaobj->freepools = NULL;
    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    arenaobj->pool_address = (block*)arenaobj->address;
    arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;
    assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);
    excess = (uint)(arenaobj->address & POOL_SIZE_MASK);
    if (excess != 0) {
        --arenaobj->nfreepools;
        arenaobj->pool_address += POOL_SIZE - excess;
    }
    arenaobj->ntotalpools = arenaobj->nfreepools;

    return arenaobj;
}

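/* A minimal sketch (hypothetical helper, not part of the allocator) of the
 * growth policy of the `arenas` vector as implemented in new_arena() above.
 * The slot count doubles on each growth, 16, 32, 64, ..., and with the
 * 256 KB ARENA_SIZE implied by the INITIAL_ARENA_OBJECTS comment, the
 * initial 16 slots cover 4 MB of arenas.
 */
static inline uint
example_next_numarenas(uint current_maxarenas)
{
    uint n = current_maxarenas ? current_maxarenas << 1
                               : INITIAL_ARENA_OBJECTS;
    return (n <= current_maxarenas) ? 0 : n;    /* 0 signals overflow */
}
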

/*
address_in_range(P, POOL)

Return true if and only if P is an address that was allocated by pymalloc.
POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
(the caller is asked to compute this because address_in_range is called on
every alloc/realloc/free, and micro-efficiency is important here: it's best
for the caller to assign POOL_ADDR(P) to a variable once and pass that in).

Tricky: Let B be the arena base address associated with the pool, B =
arenas[(POOL)->arenaindex].address.  Then P belongs to the arena if and only if

    B <= P < B + ARENA_SIZE

Subtracting B throughout, this is true iff

    0 <= P-B < ARENA_SIZE

By using unsigned arithmetic, the "0 <=" half of the test can be skipped.

Obscure: A PyMem "free memory" function can call the pymalloc free or realloc
before the first arena has been allocated.  `arenas` is still NULL in that
case.  We're relying on maxarenas also being 0 in that case, so that
(POOL)->arenaindex < maxarenas must be false, saving us from trying to index
into a NULL arenas.

Details: given P and POOL, the arena_object corresponding to P is AO =
arenas[(POOL)->arenaindex].  Suppose obmalloc controls P.  Then (barring wild
stores, etc), POOL is the correct address of P's pool, AO.address is the
correct base address of the pool's arena, and P must be within ARENA_SIZE of
AO.address.  In addition, AO.address is not 0 (no arena can start at address 0
(NULL)).  Therefore address_in_range correctly reports that obmalloc
controls P.

Now suppose obmalloc does not control P (e.g., P was obtained via a direct
call to the system malloc() or realloc()).  (POOL)->arenaindex may be anything
in this case -- it may even be uninitialized trash.  If the trash arenaindex
is >= maxarenas, the function correctly concludes at once that obmalloc
doesn't control P.

Else arenaindex is < maxarenas, and AO is read.  If AO corresponds to an
allocated arena, obmalloc controls all the memory in slice AO.address :
AO.address+ARENA_SIZE.  By case assumption, P is not controlled by obmalloc,
so P doesn't lie in that slice, so the function correctly reports that P is
not controlled by obmalloc.

Finally, if P is not controlled by obmalloc and AO corresponds to an unused
arena_object (one not currently associated with an allocated arena),
AO.address is 0, and the second test in the function reduces to:

    P < ARENA_SIZE

If P >= ARENA_SIZE (extremely likely), the function again correctly concludes
that P is not controlled by obmalloc.  However, if P < ARENA_SIZE, this part
of the test still passes, and the third clause (AO.address != 0) is necessary
to get the correct result: AO.address is 0 in this case, so the function
correctly reports that P is not controlled by obmalloc (despite that P lies in
slice AO.address : AO.address + ARENA_SIZE).

Note: The third (AO.address != 0) clause was added in Python 2.5.  Before
2.5, arenas were never free()'d, and an arenaindex < maxarenas always
corresponded to a currently-allocated arena, so the "P is not controlled by
obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case
was impossible.

Note that the logic is excruciating, and reading possibly uninitialized
memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
creates problems for some memory debuggers.  The overwhelming advantage is
that this test determines whether an arbitrary address is controlled by
obmalloc in small constant time, independent of the number of arenas
obmalloc controls.  Since this test is needed at every entry point, it's
extremely desirable that it be this fast.
*/

static bool ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
address_in_range(void *p, poolp pool)
{
    // Since address_in_range may be reading from memory which was not allocated
    // by Python, it is important that pool->arenaindex is read only once, as
    // another thread may be concurrently modifying the value without holding
    // the GIL.  The following dance forces the compiler to read pool->arenaindex
    // only once.
    uint arenaindex = *((volatile uint *)&pool->arenaindex);
    return arenaindex < maxarenas &&
        (uintptr_t)p - arenas[arenaindex].address < ARENA_SIZE &&
        arenas[arenaindex].address != 0;
}

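/* A minimal sketch (hypothetical helper, not part of the allocator) of the
 * unsigned-compare trick used above.  If p < base, the unsigned subtraction
 * wraps around to a huge value, so a single "difference < size" comparison
 * implements both halves of "base <= p < base + size".
 */
static inline int
example_in_range(uintptr_t p, uintptr_t base, uintptr_t size)
{
    return p - base < size;    /* wrap-around makes this a two-sided test */
}
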

/*==========================================================================*/

/* pymalloc allocator

   The basic blocks are ordered by decreasing execution frequency,
   which minimizes the number of jumps in the most common cases,
   and improves branch prediction and instruction scheduling (small
   block allocations typically result in a couple of instructions),
   at least unless the optimizer is too smart and reorders everything.

   Return 1 if pymalloc allocated memory and wrote the pointer into *ptr_p.

   Return 0 if pymalloc failed to allocate the memory block: the request was
   too big, the max memory limit has been reached, or an error occurred in
   the code below; the caller then falls back to the raw allocator as a last
   chance to serve the request. */
static int
pymalloc_alloc(void *ctx, void **ptr_p, size_t nbytes)
{
    block *bp;
    poolp pool;
    poolp next;
    uint size;

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind == -1)) {
        running_on_valgrind = RUNNING_ON_VALGRIND;
    }
    if (UNLIKELY(running_on_valgrind)) {
        return 0;
    }
#endif

    if (nbytes == 0) {
        return 0;
    }
    if (nbytes > SMALL_REQUEST_THRESHOLD) {
        return 0;
    }

    LOCK();
    /*
     * Most frequent paths first
     */
    size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;
    pool = usedpools[size + size];
    if (pool != pool->nextpool) {
        /*
         * There is a used pool for this size class.
         * Pick up the head block of its free list.
         */
        ++pool->ref.count;
        bp = pool->freeblock;
        assert(bp != NULL);
        if ((pool->freeblock = *(block **)bp) != NULL) {
            goto success;
        }

        /*
         * Reached the end of the free list, try to extend it.
         */
        if (pool->nextoffset <= pool->maxnextoffset) {
            /* There is room for another block. */
            pool->freeblock = (block*)pool +
                              pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(block **)(pool->freeblock) = NULL;
            goto success;
        }

        /* Pool is full, unlink from used pools. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        goto success;
    }

    /* There isn't a pool of the right size class immediately
     * available:  use a free pool.
     */
    if (usable_arenas == NULL) {
        /* No arena has a free pool:  allocate a new arena. */
#ifdef WITH_MEMORY_LIMITS
        if (narenas_currently_allocated >= MAX_ARENAS) {
            goto failed;
        }
#endif
        usable_arenas = new_arena();
        if (usable_arenas == NULL) {
            goto failed;
        }
        usable_arenas->nextarena =
            usable_arenas->prevarena = NULL;
    }
    assert(usable_arenas->address != 0);

    /* Try to get a cached free pool. */
    pool = usable_arenas->freepools;
    if (pool != NULL) {
        /* Unlink from cached pools. */
        usable_arenas->freepools = pool->nextpool;

        /* This arena already had the smallest nfreepools
         * value, so decreasing nfreepools doesn't change
         * that, and we don't need to rearrange the
         * usable_arenas list.  However, if the arena has
         * become wholly allocated, we need to remove its
         * arena_object from usable_arenas.
         */
        --usable_arenas->nfreepools;
        if (usable_arenas->nfreepools == 0) {
            /* Wholly allocated:  remove. */
            assert(usable_arenas->freepools == NULL);
            assert(usable_arenas->nextarena == NULL ||
                   usable_arenas->nextarena->prevarena ==
                   usable_arenas);

            usable_arenas = usable_arenas->nextarena;
            if (usable_arenas != NULL) {
                usable_arenas->prevarena = NULL;
                assert(usable_arenas->address != 0);
            }
        }
        else {
            /* nfreepools > 0:  it must be that freepools
             * isn't NULL, or that we haven't yet carved
             * off all the arena's pools for the first
             * time.
             */
            assert(usable_arenas->freepools != NULL ||
                   usable_arenas->pool_address <=
                   (block*)usable_arenas->address +
                       ARENA_SIZE - POOL_SIZE);
        }

    init_pool:
        /* Frontlink to used pools. */
        next = usedpools[size + size]; /* == prev */
        pool->nextpool = next;
        pool->prevpool = next;
        next->nextpool = pool;
        next->prevpool = pool;
        pool->ref.count = 1;
        if (pool->szidx == size) {
            /* Luckily, this pool last contained blocks
             * of the same size class, so its header
             * and free list are already initialized.
             */
            bp = pool->freeblock;
            assert(bp != NULL);
            pool->freeblock = *(block **)bp;
            goto success;
        }
        /*
         * Initialize the pool header, set up the free list to
         * contain just the second block, and return the first
         * block.
         */
        pool->szidx = size;
        size = INDEX2SIZE(size);
        bp = (block *)pool + POOL_OVERHEAD;
        pool->nextoffset = POOL_OVERHEAD + (size << 1);
        pool->maxnextoffset = POOL_SIZE - size;
        pool->freeblock = bp + size;
        *(block **)(pool->freeblock) = NULL;
        goto success;
    }

    /* Carve off a new pool. */
    assert(usable_arenas->nfreepools > 0);
    assert(usable_arenas->freepools == NULL);
    pool = (poolp)usable_arenas->pool_address;
    assert((block*)pool <= (block*)usable_arenas->address +
                           ARENA_SIZE - POOL_SIZE);
    pool->arenaindex = (uint)(usable_arenas - arenas);
    assert(&arenas[pool->arenaindex] == usable_arenas);
    pool->szidx = DUMMY_SIZE_IDX;
    usable_arenas->pool_address += POOL_SIZE;
    --usable_arenas->nfreepools;

    if (usable_arenas->nfreepools == 0) {
        assert(usable_arenas->nextarena == NULL ||
               usable_arenas->nextarena->prevarena ==
               usable_arenas);
        /* Unlink the arena:  it is completely allocated. */
        usable_arenas = usable_arenas->nextarena;
        if (usable_arenas != NULL) {
            usable_arenas->prevarena = NULL;
            assert(usable_arenas->address != 0);
        }
    }

    goto init_pool;

success:
    UNLOCK();
    assert(bp != NULL);
    *ptr_p = (void *)bp;
    return 1;

failed:
    UNLOCK();
    return 0;
}


static void *
_PyObject_Malloc(void *ctx, size_t nbytes)
{
    void* ptr;
    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawMalloc(nbytes);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}


static void *
_PyObject_Calloc(void *ctx, size_t nelem, size_t elsize)
{
    void* ptr;

    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    size_t nbytes = nelem * elsize;

    if (pymalloc_alloc(ctx, &ptr, nbytes)) {
        memset(ptr, 0, nbytes);
        _Py_AllocatedBlocks++;
        return ptr;
    }

    ptr = PyMem_RawCalloc(nelem, elsize);
    if (ptr != NULL) {
        _Py_AllocatedBlocks++;
    }
    return ptr;
}

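/* A short sketch (hypothetical helper, not part of the allocator) of the
 * overflow guard asserted above in _PyObject_Calloc.  Using division
 * sidesteps the very overflow being tested: for elsize != 0,
 * nelem * elsize fits in a Py_ssize_t exactly when
 * nelem <= PY_SSIZE_T_MAX / elsize.
 */
static inline int
example_calloc_fits(size_t nelem, size_t elsize)
{
    return elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize;
}
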

/* Free a memory block allocated by pymalloc_alloc().
   Return 1 if it was freed.
   Return 0 if the block was not allocated by pymalloc_alloc(). */
static int
pymalloc_free(void *ctx, void *p)
{
    poolp pool;
    block *lastfree;
    poolp next, prev;
    uint size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        return 0;
    }
    /* We allocated this address. */

    LOCK();

    /* Link p to the start of the pool's freeblock list.  Since
     * the pool had at least the p block outstanding, the pool
     * wasn't empty (so it's already in a usedpools[] list, or
     * was full and is in no list -- it's not in the freeblocks
     * list in any case).
     */
    assert(pool->ref.count > 0);            /* else it was empty */
    *(block **)p = lastfree = pool->freeblock;
    pool->freeblock = (block *)p;
    if (!lastfree) {
        /* Pool was full, so doesn't currently live in any list:
         * link it to the front of the appropriate usedpools[] list.
         * This mimics LRU pool usage for new allocations and
         * targets optimal filling when several pools contain
         * blocks of the same size class.
         */
        --pool->ref.count;
        assert(pool->ref.count > 0);        /* else the pool is empty */
        size = pool->szidx;
        next = usedpools[size + size];
        prev = next->prevpool;

        /* insert pool before next:   prev <-> pool <-> next */
        pool->nextpool = next;
        pool->prevpool = prev;
        next->prevpool = pool;
        prev->nextpool = pool;
        goto success;
    }

    struct arena_object* ao;
    uint nf;  /* ao->nfreepools */

    /* freeblock wasn't NULL, so the pool wasn't full,
     * and the pool is in a usedpools[] list.
     */
    if (--pool->ref.count != 0) {
        /* pool isn't empty:  leave it in usedpools */
        goto success;
    }
    /* Pool is now empty:  unlink from usedpools, and
     * link to the front of freepools.  This ensures that
     * previously freed pools will be allocated later
     * (since they are no longer referenced, they may have
     * been paged out).
     */
    next = pool->nextpool;
    prev = pool->prevpool;
    next->prevpool = prev;
    prev->nextpool = next;

    /* Link the pool to freepools.  This is a singly-linked
     * list, and pool->prevpool isn't used there.
     */
    ao = &arenas[pool->arenaindex];
    pool->nextpool = ao->freepools;
    ao->freepools = pool;
    nf = ++ao->nfreepools;

    /* All the rest is arena management.  We just freed
     * a pool, and there are 4 cases for arena mgmt:
     * 1. If all the pools are free, return the arena to
     *    the system free().
     * 2. If this is the only free pool in the arena,
     *    add the arena back to the `usable_arenas` list.
     * 3. If the "next" arena has a smaller count of free
     *    pools, we have to "slide this arena right" to
     *    restore that usable_arenas is sorted in order of
     *    nfreepools.
     * 4. Else there's nothing more to do.
     */
    if (nf == ao->ntotalpools) {
        /* Case 1.  First unlink ao from usable_arenas.
         */
        assert(ao->prevarena == NULL ||
               ao->prevarena->address != 0);
        assert(ao->nextarena == NULL ||
               ao->nextarena->address != 0);

        /* Fix the pointer in the prevarena, or the
         * usable_arenas pointer.
         */
        if (ao->prevarena == NULL) {
            usable_arenas = ao->nextarena;
            assert(usable_arenas == NULL ||
                   usable_arenas->address != 0);
        }
        else {
            assert(ao->prevarena->nextarena == ao);
            ao->prevarena->nextarena =
                ao->nextarena;
        }
        /* Fix the pointer in the nextarena. */
        if (ao->nextarena != NULL) {
            assert(ao->nextarena->prevarena == ao);
            ao->nextarena->prevarena =
                ao->prevarena;
        }
        /* Record that this arena_object slot is
         * available to be reused.
         */
        ao->nextarena = unused_arena_objects;
        unused_arena_objects = ao;

        /* Free the entire arena. */
        _PyObject_Arena.free(_PyObject_Arena.ctx,
                             (void *)ao->address, ARENA_SIZE);
        ao->address = 0;                    /* mark unassociated */
        --narenas_currently_allocated;

        goto success;
    }

    if (nf == 1) {
        /* Case 2.  Put ao at the head of
         * usable_arenas.  Note that because
         * ao->nfreepools was 0 before, ao isn't
         * currently on the usable_arenas list.
         */
        ao->nextarena = usable_arenas;
        ao->prevarena = NULL;
        if (usable_arenas)
            usable_arenas->prevarena = ao;
        usable_arenas = ao;
        assert(usable_arenas->address != 0);

        goto success;
    }

    /* If this arena is now out of order, we need to keep
     * the list sorted.  The list is kept sorted so that
     * the "most full" arenas are used first, which allows
     * the nearly empty arenas to be completely freed.  In
     * a few unscientific tests, it seems like this
     * approach allowed a lot more memory to be freed.
     */
    if (ao->nextarena == NULL ||
        nf <= ao->nextarena->nfreepools) {
        /* Case 4.  Nothing to do. */
        goto success;
    }
    /* Case 3:  We have to move the arena towards the end
     * of the list, because it has more free pools than
     * the arena to its right.
     * First unlink ao from usable_arenas.
     */
    if (ao->prevarena != NULL) {
        /* ao isn't at the head of the list */
        assert(ao->prevarena->nextarena == ao);
        ao->prevarena->nextarena = ao->nextarena;
    }
    else {
        /* ao is at the head of the list */
        assert(usable_arenas == ao);
        usable_arenas = ao->nextarena;
    }
    ao->nextarena->prevarena = ao->prevarena;

    /* Locate the new insertion point by iterating over
     * the list, using our nextarena pointer.
     */
    while (ao->nextarena != NULL && nf > ao->nextarena->nfreepools) {
        ao->prevarena = ao->nextarena;
        ao->nextarena = ao->nextarena->nextarena;
    }

    /* Insert ao at this point. */
    assert(ao->nextarena == NULL || ao->prevarena == ao->nextarena->prevarena);
    assert(ao->prevarena->nextarena == ao->nextarena);

    ao->prevarena->nextarena = ao;
    if (ao->nextarena != NULL) {
        ao->nextarena->prevarena = ao;
    }

    /* Verify that the swaps worked. */
    assert(ao->nextarena == NULL || nf <= ao->nextarena->nfreepools);
    assert(ao->prevarena == NULL || nf > ao->prevarena->nfreepools);
    assert(ao->nextarena == NULL || ao->nextarena->prevarena == ao);
    assert((usable_arenas == ao && ao->prevarena == NULL)
           || ao->prevarena->nextarena == ao);

    goto success;

success:
    UNLOCK();
    return 1;
}


static void
_PyObject_Free(void *ctx, void *p)
{
    /* PyObject_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    _Py_AllocatedBlocks--;
    if (!pymalloc_free(ctx, p)) {
        /* pymalloc didn't allocate this address */
        PyMem_RawFree(p);
    }
}


/* pymalloc realloc.

   If nbytes==0, then as the Python docs promise, we do not treat this like
   free(p), and return a non-NULL result.

   Return 1 if pymalloc reallocated memory and wrote the new pointer into
   newptr_p.

   Return 0 if pymalloc didn't allocate p. */
static int
pymalloc_realloc(void *ctx, void **newptr_p, void *p, size_t nbytes)
{
    void *bp;
    poolp pool;
    size_t size;

    assert(p != NULL);

#ifdef WITH_VALGRIND
    /* Treat running_on_valgrind == -1 the same as 0 */
    if (UNLIKELY(running_on_valgrind > 0)) {
        return 0;
    }
#endif

    pool = POOL_ADDR(p);
    if (!address_in_range(p, pool)) {
        /* pymalloc is not managing this block.

           If nbytes <= SMALL_REQUEST_THRESHOLD, it's tempting to try to take
           over this block.  However, if we do, we need to copy the valid data
           from the C-managed block to one of our blocks, and there's no
           portable way to know how much of the memory space starting at p is
           valid.

           As bug 1185883 pointed out the hard way, it's possible that the
           C-managed block is "at the end" of allocated VM space, so that a
           memory fault can occur if we try to copy nbytes bytes starting at p.
           Instead we punt:  let C continue to manage this block. */
        return 0;
    }

    /* pymalloc is in charge of this block */
    size = INDEX2SIZE(pool->szidx);
    if (nbytes <= size) {
        /* The block is staying the same or shrinking.

           If it's shrinking, there's a tradeoff:  it costs cycles to copy the
           block to a smaller size class, but it wastes memory not to copy it.

           The compromise here is to copy on shrink only if at least 25% of
           size can be shaved off. */
        if (4 * nbytes > 3 * size) {
            /* It's the same, or shrinking and new/old > 3/4. */
            *newptr_p = p;
            return 1;
        }
        size = nbytes;
    }

    bp = _PyObject_Malloc(ctx, nbytes);
    if (bp != NULL) {
        memcpy(bp, p, size);
        _PyObject_Free(ctx, p);
    }
    *newptr_p = bp;
    return 1;
}

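/* A worked example of the 25% shrink rule above (editorial, not upstream
 * text): for a block in the 64-byte size class, shrinking to 50 bytes keeps
 * the pointer unchanged because 4*50 == 200 > 3*64 == 192, while shrinking
 * to 40 bytes copies to a smaller class because 4*40 == 160 <= 192, i.e. at
 * least a quarter of the block would be reclaimed.
 */
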

static void *
_PyObject_Realloc(void *ctx, void *ptr, size_t nbytes)
{
    void *ptr2;

    if (ptr == NULL) {
        return _PyObject_Malloc(ctx, nbytes);
    }

    if (pymalloc_realloc(ctx, &ptr2, ptr, nbytes)) {
        return ptr2;
    }

    return PyMem_RawRealloc(ptr, nbytes);
}

#else   /* ! WITH_PYMALLOC */

/*==========================================================================*/
/* pymalloc not enabled:  Redirect the entry points to malloc.  These will
 * only be used by extensions that are compiled with pymalloc enabled. */

Py_ssize_t
_Py_GetAllocatedBlocks(void)
{
    return 0;
}

#endif /* WITH_PYMALLOC */


/*==========================================================================*/
/* A cross-platform debugging allocator.  This doesn't manage memory directly;
 * it wraps a real allocator, adding extra debugging info to the memory blocks.
 */

/* Special bytes broadcast into debug memory blocks at appropriate times.
 * Strings of these are unlikely to be valid addresses, floats, ints or
 * 7-bit ASCII.
 */
#undef CLEANBYTE
#undef DEADBYTE
#undef FORBIDDENBYTE
#define CLEANBYTE      0xCB    /* clean (newly allocated) memory */
#define DEADBYTE       0xDB    /* dead (newly freed) memory */
#define FORBIDDENBYTE  0xFB    /* untouchable bytes at each end of a block */

static size_t serialno = 0;     /* incremented on each debug {m,re}alloc */

/* serialno is always incremented via calling this routine.  The point is
 * to supply a single place to set a breakpoint.
 */
static void
bumpserialno(void)
{
    ++serialno;
}

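/* Usage note (editorial, assuming a gdb-like debugger): the breakpoint hook
 * above is typically used to catch the allocation with a known serial
 * number, e.g. one reported in a debug dump:
 *
 *     (gdb) break bumpserialno if serialno == 12344
 *     (gdb) run
 *
 * so execution stops just as serial number 12345 is about to be handed out
 * (the numbers are illustrative only).
 */
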
#define SST SIZEOF_SIZE_T

/* Read sizeof(size_t) bytes at p as a big-endian size_t. */
static size_t
read_size_t(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    size_t result = *q++;
    int i;

    for (i = SST; --i > 0; ++q)
        result = (result << 8) | *q;
    return result;
}

/* Write n as a big-endian size_t, MSB at address p, LSB at
 * p + sizeof(size_t) - 1.
 */
static void
write_size_t(void *p, size_t n)
{
    uint8_t *q = (uint8_t *)p + SST - 1;
    int i;

    for (i = SST; --i >= 0; --q) {
        *q = (uint8_t)(n & 0xff);
        n >>= 8;
    }
}
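
/* A minimal round-trip sketch of the helpers above (hypothetical function,
 * not part of the allocator).  With SST == 8, writing n == 0x0102 stores the
 * bytes 00 00 00 00 00 00 01 02 at p, so read_size_t() recovers the value
 * regardless of host endianness.
 */
static inline int
example_size_t_roundtrip(void)
{
    uint8_t buf[SST];
    write_size_t(buf, (size_t)0x0102);
    return read_size_t(buf) == (size_t)0x0102;  /* always 1 */
}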

/* Let S = sizeof(size_t).  The debug malloc asks for 4*S extra bytes and
   fills them with useful stuff, here calling the underlying malloc's result p:

p[0: S]
    Number of bytes originally asked for.  This is a size_t, big-endian (easier
    to read in a memory dump).
p[S]
    API ID.  See PEP 445.  This is a single character identifying the
    allocator domain ('r', 'm' or 'o' for the raw, mem and object domains).
p[S+1: 2*S]
    Copies of FORBIDDENBYTE.  Used to catch under-writes and under-reads.
p[2*S: 2*S+n]
    The requested memory, filled with copies of CLEANBYTE.
    Used to catch references to uninitialized memory.
    &p[2*S] is returned.  Note that this is 8-byte aligned if pymalloc
    handled the request itself.
p[2*S+n: 2*S+n+S]
    Copies of FORBIDDENBYTE.  Used to catch over-writes and over-reads.
p[2*S+n+S: 2*S+n+2*S]
    A serial number, incremented by 1 on each call to _PyMem_DebugMalloc
    and _PyMem_DebugRealloc.
    This is a big-endian size_t.
    If "bad memory" is detected later, the serial number gives an
    excellent way to set a breakpoint on the next run, to capture the
    instant at which this block was passed out.
*/

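/* A worked example of the layout above (editorial, assuming S == 8, i.e. a
 * 64-bit build, and a 10-byte request): total == 10 + 4*8 == 42 bytes.  The
 * size field is p[0:8], the API id is p[8], forbidden bytes fill p[9:16]
 * and p[26:34], the caller's 10 data bytes are p[16:26], and the serial
 * number is p[34:42].  The pointer handed back is p + 16.
 */
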
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 2006 | static void * |
Victor Stinner | c4aec36 | 2016-03-14 22:26:53 +0100 | [diff] [blame] | 2007 | _PyMem_DebugRawAlloc(int use_calloc, void *ctx, size_t nbytes) |
Kristján Valur Jónsson | ae4cfb1 | 2009-09-28 13:45:02 +0000 | [diff] [blame] | 2008 | { |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 2009 | debug_alloc_api_t *api = (debug_alloc_api_t *)ctx; |
Serhiy Storchaka | 3cc4c53 | 2017-11-07 12:46:42 +0200 | [diff] [blame] | 2010 | uint8_t *p; /* base address of malloc'ed pad block */ |
Victor Stinner | 9ed83c4 | 2017-10-31 12:18:10 -0700 | [diff] [blame] | 2011 | uint8_t *data; /* p + 2*SST == pointer to data bytes */ |
| 2012 | uint8_t *tail; /* data + nbytes == pointer to tail pad bytes */ |
| 2013 | size_t total; /* 2 * SST + nbytes + 2 * SST */ |
| 2014 | |
| 2015 | if (nbytes > (size_t)PY_SSIZE_T_MAX - 4 * SST) { |
| 2016 | /* integer overflow: can't represent total as a Py_ssize_t */ |
| 2017 | return NULL; |
| 2018 | } |
| 2019 | total = nbytes + 4 * SST; |
| 2020 | |
| 2021 | /* Layout: [SSSS IFFF CCCC...CCCC FFFF NNNN] |
| 2022 | * ^--- p ^--- data ^--- tail |
| 2023 | S: nbytes stored as size_t |
| 2024 | I: API identifier (1 byte) |
| 2025 | F: Forbidden bytes (size_t - 1 bytes before, size_t bytes after) |
| 2026 | C: Clean bytes used later to store actual data |
| 2027 | N: Serial number stored as size_t */ |
| 2028 | |
| 2029 | if (use_calloc) { |
| 2030 | p = (uint8_t *)api->alloc.calloc(api->alloc.ctx, 1, total); |
| 2031 | } |
| 2032 | else { |
| 2033 | p = (uint8_t *)api->alloc.malloc(api->alloc.ctx, total); |
| 2034 | } |
| 2035 | if (p == NULL) { |
| 2036 | return NULL; |
| 2037 | } |
| 2038 | data = p + 2*SST; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2039 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2040 | bumpserialno(); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2041 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2042 | /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */ |
| 2043 | write_size_t(p, nbytes); |
Benjamin Peterson | 19517e4 | 2016-09-18 19:22:22 -0700 | [diff] [blame] | 2044 | p[SST] = (uint8_t)api->api_id; |
Victor Stinner | 0507bf5 | 2013-07-07 02:05:46 +0200 | [diff] [blame] | 2045 | memset(p + SST + 1, FORBIDDENBYTE, SST-1); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2046 | |
Victor Stinner | 9ed83c4 | 2017-10-31 12:18:10 -0700 | [diff] [blame] | 2047 | if (nbytes > 0 && !use_calloc) { |
| 2048 | memset(data, CLEANBYTE, nbytes); |
| 2049 | } |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2050 | |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2051 | /* at tail, write pad (SST bytes) and serialno (SST bytes) */ |
Victor Stinner | 9ed83c4 | 2017-10-31 12:18:10 -0700 | [diff] [blame] | 2052 | tail = data + nbytes; |
Antoine Pitrou | f95a1b3 | 2010-05-09 15:52:27 +0000 | [diff] [blame] | 2053 | memset(tail, FORBIDDENBYTE, SST); |
Victor Stinner | 9e87e77 | 2017-11-24 12:09:24 +0100 | [diff] [blame] | 2054 | write_size_t(tail + SST, serialno); |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2055 | |
Victor Stinner | 9ed83c4 | 2017-10-31 12:18:10 -0700 | [diff] [blame] | 2056 | return data; |
Tim Peters | ddea208 | 2002-03-23 10:03:50 +0000 | [diff] [blame] | 2057 | } |

static void *
_PyMem_DebugRawMalloc(void *ctx, size_t nbytes)
{
    return _PyMem_DebugRawAlloc(0, ctx, nbytes);
}

static void *
_PyMem_DebugRawCalloc(void *ctx, size_t nelem, size_t elsize)
{
    size_t nbytes;
    assert(elsize == 0 || nelem <= (size_t)PY_SSIZE_T_MAX / elsize);
    nbytes = nelem * elsize;
    return _PyMem_DebugRawAlloc(1, ctx, nbytes);
}
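
/* Note that the overflow check on nelem * elsize above is only an assert:
 * callers are expected to reject oversized requests before getting this
 * far.  A caller-side check mirroring that assert would look like this
 * (hypothetical sketch, not the actual wrapper code):
 *
 *     if (elsize != 0 && nelem > (size_t)PY_SSIZE_T_MAX / elsize)
 *         return NULL;
 */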


/* The debug free first checks the 2*SST bytes on each end for sanity (in
   particular, that the FORBIDDENBYTEs with the api ID are still intact).
   Then fills the original bytes with DEADBYTE.
   Then calls the underlying free.
*/
static void
_PyMem_DebugRawFree(void *ctx, void *p)
{
    /* PyMem_Free(NULL) has no effect */
    if (p == NULL) {
        return;
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *q = (uint8_t *)p - 2*SST;  /* address returned from malloc */
    size_t nbytes;

    _PyMem_DebugCheckAddress(api->api_id, p);
    nbytes = read_size_t(q);
    nbytes += 4 * SST;
    memset(q, DEADBYTE, nbytes);
    api->alloc.free(api->alloc.ctx, q);
}
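
/* As a result, a use-after-free bug often surfaces as reads of the DEADBYTE
 * pattern (0xDB, defined earlier in this file), which is easy to spot in a
 * debugger or a memory dump -- assuming the underlying allocator has not
 * already reused or unmapped the block.  Hypothetical buggy code:
 *
 *     char *s = PyMem_Malloc(16);
 *     PyMem_Free(s);
 *     s[0];    <- typically reads 0xDB under the debug hooks
 */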


static void *
_PyMem_DebugRawRealloc(void *ctx, void *p, size_t nbytes)
{
    if (p == NULL) {
        return _PyMem_DebugRawAlloc(0, ctx, nbytes);
    }

    debug_alloc_api_t *api = (debug_alloc_api_t *)ctx;
    uint8_t *head;        /* base address of malloc'ed pad block */
    uint8_t *data;        /* pointer to data bytes */
    uint8_t *r;
    uint8_t *tail;        /* data + nbytes == pointer to tail pad bytes */
    size_t total;         /* 2 * SST + nbytes + 2 * SST */
    size_t original_nbytes;
    size_t block_serialno;
#define ERASED_SIZE 64
    uint8_t save[2*ERASED_SIZE];  /* A copy of erased bytes. */

    _PyMem_DebugCheckAddress(api->api_id, p);

    data = (uint8_t *)p;
    head = data - 2*SST;
    original_nbytes = read_size_t(head);
    if (nbytes > (size_t)PY_SSIZE_T_MAX - 4*SST) {
        /* integer overflow: can't represent total as a Py_ssize_t */
        return NULL;
    }
    total = nbytes + 4*SST;

    tail = data + original_nbytes;
    block_serialno = read_size_t(tail + SST);
    /* Mark the header, the trailer, ERASED_SIZE bytes at the beginning and
       ERASED_SIZE bytes at the end as dead, saving a copy of the erased
       bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(save, data, original_nbytes);
        memset(data - 2*SST, DEADBYTE, original_nbytes + 4*SST);
    }
    else {
        memcpy(save, data, ERASED_SIZE);
        memset(head, DEADBYTE, ERASED_SIZE + 2*SST);
        memcpy(&save[ERASED_SIZE], tail - ERASED_SIZE, ERASED_SIZE);
        memset(tail - ERASED_SIZE, DEADBYTE, ERASED_SIZE + 2*SST);
    }
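    /* Worked example (with ERASED_SIZE == 64): for original_nbytes == 200,
       bytes 0..63 and 136..199 of the data area are copied into "save",
       then the header plus the first 64 data bytes, and the last 64 data
       bytes plus the trailer, are overwritten with DEADBYTE.  Bytes 64..135
       are left alone for the underlying realloc to move. */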

    /* Resize and add decorations. */
    r = (uint8_t *)api->alloc.realloc(api->alloc.ctx, head, total);
    if (r == NULL) {
        nbytes = original_nbytes;
    }
    else {
        head = r;
        bumpserialno();
        block_serialno = serialno;
    }

    write_size_t(head, nbytes);
    head[SST] = (uint8_t)api->api_id;
    memset(head + SST + 1, FORBIDDENBYTE, SST-1);
    data = head + 2*SST;

    tail = data + nbytes;
    memset(tail, FORBIDDENBYTE, SST);
    write_size_t(tail + SST, block_serialno);

    /* Restore saved bytes. */
    if (original_nbytes <= sizeof(save)) {
        memcpy(data, save, Py_MIN(nbytes, original_nbytes));
    }
    else {
        size_t i = original_nbytes - ERASED_SIZE;
        memcpy(data, save, Py_MIN(nbytes, ERASED_SIZE));
        if (nbytes > i) {
            memcpy(data + i, &save[ERASED_SIZE],
                   Py_MIN(nbytes - i, ERASED_SIZE));
        }
    }

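    /* If the underlying realloc failed, C guarantees the original block was
       left untouched; since the header, trailer and (some) data bytes were
       clobbered with DEADBYTE above, the writes above put them back, so the
       caller's block is fully intact again before failure is reported. */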
    if (r == NULL) {
        return NULL;
    }

    if (nbytes > original_nbytes) {
        /* growing: mark new extra memory clean */
        memset(data + original_nbytes, CLEANBYTE, nbytes - original_nbytes);
    }

    return data;
}

static void
_PyMem_DebugCheckGIL(void)
{
    if (!PyGILState_Check())
        Py_FatalError("Python memory allocator called "
                      "without holding the GIL");
}
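
/* Example of the kind of bug this catches (hypothetical extension code):
 *
 *     Py_BEGIN_ALLOW_THREADS
 *     buf = PyMem_Malloc(64);    <- fatal error under the debug hooks:
 *     Py_END_ALLOW_THREADS          the GIL is released at this point
 */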

static void *
_PyMem_DebugMalloc(void *ctx, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawMalloc(ctx, nbytes);
}

static void *
_PyMem_DebugCalloc(void *ctx, size_t nelem, size_t elsize)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawCalloc(ctx, nelem, elsize);
}


static void
_PyMem_DebugFree(void *ctx, void *ptr)
{
    _PyMem_DebugCheckGIL();
    _PyMem_DebugRawFree(ctx, ptr);
}


static void *
_PyMem_DebugRealloc(void *ctx, void *ptr, size_t nbytes)
{
    _PyMem_DebugCheckGIL();
    return _PyMem_DebugRawRealloc(ctx, ptr, nbytes);
}

/* Check the forbidden bytes on both ends of the memory allocated for p.
 * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,
 * and call Py_FatalError to kill the program.
 * The API id is also checked.
 */
static void
_PyMem_DebugCheckAddress(char api, const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    char msgbuf[64];
    const char *msg;
    size_t nbytes;
    const uint8_t *tail;
    int i;
    char id;

    if (p == NULL) {
        msg = "didn't expect a NULL pointer";
        goto error;
    }

    /* Check the API id */
    id = (char)q[-SST];
    if (id != api) {
        msg = msgbuf;
        snprintf(msgbuf, sizeof(msgbuf),
                 "bad ID: Allocated using API '%c', verified using API '%c'",
                 id, api);
        msgbuf[sizeof(msgbuf)-1] = 0;
        goto error;
    }

    /* Check the stuff at the start of p first: if there's underwrite
     * corruption, the number-of-bytes field may be nuts, and checking
     * the tail could lead to a segfault then.
     */
    for (i = SST-1; i >= 1; --i) {
        if (*(q-i) != FORBIDDENBYTE) {
            msg = "bad leading pad byte";
            goto error;
        }
    }

    nbytes = read_size_t(q - 2*SST);
    tail = q + nbytes;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            msg = "bad trailing pad byte";
            goto error;
        }
    }

    return;

error:
    _PyObject_DebugDumpAddress(p);
    Py_FatalError(msg);
}
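
/* Example of the kind of bug this catches (hypothetical):
 *
 *     char *s = PyMem_Malloc(8);
 *     s[8] = 'x';        <- one past the end: clobbers a FORBIDDENBYTE
 *     PyMem_Free(s);     <- aborts here with "bad trailing pad byte"
 */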

/* Display info to stderr about the memory block at p. */
static void
_PyObject_DebugDumpAddress(const void *p)
{
    const uint8_t *q = (const uint8_t *)p;
    const uint8_t *tail;
    size_t nbytes, serial;
    int i;
    int ok;
    char id;

    fprintf(stderr, "Debug memory block at address p=%p:", p);
    if (p == NULL) {
        fprintf(stderr, "\n");
        return;
    }
    id = (char)q[-SST];
    fprintf(stderr, " API '%c'\n", id);

    nbytes = read_size_t(q - 2*SST);
    fprintf(stderr, "    %" PY_FORMAT_SIZE_T "u bytes originally "
                    "requested\n", nbytes);

    /* In case this is nuts, check the leading pad bytes first. */
    fprintf(stderr, "    The %d pad bytes at p-%d are ", SST-1, SST-1);
    ok = 1;
    for (i = 1; i <= SST-1; ++i) {
        if (*(q-i) != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = SST-1; i >= 1; --i) {
            const uint8_t byte = *(q-i);
            fprintf(stderr, "        at p-%d: 0x%02x", i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }

        fputs("    Because memory is corrupted at the start, the "
              "count of bytes requested\n"
              "    may be bogus, and checking the trailing pad "
              "bytes may segfault.\n", stderr);
    }

    tail = q + nbytes;
    fprintf(stderr, "    The %d pad bytes at tail=%p are ", SST, tail);
    ok = 1;
    for (i = 0; i < SST; ++i) {
        if (tail[i] != FORBIDDENBYTE) {
            ok = 0;
            break;
        }
    }
    if (ok)
        fputs("FORBIDDENBYTE, as expected.\n", stderr);
    else {
        fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",
                FORBIDDENBYTE);
        for (i = 0; i < SST; ++i) {
            const uint8_t byte = tail[i];
            fprintf(stderr, "        at tail+%d: 0x%02x",
                    i, byte);
            if (byte != FORBIDDENBYTE)
                fputs(" *** OUCH", stderr);
            fputc('\n', stderr);
        }
    }

    serial = read_size_t(tail + SST);
    fprintf(stderr, "    The block was made by call #%" PY_FORMAT_SIZE_T
                    "u to debug malloc/realloc.\n", serial);

    if (nbytes > 0) {
        i = 0;
        fputs("    Data at p:", stderr);
        /* print up to 8 bytes at the start */
        while (q < tail && i < 8) {
            fprintf(stderr, " %02x", *q);
            ++i;
            ++q;
        }
        /* and up to 8 at the end */
        if (q < tail) {
            if (tail - q > 8) {
                fputs(" ...", stderr);
                q = tail - 8;
            }
            while (q < tail) {
                fprintf(stderr, " %02x", *q);
                ++q;
            }
        }
        fputc('\n', stderr);
    }
    fputc('\n', stderr);

    fflush(stderr);
    _PyMem_DumpTraceback(fileno(stderr), p);
}


static size_t
printone(FILE *out, const char* msg, size_t value)
{
    int i, k;
    char buf[100];
    size_t origvalue = value;

    fputs(msg, out);
    for (i = (int)strlen(msg); i < 35; ++i)
        fputc(' ', out);
    fputc('=', out);

    /* Write the value with commas. */
    i = 22;
    buf[i--] = '\0';
    buf[i--] = '\n';
    k = 3;
    do {
        size_t nextvalue = value / 10;
        unsigned int digit = (unsigned int)(value - nextvalue * 10);
        value = nextvalue;
        buf[i--] = (char)(digit + '0');
        --k;
        if (k == 0 && value && i >= 0) {
            k = 3;
            buf[i--] = ',';
        }
    } while (value && i >= 0);

    while (i >= 0)
        buf[i--] = ' ';
    fputs(buf, out);

    return origvalue;
}
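
/* For example, printone(out, "# arenas allocated current", 1234567) writes
 * a line shaped like the following: the message is padded out to 35
 * columns, and the value is right-justified with thousands separators in a
 * 21-character field after the '=':
 *
 *     # arenas allocated current         =            1,234,567
 */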

void
_PyDebugAllocatorStats(FILE *out,
                       const char *block_name, int num_blocks, size_t sizeof_block)
{
    char buf1[128];
    char buf2[128];
    PyOS_snprintf(buf1, sizeof(buf1),
                  "%d %ss * %" PY_FORMAT_SIZE_T "d bytes each",
                  num_blocks, block_name, sizeof_block);
    PyOS_snprintf(buf2, sizeof(buf2),
                  "%48s ", buf1);
    (void)printone(out, buf2, num_blocks * sizeof_block);
}
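
/* A typical caller reports an object free list, along these lines (a sketch
 * based on how the free-list dumps elsewhere in the tree use this function;
 * "numfree" is whatever counter the caller maintains):
 *
 *     _PyDebugAllocatorStats(out, "free PyListObject",
 *                            numfree, sizeof(PyListObject));
 */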


#ifdef WITH_PYMALLOC

#ifdef Py_DEBUG
/* Is target in the list?  The list is traversed via the nextpool pointers.
 * The list may be NULL-terminated, or circular.  Return 1 if target is in
 * list, else 0.
 */
static int
pool_is_in_list(const poolp target, poolp list)
{
    poolp origlist = list;
    assert(target != NULL);
    if (list == NULL)
        return 0;
    do {
        if (target == list)
            return 1;
        list = list->nextpool;
    } while (list != NULL && list != origlist);
    return 0;
}
#endif

/* Print summary info to "out" about the state of pymalloc's structures.
 * In Py_DEBUG mode, also perform some expensive internal consistency
 * checks.
 *
 * Return 0 if the memory debug hooks are not installed or no statistics
 * were written into out, return 1 otherwise.
 */
int
_PyObject_DebugMallocStats(FILE *out)
{
    if (!_PyMem_PymallocEnabled()) {
        return 0;
    }

    uint i;
    const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;
    /* # of pools, allocated blocks, and free blocks per class index */
    size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];
    /* total # of allocated bytes in used and full pools */
    size_t allocated_bytes = 0;
    /* total # of available bytes in used pools */
    size_t available_bytes = 0;
    /* # of free pools + pools not yet carved out of current arena */
    uint numfreepools = 0;
    /* # of bytes for arena alignment padding */
    size_t arena_alignment = 0;
    /* # of bytes in used and full pools used for pool_headers */
    size_t pool_header_bytes = 0;
    /* # of bytes in used and full pools wasted due to quantization,
     * i.e. the necessarily leftover space at the ends of used and
     * full pools.
     */
    size_t quantization = 0;
    /* # of arenas actually allocated. */
    size_t narenas = 0;
    /* running total -- should equal narenas * ARENA_SIZE */
    size_t total;
    char buf[128];

    fprintf(out, "Small block threshold = %d, in %u size classes.\n",
            SMALL_REQUEST_THRESHOLD, numclasses);

    for (i = 0; i < numclasses; ++i)
        numpools[i] = numblocks[i] = numfreeblocks[i] = 0;

    /* Because full pools aren't linked to from anything, it's easiest
     * to march over all the arenas.  If we're lucky, most of the memory
     * will be living in full pools -- would be a shame to miss them.
     */
    for (i = 0; i < maxarenas; ++i) {
        uint j;
        uintptr_t base = arenas[i].address;

        /* Skip arenas which are not allocated. */
        if (arenas[i].address == (uintptr_t)NULL)
            continue;
        narenas += 1;

        numfreepools += arenas[i].nfreepools;

        /* round up to pool alignment */
        if (base & (uintptr_t)POOL_SIZE_MASK) {
            arena_alignment += POOL_SIZE;
            base &= ~(uintptr_t)POOL_SIZE_MASK;
            base += POOL_SIZE;
        }

        /* visit every pool in the arena */
        assert(base <= (uintptr_t) arenas[i].pool_address);
        for (j = 0; base < (uintptr_t) arenas[i].pool_address;
             ++j, base += POOL_SIZE) {
            poolp p = (poolp)base;
            const uint sz = p->szidx;
            uint freeblocks;

            if (p->ref.count == 0) {
                /* currently unused */
#ifdef Py_DEBUG
                assert(pool_is_in_list(p, arenas[i].freepools));
#endif
                continue;
            }
            ++numpools[sz];
            numblocks[sz] += p->ref.count;
            freeblocks = NUMBLOCKS(sz) - p->ref.count;
            numfreeblocks[sz] += freeblocks;
#ifdef Py_DEBUG
            if (freeblocks > 0)
                assert(pool_is_in_list(p, usedpools[sz + sz]));
#endif
        }
    }
    assert(narenas == narenas_currently_allocated);

    fputc('\n', out);
    fputs("class   size   num pools   blocks in use  avail blocks\n"
          "-----   ----   ---------   -------------  ------------\n",
          out);

    for (i = 0; i < numclasses; ++i) {
        size_t p = numpools[i];
        size_t b = numblocks[i];
        size_t f = numfreeblocks[i];
        uint size = INDEX2SIZE(i);
        if (p == 0) {
            assert(b == 0 && f == 0);
            continue;
        }
        fprintf(out, "%5u %6u "
                     "%11" PY_FORMAT_SIZE_T "u "
                     "%15" PY_FORMAT_SIZE_T "u "
                     "%13" PY_FORMAT_SIZE_T "u\n",
                i, size, p, b, f);
        allocated_bytes += b * size;
        available_bytes += f * size;
        pool_header_bytes += p * POOL_OVERHEAD;
        quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);
    }
    fputc('\n', out);
    if (_PyMem_DebugEnabled())
        (void)printone(out, "# times object malloc called", serialno);
    (void)printone(out, "# arenas allocated total", ntimes_arena_allocated);
    (void)printone(out, "# arenas reclaimed", ntimes_arena_allocated - narenas);
    (void)printone(out, "# arenas highwater mark", narenas_highwater);
    (void)printone(out, "# arenas allocated current", narenas);

    PyOS_snprintf(buf, sizeof(buf),
                  "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",
                  narenas, ARENA_SIZE);
    (void)printone(out, buf, narenas * ARENA_SIZE);

    fputc('\n', out);

    total = printone(out, "# bytes in allocated blocks", allocated_bytes);
    total += printone(out, "# bytes in available blocks", available_bytes);

    PyOS_snprintf(buf, sizeof(buf),
                  "%u unused pools * %d bytes", numfreepools, POOL_SIZE);
    total += printone(out, buf, (size_t)numfreepools * POOL_SIZE);

    total += printone(out, "# bytes lost to pool headers", pool_header_bytes);
    total += printone(out, "# bytes lost to quantization", quantization);
    total += printone(out, "# bytes lost to arena alignment", arena_alignment);
    (void)printone(out, "Total", total);
    return 1;
}
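
/* The usual way to reach this function is sys._debugmallocstats() from
 * Python code, which dumps these statistics (plus per-type free-list
 * counts) to stderr:
 *
 *     $ python -c "import sys; sys._debugmallocstats()"
 */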

#endif /* #ifdef WITH_PYMALLOC */