| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1 | /*- |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 2 | * This allocator implementation is designed to provide scalable performance |
| 3 | * for multi-threaded programs on multi-processor systems. The following |
| 4 | * features are included for this purpose: |
| 5 | * |
| 6 | * + Multiple arenas are used if there are multiple CPUs, which reduces lock |
| 7 | * contention and cache sloshing. |
| 8 | * |
| 9 | * + Thread-specific caching is used if there are multiple threads, which |
| 10 | * reduces the amount of locking. |
| 11 | * |
| 12 | * + Cache line sharing between arenas is avoided for internal data |
| 13 | * structures. |
| 14 | * |
| 15 | * + Memory is managed in chunks and runs (chunks can be split into runs), |
| 16 | * rather than as individual pages. This provides a constant-time |
| 17 | * mechanism for associating allocations with particular arenas. |
| 18 | * |
| 19 | * Allocation requests are rounded up to the nearest size class, and no record |
| 20 | * of the original request size is maintained. Allocations are broken into |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 21 | * categories according to size class. Assuming runtime defaults, 4 KiB pages |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 22 | * and a 16 byte quantum on a 32-bit system, the size classes in each category |
| 23 | * are as follows: |
| 24 | * |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 25 | * |========================================| |
| 26 | * | Category | Subcategory | Size | |
| 27 | * |========================================| |
| 28 | * | Small | Tiny | 2 | |
| 29 | * | | | 4 | |
| 30 | * | | | 8 | |
| 31 | * | |------------------+----------| |
| 32 | * | | Quantum-spaced | 16 | |
| 33 | * | | | 32 | |
| 34 | * | | | 48 | |
| 35 | * | | | ... | |
| 36 | * | | | 96 | |
| 37 | * | | | 112 | |
| 38 | * | | | 128 | |
| 39 | * | |------------------+----------| |
| 40 | * | | Cacheline-spaced | 192 | |
| 41 | * | | | 256 | |
| 42 | * | | | 320 | |
| 43 | * | | | 384 | |
| 44 | * | | | 448 | |
| 45 | * | | | 512 | |
| 46 | * | |------------------+----------| |
| 47 | * | | Sub-page | 760 | |
| 48 | * | | | 1024 | |
| 49 | * | | | 1280 | |
| 50 | * | | | ... | |
| 51 | * | | | 3328 | |
| 52 | * | | | 3584 | |
| 53 | * | | | 3840 | |
| 54 | * |========================================| |
| 55 | * | Medium | 4 KiB | |
| 56 | * | | 6 KiB | |
| 57 | * | | 8 KiB | |
| 58 | * | | ... | |
| 59 | * | | 28 KiB | |
| 60 | * | | 30 KiB | |
| 61 | * | | 32 KiB | |
| 62 | * |========================================| |
| 63 | * | Large | 36 KiB | |
| 64 | * | | 40 KiB | |
| 65 | * | | 44 KiB | |
| 66 | * | | ... | |
| 67 | * | | 1012 KiB | |
| 68 | * | | 1016 KiB | |
| 69 | * | | 1020 KiB | |
| 70 | * |========================================| |
| 71 | * | Huge | 1 MiB | |
| 72 | * | | 2 MiB | |
| 73 | * | | 3 MiB | |
| 74 | * | | ... | |
| 75 | * |========================================| |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 76 | * |
 * Different mechanisms are used according to category:
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 78 | * |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 79 | * Small/medium : Each size class is segregated into its own set of runs. |
| 80 | * Each run maintains a bitmap of which regions are |
| 81 | * free/allocated. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 82 | * |
| 83 | * Large : Each allocation is backed by a dedicated run. Metadata are stored |
| 84 | * in the associated arena chunk header maps. |
| 85 | * |
| 86 | * Huge : Each allocation is backed by a dedicated contiguous set of chunks. |
| 87 | * Metadata are stored in a separate red-black tree. |
| 88 | * |
| 89 | ******************************************************************************* |
| 90 | */ |
| 91 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 92 | #define JEMALLOC_C_ |
| Jason Evans | b0fd501 | 2010-01-17 01:49:20 -0800 | [diff] [blame] | 93 | #include "internal/jemalloc_internal.h" |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 94 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 95 | /******************************************************************************/ |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 96 | /* Data. */ |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 97 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 98 | arena_t **arenas; |
| 99 | unsigned narenas; |
| 100 | #ifndef NO_TLS |
| 101 | static unsigned next_arena; |
| 102 | #endif |
| 103 | static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */ |
| 104 | |
| 105 | #ifndef NO_TLS |
| 106 | __thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec")); |
| 107 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 108 | |
| 109 | /* Set to true once the allocator has been initialized. */ |
| 110 | static bool malloc_initialized = false; |
| 111 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 112 | /* Used to let the initializing thread recursively allocate. */ |
| 113 | static pthread_t malloc_initializer = (unsigned long)0; |
| 114 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 115 | /* Used to avoid initialization races. */ |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 116 | static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 117 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 118 | #ifdef DYNAMIC_PAGE_SHIFT |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 119 | size_t pagesize; |
| 120 | size_t pagesize_mask; |
| 121 | size_t lg_pagesize; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 122 | #endif |
| 123 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 124 | unsigned ncpus; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 125 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 126 | /* Runtime configuration options. */ |
| 127 | const char *JEMALLOC_P(malloc_options) |
| 128 | JEMALLOC_ATTR(visibility("default")); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 129 | #ifdef JEMALLOC_DEBUG |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 130 | bool opt_abort = true; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 131 | # ifdef JEMALLOC_FILL |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 132 | bool opt_junk = true; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 133 | # endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 134 | #else |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 135 | bool opt_abort = false; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 136 | # ifdef JEMALLOC_FILL |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 137 | bool opt_junk = false; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 138 | # endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 139 | #endif |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 140 | #ifdef JEMALLOC_SYSV |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 141 | bool opt_sysv = false; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 142 | #endif |
| Jason Evans | b8f0a65 | 2009-06-29 09:41:43 -0700 | [diff] [blame] | 143 | #ifdef JEMALLOC_XMALLOC |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 144 | bool opt_xmalloc = false; |
| Jason Evans | b8f0a65 | 2009-06-29 09:41:43 -0700 | [diff] [blame] | 145 | #endif |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 146 | #ifdef JEMALLOC_FILL |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 147 | bool opt_zero = false; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 148 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 149 | static int opt_narenas_lshift = 0; |
| 150 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 151 | /******************************************************************************/ |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 152 | /* Function prototypes for non-inline static functions. */ |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 153 | |
| Jason Evans | ed1bf45 | 2010-01-19 12:11:25 -0800 | [diff] [blame^] | 154 | static void wrtmessage(void *w4opaque, const char *p1, const char *p2, |
| 155 | const char *p3, const char *p4); |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 156 | static void stats_print_atexit(void); |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 157 | static unsigned malloc_ncpus(void); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 158 | static bool malloc_init_hard(void); |
| Jason Evans | cc00a15 | 2009-06-25 18:06:48 -0700 | [diff] [blame] | 159 | static void jemalloc_prefork(void); |
| 160 | static void jemalloc_postfork(void); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 161 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 162 | /******************************************************************************/ |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 163 | /* malloc_message() setup. */ |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 164 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 165 | #ifdef JEMALLOC_HAVE_ATTR |
| 166 | JEMALLOC_ATTR(visibility("hidden")) |
| 167 | #else |
| 168 | static |
| 169 | #endif |
| 170 | void |
| Jason Evans | ed1bf45 | 2010-01-19 12:11:25 -0800 | [diff] [blame^] | 171 | wrtmessage(void *w4opaque, const char *p1, const char *p2, const char *p3, |
| 172 | const char *p4) |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 173 | { |
| 174 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 175 | if (write(STDERR_FILENO, p1, strlen(p1)) < 0 |
| 176 | || write(STDERR_FILENO, p2, strlen(p2)) < 0 |
| 177 | || write(STDERR_FILENO, p3, strlen(p3)) < 0 |
| 178 | || write(STDERR_FILENO, p4, strlen(p4)) < 0) |
| 179 | return; |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 180 | } |
| 181 | |
| Jason Evans | ed1bf45 | 2010-01-19 12:11:25 -0800 | [diff] [blame^] | 182 | void (*JEMALLOC_P(malloc_message))(void *, const char *p1, const char *p2, |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 183 | const char *p3, const char *p4) JEMALLOC_ATTR(visibility("default")) = |
| 184 | wrtmessage; |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 185 | |
| 186 | /******************************************************************************/ |
| 187 | /* |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 188 | * Begin miscellaneous support functions. |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 189 | */ |
| 190 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 191 | /* Create a new arena and insert it into the arenas array at index ind. */ |
| 192 | arena_t * |
| 193 | arenas_extend(unsigned ind) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 194 | { |
| 195 | arena_t *ret; |
| 196 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 197 | /* Allocate enough space for trailing bins. */ |
| 198 | ret = (arena_t *)base_alloc(sizeof(arena_t) |
| 199 | + (sizeof(arena_bin_t) * (nbins - 1))); |
| 200 | if (ret != NULL && arena_new(ret, ind) == false) { |
| 201 | arenas[ind] = ret; |
| 202 | return (ret); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 203 | } |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 204 | /* Only reached if there is an OOM error. */ |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 205 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 206 | /* |
| 207 | * OOM here is quite inconvenient to propagate, since dealing with it |
| 208 | * would require a check for failure in the fast path. Instead, punt |
| 209 | * by using arenas[0]. In practice, this is an extremely unlikely |
| 210 | * failure. |
| 211 | */ |
| 212 | malloc_write4("<jemalloc>", ": Error initializing arena\n", "", ""); |
| 213 | if (opt_abort) |
| 214 | abort(); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 215 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 216 | return (arenas[0]); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 217 | } |
| 218 | |
| 219 | #ifndef NO_TLS |
| 220 | /* |
| 221 | * Choose an arena based on a per-thread value (slow-path code only, called |
| 222 | * only by choose_arena()). |
| 223 | */ |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 224 | arena_t * |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 225 | choose_arena_hard(void) |
| 226 | { |
| 227 | arena_t *ret; |
| 228 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 229 | if (narenas > 1) { |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 230 | malloc_mutex_lock(&arenas_lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 231 | if ((ret = arenas[next_arena]) == NULL) |
| 232 | ret = arenas_extend(next_arena); |
| 233 | next_arena = (next_arena + 1) % narenas; |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 234 | malloc_mutex_unlock(&arenas_lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 235 | } else |
| 236 | ret = arenas[0]; |
| 237 | |
| 238 | arenas_map = ret; |
| 239 | |
| 240 | return (ret); |
| 241 | } |
| 242 | #endif |
| 243 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 244 | static inline void * |
| 245 | ipalloc(size_t alignment, size_t size) |
| 246 | { |
| 247 | void *ret; |
| 248 | size_t ceil_size; |
| 249 | |
| 250 | /* |
| 251 | * Round size up to the nearest multiple of alignment. |
| 252 | * |
| 253 | * This done, we can take advantage of the fact that for each small |
| 254 | * size class, every object is aligned at the smallest power of two |
| 255 | * that is non-zero in the base two representation of the size. For |
| 256 | * example: |
| 257 | * |
| 258 | * Size | Base 2 | Minimum alignment |
| 259 | * -----+----------+------------------ |
| 260 | * 96 | 1100000 | 32 |
| 261 | * 144 | 10100000 | 32 |
| 262 | * 192 | 11000000 | 64 |
| 263 | * |
| 264 | * Depending on runtime settings, it is possible that arena_malloc() |
| 265 | * will further round up to a power of two, but that never causes |
| 266 | * correctness issues. |
| 267 | */ |
| 268 | ceil_size = (size + (alignment - 1)) & (-alignment); |
| 269 | /* |
| 270 | * (ceil_size < size) protects against the combination of maximal |
| 271 | * alignment and size greater than maximal alignment. |
| 272 | */ |
| 273 | if (ceil_size < size) { |
| 274 | /* size_t overflow. */ |
| 275 | return (NULL); |
| 276 | } |
| 277 | |
| 278 | if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE |
| 279 | && ceil_size <= arena_maxclass)) |
| Jason Evans | cc00a15 | 2009-06-25 18:06:48 -0700 | [diff] [blame] | 280 | ret = arena_malloc(ceil_size, false); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 281 | else { |
| 282 | size_t run_size; |
| 283 | |
| 284 | /* |
| 285 | * We can't achieve subpage alignment, so round up alignment |
| 286 | * permanently; it makes later calculations simpler. |
| 287 | */ |
| 288 | alignment = PAGE_CEILING(alignment); |
| 289 | ceil_size = PAGE_CEILING(size); |
| 290 | /* |
| 291 | * (ceil_size < size) protects against very large sizes within |
| 292 | * PAGE_SIZE of SIZE_T_MAX. |
| 293 | * |
| 294 | * (ceil_size + alignment < ceil_size) protects against the |
| 295 | * combination of maximal alignment and ceil_size large enough |
| 296 | * to cause overflow. This is similar to the first overflow |
| 297 | * check above, but it needs to be repeated due to the new |
| 298 | * ceil_size value, which may now be *equal* to maximal |
| 299 | * alignment, whereas before we only detected overflow if the |
| 300 | * original size was *greater* than maximal alignment. |
| 301 | */ |
| 302 | if (ceil_size < size || ceil_size + alignment < ceil_size) { |
| 303 | /* size_t overflow. */ |
| 304 | return (NULL); |
| 305 | } |
| 306 | |
| 307 | /* |
| 308 | * Calculate the size of the over-size run that arena_palloc() |
| 309 | * would need to allocate in order to guarantee the alignment. |
| 310 | */ |
| 311 | if (ceil_size >= alignment) |
| 312 | run_size = ceil_size + alignment - PAGE_SIZE; |
| 313 | else { |
| 314 | /* |
| 315 | * It is possible that (alignment << 1) will cause |
| 316 | * overflow, but it doesn't matter because we also |
| 317 | * subtract PAGE_SIZE, which in the case of overflow |
| 318 | * leaves us with a very large run_size. That causes |
| 319 | * the first conditional below to fail, which means |
| 320 | * that the bogus run_size value never gets used for |
| 321 | * anything important. |
| 322 | */ |
| 323 | run_size = (alignment << 1) - PAGE_SIZE; |
| 324 | } |
| 325 | |
| 326 | if (run_size <= arena_maxclass) { |
| 327 | ret = arena_palloc(choose_arena(), alignment, ceil_size, |
| 328 | run_size); |
| 329 | } else if (alignment <= chunksize) |
| 330 | ret = huge_malloc(ceil_size, false); |
| 331 | else |
| 332 | ret = huge_palloc(alignment, ceil_size); |
| 333 | } |
| 334 | |
| 335 | assert(((uintptr_t)ret & (alignment - 1)) == 0); |
| 336 | return (ret); |
| 337 | } |
| 338 | |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 339 | static void |
| 340 | stats_print_atexit(void) |
| 341 | { |
| 342 | |
| 343 | #if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS)) |
| 344 | unsigned i; |
| 345 | |
| 346 | /* |
| 347 | * Merge stats from extant threads. This is racy, since individual |
| 348 | * threads do not lock when recording tcache stats events. As a |
| 349 | * consequence, the final stats may be slightly out of date by the time |
| 350 | * they are reported, if other threads continue to allocate. |
| 351 | */ |
| 352 | for (i = 0; i < narenas; i++) { |
| 353 | arena_t *arena = arenas[i]; |
| 354 | if (arena != NULL) { |
| 355 | tcache_t *tcache; |
| 356 | |
| 357 | malloc_mutex_lock(&arena->lock); |
| 358 | ql_foreach(tcache, &arena->tcache_ql, link) { |
| 359 | tcache_stats_merge(tcache, arena); |
| 360 | } |
| 361 | malloc_mutex_unlock(&arena->lock); |
| 362 | } |
| 363 | } |
| 364 | #endif |
| Jason Evans | ed1bf45 | 2010-01-19 12:11:25 -0800 | [diff] [blame^] | 365 | JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 366 | } |
| 367 | |
| 368 | static inline void * |
| 369 | iralloc(void *ptr, size_t size) |
| 370 | { |
| 371 | size_t oldsize; |
| 372 | |
| 373 | assert(ptr != NULL); |
| 374 | assert(size != 0); |
| 375 | |
| 376 | oldsize = isalloc(ptr); |
| 377 | |
| 378 | if (size <= arena_maxclass) |
| 379 | return (arena_ralloc(ptr, size, oldsize)); |
| 380 | else |
| 381 | return (huge_ralloc(ptr, size, oldsize)); |
| 382 | } |
| 383 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 384 | /* |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 385 | * End miscellaneous support functions. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 386 | */ |
| 387 | /******************************************************************************/ |
| 388 | /* |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 389 | * Begin initialization functions. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 390 | */ |
| 391 | |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 392 | static unsigned |
| 393 | malloc_ncpus(void) |
| 394 | { |
| 395 | unsigned ret; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 396 | long result; |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 397 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 398 | result = sysconf(_SC_NPROCESSORS_ONLN); |
| 399 | if (result == -1) { |
| 400 | /* Error. */ |
| 401 | ret = 1; |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 402 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 403 | ret = (unsigned)result; |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 404 | |
| 405 | return (ret); |
| 406 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 407 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 408 | /* |
| 409 | * FreeBSD's pthreads implementation calls malloc(3), so the malloc |
| 410 | * implementation has to take pains to avoid infinite recursion during |
| 411 | * initialization. |
| 412 | */ |
| 413 | static inline bool |
| 414 | malloc_init(void) |
| 415 | { |
| 416 | |
| 417 | if (malloc_initialized == false) |
| 418 | return (malloc_init_hard()); |
| 419 | |
| 420 | return (false); |
| 421 | } |
| 422 | |
| 423 | static bool |
| 424 | malloc_init_hard(void) |
| 425 | { |
| 426 | unsigned i; |
| 427 | int linklen; |
| 428 | char buf[PATH_MAX + 1]; |
| 429 | const char *opts; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 430 | arena_t *init_arenas[1]; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 431 | |
| 432 | malloc_mutex_lock(&init_lock); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 433 | if (malloc_initialized || malloc_initializer == pthread_self()) { |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 434 | /* |
| 435 | * Another thread initialized the allocator before this one |
| Jason Evans | a25d0a8 | 2009-11-09 14:57:38 -0800 | [diff] [blame] | 436 | * acquired init_lock, or this thread is the initializing |
| 437 | * thread, and it is recursively allocating. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 438 | */ |
| 439 | malloc_mutex_unlock(&init_lock); |
| 440 | return (false); |
| 441 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 442 | if (malloc_initializer != (unsigned long)0) { |
| 443 | /* Busy-wait until the initializing thread completes. */ |
| 444 | do { |
| 445 | malloc_mutex_unlock(&init_lock); |
| 446 | CPU_SPINWAIT; |
| 447 | malloc_mutex_lock(&init_lock); |
| 448 | } while (malloc_initialized == false); |
| 449 | return (false); |
| 450 | } |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 451 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 452 | #ifdef DYNAMIC_PAGE_SHIFT |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 453 | /* Get page size. */ |
| 454 | { |
| 455 | long result; |
| 456 | |
| 457 | result = sysconf(_SC_PAGESIZE); |
| 458 | assert(result != -1); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 459 | pagesize = (unsigned)result; |
| 460 | |
| 461 | /* |
| 462 | * We assume that pagesize is a power of 2 when calculating |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 463 | * pagesize_mask and lg_pagesize. |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 464 | */ |
| 465 | assert(((result - 1) & result) == 0); |
| 466 | pagesize_mask = result - 1; |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 467 | lg_pagesize = ffs((int)result) - 1; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 468 | } |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 469 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 470 | |
| 471 | for (i = 0; i < 3; i++) { |
| 472 | unsigned j; |
| 473 | |
| 474 | /* Get runtime configuration. */ |
| 475 | switch (i) { |
| 476 | case 0: |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 477 | if ((linklen = readlink("/etc/jemalloc.conf", buf, |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 478 | sizeof(buf) - 1)) != -1) { |
| 479 | /* |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 480 | * Use the contents of the "/etc/jemalloc.conf" |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 481 | * symbolic link's name. |
| 482 | */ |
| 483 | buf[linklen] = '\0'; |
| 484 | opts = buf; |
| 485 | } else { |
| 486 | /* No configuration specified. */ |
| 487 | buf[0] = '\0'; |
| 488 | opts = buf; |
| 489 | } |
| 490 | break; |
| 491 | case 1: |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 492 | if ((opts = getenv("JEMALLOC_OPTIONS")) != NULL) { |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 493 | /* |
| 494 | * Do nothing; opts is already initialized to |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 495 | * the value of the JEMALLOC_OPTIONS |
| 496 | * environment variable. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 497 | */ |
| 498 | } else { |
| 499 | /* No configuration specified. */ |
| 500 | buf[0] = '\0'; |
| 501 | opts = buf; |
| 502 | } |
| 503 | break; |
| 504 | case 2: |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 505 | if (JEMALLOC_P(malloc_options) != NULL) { |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 506 | /* |
| 507 | * Use options that were compiled into the |
| 508 | * program. |
| 509 | */ |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 510 | opts = JEMALLOC_P(malloc_options); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 511 | } else { |
| 512 | /* No configuration specified. */ |
| 513 | buf[0] = '\0'; |
| 514 | opts = buf; |
| 515 | } |
| 516 | break; |
| 517 | default: |
| 518 | /* NOTREACHED */ |
| 519 | assert(false); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 520 | buf[0] = '\0'; |
| 521 | opts = buf; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 522 | } |
| 523 | |
| 524 | for (j = 0; opts[j] != '\0'; j++) { |
| 525 | unsigned k, nreps; |
| 526 | bool nseen; |
| 527 | |
| 528 | /* Parse repetition count, if any. */ |
| 529 | for (nreps = 0, nseen = false;; j++, nseen = true) { |
| 530 | switch (opts[j]) { |
| 531 | case '0': case '1': case '2': case '3': |
| 532 | case '4': case '5': case '6': case '7': |
| 533 | case '8': case '9': |
| 534 | nreps *= 10; |
| 535 | nreps += opts[j] - '0'; |
| 536 | break; |
| 537 | default: |
| 538 | goto MALLOC_OUT; |
| 539 | } |
| 540 | } |
| 541 | MALLOC_OUT: |
| 542 | if (nseen == false) |
| 543 | nreps = 1; |
| 544 | |
| 545 | for (k = 0; k < nreps; k++) { |
| 546 | switch (opts[j]) { |
| 547 | case 'a': |
| 548 | opt_abort = false; |
| 549 | break; |
| 550 | case 'A': |
| 551 | opt_abort = true; |
| 552 | break; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 553 | case 'c': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 554 | if (opt_lg_cspace_max - 1 > |
| 555 | opt_lg_qspace_max && |
| 556 | opt_lg_cspace_max > |
| 557 | LG_CACHELINE) |
| 558 | opt_lg_cspace_max--; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 559 | break; |
| 560 | case 'C': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 561 | if (opt_lg_cspace_max < PAGE_SHIFT |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 562 | - 1) |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 563 | opt_lg_cspace_max++; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 564 | break; |
| Jason Evans | 45c128d | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 565 | case 'd': |
| 566 | if (opt_lg_dirty_mult + 1 < |
| 567 | (sizeof(size_t) << 3)) |
| 568 | opt_lg_dirty_mult++; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 569 | break; |
| Jason Evans | 45c128d | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 570 | case 'D': |
| 571 | if (opt_lg_dirty_mult >= 0) |
| 572 | opt_lg_dirty_mult--; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 573 | break; |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 574 | #ifdef JEMALLOC_TCACHE |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 575 | case 'g': |
| Jason Evans | 3f3ecfb | 2010-01-03 14:45:26 -0800 | [diff] [blame] | 576 | if (opt_lg_tcache_gc_sweep >= 0) |
| 577 | opt_lg_tcache_gc_sweep--; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 578 | break; |
| 579 | case 'G': |
| Jason Evans | 3f3ecfb | 2010-01-03 14:45:26 -0800 | [diff] [blame] | 580 | if (opt_lg_tcache_gc_sweep + 1 < |
| 581 | (sizeof(size_t) << 3)) |
| 582 | opt_lg_tcache_gc_sweep++; |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 583 | break; |
| 584 | case 'h': |
| Jason Evans | 279e09d | 2010-01-03 16:16:10 -0800 | [diff] [blame] | 585 | if (opt_lg_tcache_nslots > 0) |
| 586 | opt_lg_tcache_nslots--; |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 587 | break; |
| 588 | case 'H': |
| Jason Evans | 279e09d | 2010-01-03 16:16:10 -0800 | [diff] [blame] | 589 | if (opt_lg_tcache_nslots + 1 < |
| 590 | (sizeof(size_t) << 3)) |
| 591 | opt_lg_tcache_nslots++; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 592 | break; |
| 593 | #endif |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 594 | #ifdef JEMALLOC_FILL |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 595 | case 'j': |
| 596 | opt_junk = false; |
| 597 | break; |
| 598 | case 'J': |
| 599 | opt_junk = true; |
| 600 | break; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 601 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 602 | case 'k': |
| 603 | /* |
| 604 | * Chunks always require at least one |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 605 | * header page, plus enough room to |
| 606 | * hold a run for the largest medium |
| 607 | * size class (one page more than the |
| 608 | * size). |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 609 | */ |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 610 | if ((1U << (opt_lg_chunk - 1)) >= |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 611 | (2U << PAGE_SHIFT) + (1U << |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 612 | opt_lg_medium_max)) |
| 613 | opt_lg_chunk--; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 614 | break; |
| 615 | case 'K': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 616 | if (opt_lg_chunk + 1 < |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 617 | (sizeof(size_t) << 3)) |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 618 | opt_lg_chunk++; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 619 | break; |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 620 | case 'm': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 621 | if (opt_lg_medium_max > PAGE_SHIFT) |
| 622 | opt_lg_medium_max--; |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 623 | break; |
| 624 | case 'M': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 625 | if (opt_lg_medium_max + 1 < |
| 626 | opt_lg_chunk) |
| 627 | opt_lg_medium_max++; |
| Jason Evans | b237816 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 628 | break; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 629 | case 'n': |
| 630 | opt_narenas_lshift--; |
| 631 | break; |
| 632 | case 'N': |
| 633 | opt_narenas_lshift++; |
| 634 | break; |
| 635 | case 'p': |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 636 | opt_stats_print = false; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 637 | break; |
| 638 | case 'P': |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 639 | opt_stats_print = true; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 640 | break; |
| 641 | case 'q': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 642 | if (opt_lg_qspace_max > LG_QUANTUM) |
| 643 | opt_lg_qspace_max--; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 644 | break; |
| 645 | case 'Q': |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 646 | if (opt_lg_qspace_max + 1 < |
| 647 | opt_lg_cspace_max) |
| 648 | opt_lg_qspace_max++; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 649 | break; |
| Jason Evans | e9db6c9 | 2010-01-03 16:17:52 -0800 | [diff] [blame] | 650 | #ifdef JEMALLOC_TCACHE |
| 651 | case 's': |
| 652 | opt_tcache_sort = false; |
| 653 | break; |
| 654 | case 'S': |
| 655 | opt_tcache_sort = true; |
| 656 | break; |
| 657 | #endif |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 658 | #ifdef JEMALLOC_TRACE |
| 659 | case 't': |
| 660 | opt_trace = false; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 661 | break; |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 662 | case 'T': |
| 663 | opt_trace = true; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 664 | break; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 665 | #endif |
| 666 | #ifdef JEMALLOC_SYSV |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 667 | case 'v': |
| 668 | opt_sysv = false; |
| 669 | break; |
| 670 | case 'V': |
| 671 | opt_sysv = true; |
| 672 | break; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 673 | #endif |
| 674 | #ifdef JEMALLOC_XMALLOC |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 675 | case 'x': |
| 676 | opt_xmalloc = false; |
| 677 | break; |
| 678 | case 'X': |
| 679 | opt_xmalloc = true; |
| 680 | break; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 681 | #endif |
| 682 | #ifdef JEMALLOC_FILL |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 683 | case 'z': |
| 684 | opt_zero = false; |
| 685 | break; |
| 686 | case 'Z': |
| 687 | opt_zero = true; |
| 688 | break; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 689 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 690 | default: { |
| 691 | char cbuf[2]; |
| 692 | |
| 693 | cbuf[0] = opts[j]; |
| 694 | cbuf[1] = '\0'; |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 695 | malloc_write4("<jemalloc>", |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 696 | ": Unsupported character " |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 697 | "in malloc options: '", cbuf, |
| 698 | "'\n"); |
| 699 | } |
| 700 | } |
| 701 | } |
| 702 | } |
| 703 | } |
| 704 | |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 705 | #ifdef JEMALLOC_TRACE |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 706 | if (opt_trace) |
| 707 | trace_boot(); |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 708 | #endif |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 709 | if (opt_stats_print) { |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 710 | /* Print statistics at exit. */ |
| Jason Evans | 03c2237 | 2010-01-03 12:10:42 -0800 | [diff] [blame] | 711 | atexit(stats_print_atexit); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 712 | } |
| 713 | |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 714 | /* Register fork handlers. */ |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 715 | pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork); |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 716 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 717 | if (arena_boot0()) { |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 718 | malloc_mutex_unlock(&init_lock); |
| 719 | return (true); |
| 720 | } |
| 721 | |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 722 | #ifdef JEMALLOC_TCACHE |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 723 | tcache_boot(); |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 724 | #endif |
| 725 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 726 | if (chunk_boot()) { |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 727 | malloc_mutex_unlock(&init_lock); |
| 728 | return (true); |
| 729 | } |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 730 | arena_boot1(); |
| 731 | |
| 732 | if (huge_boot()) { |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 733 | malloc_mutex_unlock(&init_lock); |
| 734 | return (true); |
| 735 | } |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 736 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 737 | if (huge_boot()) { |
| Jason Evans | c9658dd | 2009-06-22 14:44:08 -0700 | [diff] [blame] | 738 | malloc_mutex_unlock(&init_lock); |
| 739 | return (true); |
| 740 | } |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 741 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 742 | /* |
| 743 | * Create enough scaffolding to allow recursive allocation in |
| 744 | * malloc_ncpus(). |
| 745 | */ |
| 746 | narenas = 1; |
| 747 | arenas = init_arenas; |
| 748 | memset(arenas, 0, sizeof(arena_t *) * narenas); |
| 749 | |
| 750 | /* |
| 751 | * Initialize one arena here. The rest are lazily created in |
| 752 | * choose_arena_hard(). |
| 753 | */ |
| 754 | arenas_extend(0); |
| 755 | if (arenas[0] == NULL) { |
| 756 | malloc_mutex_unlock(&init_lock); |
| 757 | return (true); |
| 758 | } |
| 759 | |
| 760 | #ifndef NO_TLS |
| 761 | /* |
| 762 | * Assign the initial arena to the initial thread, in order to avoid |
| 763 | * spurious creation of an extra arena if the application switches to |
| 764 | * threaded mode. |
| 765 | */ |
| 766 | arenas_map = arenas[0]; |
| 767 | #endif |
| 768 | |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 769 | malloc_mutex_init(&arenas_lock); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 770 | |
| 771 | /* Get number of CPUs. */ |
| 772 | malloc_initializer = pthread_self(); |
| 773 | malloc_mutex_unlock(&init_lock); |
| 774 | ncpus = malloc_ncpus(); |
| 775 | malloc_mutex_lock(&init_lock); |
| 776 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 777 | if (ncpus > 1) { |
| 778 | /* |
| Jason Evans | 5463a52 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 779 | * For SMP systems, create more than one arena per CPU by |
| 780 | * default. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 781 | */ |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 782 | #ifdef JEMALLOC_TCACHE |
| Jason Evans | 279e09d | 2010-01-03 16:16:10 -0800 | [diff] [blame] | 783 | if (tcache_nslots) { |
| Jason Evans | 5463a52 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 784 | /* |
| 785 | * Only large object allocation/deallocation is |
| 786 | * guaranteed to acquire an arena mutex, so we can get |
| 787 | * away with fewer arenas than without thread caching. |
| 788 | */ |
| 789 | opt_narenas_lshift += 1; |
| 790 | } else { |
| 791 | #endif |
| 792 | /* |
| 793 | * All allocations must acquire an arena mutex, so use |
| 794 | * plenty of arenas. |
| 795 | */ |
| 796 | opt_narenas_lshift += 2; |
| Jason Evans | 84cbbcb | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 797 | #ifdef JEMALLOC_TCACHE |
| Jason Evans | 5463a52 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 798 | } |
| 799 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 800 | } |
| 801 | |
| 802 | /* Determine how many arenas to use. */ |
| 803 | narenas = ncpus; |
| 804 | if (opt_narenas_lshift > 0) { |
| 805 | if ((narenas << opt_narenas_lshift) > narenas) |
| 806 | narenas <<= opt_narenas_lshift; |
| 807 | /* |
| 808 | * Make sure not to exceed the limits of what base_alloc() can |
| 809 | * handle. |
| 810 | */ |
| 811 | if (narenas * sizeof(arena_t *) > chunksize) |
| 812 | narenas = chunksize / sizeof(arena_t *); |
| 813 | } else if (opt_narenas_lshift < 0) { |
| 814 | if ((narenas >> -opt_narenas_lshift) < narenas) |
| 815 | narenas >>= -opt_narenas_lshift; |
| 816 | /* Make sure there is at least one arena. */ |
| 817 | if (narenas == 0) |
| 818 | narenas = 1; |
| 819 | } |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 820 | |
| 821 | #ifdef NO_TLS |
| 822 | if (narenas > 1) { |
| 823 | static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19, |
| 824 | 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, |
| 825 | 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, |
| 826 | 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, |
| 827 | 223, 227, 229, 233, 239, 241, 251, 257, 263}; |
| 828 | unsigned nprimes, parenas; |
| 829 | |
| 830 | /* |
| 831 | * Pick a prime number of hash arenas that is more than narenas |
| 832 | * so that direct hashing of pthread_self() pointers tends to |
| 833 | * spread allocations evenly among the arenas. |
| 834 | */ |
| 835 | assert((narenas & 1) == 0); /* narenas must be even. */ |
| Jason Evans | 94ad2b5 | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 836 | nprimes = (sizeof(primes) >> LG_SIZEOF_INT); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 837 | parenas = primes[nprimes - 1]; /* In case not enough primes. */ |
| 838 | for (i = 1; i < nprimes; i++) { |
| 839 | if (primes[i] > narenas) { |
| 840 | parenas = primes[i]; |
| 841 | break; |
| 842 | } |
| 843 | } |
| 844 | narenas = parenas; |
| 845 | } |
| 846 | #endif |
| 847 | |
| 848 | #ifndef NO_TLS |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 849 | next_arena = 0; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 850 | #endif |
| 851 | |
| 852 | /* Allocate and initialize arenas. */ |
| 853 | arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); |
| 854 | if (arenas == NULL) { |
| 855 | malloc_mutex_unlock(&init_lock); |
| 856 | return (true); |
| 857 | } |
| 858 | /* |
| 859 | * Zero the array. In practice, this should always be pre-zeroed, |
| 860 | * since it was just mmap()ed, but let's be sure. |
| 861 | */ |
| 862 | memset(arenas, 0, sizeof(arena_t *) * narenas); |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 863 | /* Copy the pointer to the one arena that was already initialized. */ |
| 864 | arenas[0] = init_arenas[0]; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 865 | |
| 866 | malloc_initialized = true; |
| 867 | malloc_mutex_unlock(&init_lock); |
| 868 | return (false); |
| 869 | } |
| 870 | |
| 871 | /* |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 872 | * End initialization functions. |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 873 | */ |
| 874 | /******************************************************************************/ |
| 875 | /* |
| 876 | * Begin malloc(3)-compatible functions. |
| 877 | */ |
| 878 | |
/*
 * malloc(3)-compatible allocation entry point.  Returns a pointer to at
 * least size bytes, or NULL with errno set to ENOMEM on failure.  A
 * zero-size request is rounded up to 1 byte unless SysV semantics are
 * enabled at run time (opt_sysv), in which case NULL is returned.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;

	/* Lazily bootstrap the allocator; failure is reported as OOM. */
	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			/* Round the zero-size request up to one byte. */
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			/* With xmalloc semantics, a 0-size request is fatal. */
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in malloc(): invalid size 0\n", "",
				    "");
				abort();
			}
# endif
			/* SysV semantics: malloc(0) returns NULL. */
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	ret = imalloc(size);

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* With xmalloc semantics, allocation failure aborts. */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in malloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_TRACE
	/* Emit a trace record for this call if tracing is enabled. */
	if (opt_trace)
		trace_malloc(ret, size);
#endif
	return (ret);
}
| 936 | |
/*
 * posix_memalign(3)-compatible entry point.  On success, stores in
 * *memptr a pointer to size bytes aligned on an alignment-byte boundary
 * and returns 0.  Returns EINVAL if alignment is not a power of two at
 * least sizeof(void *), or ENOMEM on allocation failure; *memptr is left
 * untouched on error (except in the SysV zero-size case, which succeeds
 * with *memptr == NULL).
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
	int ret;
	void *result;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				/* Round the zero-size request up to one byte. */
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				/* With xmalloc semantics, size 0 is fatal. */
				if (opt_xmalloc) {
					malloc_write4("<jemalloc>",
					    ": Error in posix_memalign(): "
					    "invalid size 0\n", "", "");
					abort();
				}
# endif
				/*
				 * SysV semantics: a zero-size request succeeds
				 * and yields a NULL pointer.
				 */
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in posix_memalign(): "
				    "invalid alignment\n", "", "");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		result = ipalloc(alignment, size);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* With xmalloc semantics, allocation failure aborts. */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in posix_memalign(): out of memory\n",
			    "", "");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_TRACE
	/* Trace the result (result is NULL on the error paths). */
	if (opt_trace)
		trace_posix_memalign(result, alignment, size);
#endif
	return (ret);
}
| 1013 | |
/*
 * calloc(3)-compatible entry point: allocates zeroed space for an array
 * of num elements of size bytes each.  Returns NULL and sets errno to
 * ENOMEM on failure, including when num * size overflows size_t.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			/* Round the zero-size request up to one byte. */
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			/* SysV semantics: return NULL for a 0-byte request. */
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	ret = icalloc(num_size);

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* With xmalloc semantics, allocation failure aborts. */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in calloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_TRACE
	/* Emit a trace record for this call if tracing is enabled. */
	if (opt_trace)
		trace_calloc(ret, num, size);
#endif
	return (ret);
}
| 1073 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1074 | JEMALLOC_ATTR(visibility("default")) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1075 | void * |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1076 | JEMALLOC_P(realloc)(void *ptr, size_t size) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1077 | { |
| 1078 | void *ret; |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1079 | #ifdef JEMALLOC_TRACE |
| 1080 | size_t old_size; |
| 1081 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1082 | |
| 1083 | if (size == 0) { |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1084 | #ifdef JEMALLOC_SYSV |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1085 | if (opt_sysv == false) |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1086 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1087 | size = 1; |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1088 | #ifdef JEMALLOC_SYSV |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1089 | else { |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1090 | if (ptr != NULL) { |
| 1091 | #ifdef JEMALLOC_TRACE |
| 1092 | if (opt_trace) |
| 1093 | old_size = isalloc(ptr); |
| 1094 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1095 | idalloc(ptr); |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1096 | } |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1097 | ret = NULL; |
| 1098 | goto RETURN; |
| 1099 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1100 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1101 | } |
| 1102 | |
| 1103 | if (ptr != NULL) { |
| Jason Evans | a25d0a8 | 2009-11-09 14:57:38 -0800 | [diff] [blame] | 1104 | assert(malloc_initialized || malloc_initializer == |
| 1105 | pthread_self()); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1106 | |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1107 | #ifdef JEMALLOC_TRACE |
| 1108 | if (opt_trace) |
| 1109 | old_size = isalloc(ptr); |
| 1110 | #endif |
| 1111 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1112 | ret = iralloc(ptr, size); |
| 1113 | |
| 1114 | if (ret == NULL) { |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1115 | #ifdef JEMALLOC_XMALLOC |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1116 | if (opt_xmalloc) { |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1117 | malloc_write4("<jemalloc>", |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 1118 | ": Error in realloc(): out of " |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1119 | "memory\n", "", ""); |
| 1120 | abort(); |
| 1121 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1122 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1123 | errno = ENOMEM; |
| 1124 | } |
| 1125 | } else { |
| 1126 | if (malloc_init()) |
| 1127 | ret = NULL; |
| 1128 | else |
| 1129 | ret = imalloc(size); |
| 1130 | |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1131 | #ifdef JEMALLOC_TRACE |
| 1132 | if (opt_trace) |
| 1133 | old_size = 0; |
| 1134 | #endif |
| 1135 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1136 | if (ret == NULL) { |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1137 | #ifdef JEMALLOC_XMALLOC |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1138 | if (opt_xmalloc) { |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1139 | malloc_write4("<jemalloc>", |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 1140 | ": Error in realloc(): out of " |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1141 | "memory\n", "", ""); |
| 1142 | abort(); |
| 1143 | } |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1144 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1145 | errno = ENOMEM; |
| 1146 | } |
| 1147 | } |
| 1148 | |
| Jason Evans | b8f0a65 | 2009-06-29 09:41:43 -0700 | [diff] [blame] | 1149 | #ifdef JEMALLOC_SYSV |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1150 | RETURN: |
| Jason Evans | b8f0a65 | 2009-06-29 09:41:43 -0700 | [diff] [blame] | 1151 | #endif |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1152 | #ifdef JEMALLOC_TRACE |
| 1153 | if (opt_trace) |
| 1154 | trace_realloc(ret, ptr, size, old_size); |
| 1155 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1156 | return (ret); |
| 1157 | } |
| 1158 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1159 | JEMALLOC_ATTR(visibility("default")) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1160 | void |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1161 | JEMALLOC_P(free)(void *ptr) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1162 | { |
| 1163 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1164 | if (ptr != NULL) { |
| Jason Evans | a25d0a8 | 2009-11-09 14:57:38 -0800 | [diff] [blame] | 1165 | assert(malloc_initialized || malloc_initializer == |
| 1166 | pthread_self()); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1167 | |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1168 | #ifdef JEMALLOC_TRACE |
| 1169 | if (opt_trace) |
| 1170 | trace_free(ptr, isalloc(ptr)); |
| 1171 | #endif |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1172 | idalloc(ptr); |
| 1173 | } |
| 1174 | } |
| 1175 | |
| 1176 | /* |
| 1177 | * End malloc(3)-compatible functions. |
| 1178 | */ |
| 1179 | /******************************************************************************/ |
| 1180 | /* |
| 1181 | * Begin non-standard functions. |
| 1182 | */ |
| 1183 | |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1184 | JEMALLOC_ATTR(visibility("default")) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1185 | size_t |
| Jason Evans | e476f8a | 2010-01-16 09:53:50 -0800 | [diff] [blame] | 1186 | JEMALLOC_P(malloc_usable_size)(const void *ptr) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1187 | { |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1188 | size_t ret; |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1189 | |
| 1190 | assert(ptr != NULL); |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1191 | ret = isalloc(ptr); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1192 | |
| Jason Evans | 569432c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1193 | #ifdef JEMALLOC_TRACE |
| 1194 | if (opt_trace) |
| 1195 | trace_malloc_usable_size(ret, ptr); |
| 1196 | #endif |
| 1197 | return (ret); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1198 | } |
| 1199 | |
#ifdef JEMALLOC_TCACHE
/*
 * Destroy the calling thread's tcache (if any), returning its cached
 * objects to the arenas.  A no-op for threads with no tcache.
 */
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_tcache_flush)(void)
{
	tcache_t *t = tcache_tls;

	if (t != NULL) {
		tcache_destroy(t);
		tcache_tls = NULL;
	}
}
#endif
| 1215 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1216 | /* |
| 1217 | * End non-standard functions. |
| 1218 | */ |
| 1219 | /******************************************************************************/ |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1220 | |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1221 | /* |
| 1222 | * The following functions are used by threading libraries for protection of |
| 1223 | * malloc during fork(). These functions are only called if the program is |
| 1224 | * running in threaded mode, so there is no need to check whether the program |
| 1225 | * is threaded here. |
| 1226 | */ |
| 1227 | |
| Jason Evans | cc00a15 | 2009-06-25 18:06:48 -0700 | [diff] [blame] | 1228 | static void |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 1229 | jemalloc_prefork(void) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1230 | { |
| 1231 | bool again; |
| 1232 | unsigned i, j; |
| 1233 | arena_t *larenas[narenas], *tarenas[narenas]; |
| 1234 | |
| 1235 | /* Acquire all mutexes in a safe order. */ |
| 1236 | |
| 1237 | /* |
| 1238 | * arenas_lock must be acquired after all of the arena mutexes, in |
| 1239 | * order to avoid potential deadlock with arena_lock_balance[_hard](). |
| 1240 | * Since arenas_lock protects the arenas array, the following code has |
| 1241 | * to race with arenas_extend() callers until it succeeds in locking |
| 1242 | * all arenas before locking arenas_lock. |
| 1243 | */ |
| 1244 | memset(larenas, 0, sizeof(arena_t *) * narenas); |
| 1245 | do { |
| 1246 | again = false; |
| 1247 | |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1248 | malloc_mutex_lock(&arenas_lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1249 | for (i = 0; i < narenas; i++) { |
| 1250 | if (arenas[i] != larenas[i]) { |
| 1251 | memcpy(tarenas, arenas, sizeof(arena_t *) * |
| 1252 | narenas); |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1253 | malloc_mutex_unlock(&arenas_lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1254 | for (j = 0; j < narenas; j++) { |
| 1255 | if (larenas[j] != tarenas[j]) { |
| 1256 | larenas[j] = tarenas[j]; |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1257 | malloc_mutex_lock( |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1258 | &larenas[j]->lock); |
| 1259 | } |
| 1260 | } |
| 1261 | again = true; |
| 1262 | break; |
| 1263 | } |
| 1264 | } |
| 1265 | } while (again); |
| 1266 | |
| 1267 | malloc_mutex_lock(&base_mtx); |
| 1268 | |
| 1269 | malloc_mutex_lock(&huge_mtx); |
| 1270 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1271 | #ifdef JEMALLOC_DSS |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1272 | malloc_mutex_lock(&dss_mtx); |
| 1273 | #endif |
| 1274 | } |
| 1275 | |
| Jason Evans | cc00a15 | 2009-06-25 18:06:48 -0700 | [diff] [blame] | 1276 | static void |
| Jason Evans | 804c9ec | 2009-06-22 17:44:33 -0700 | [diff] [blame] | 1277 | jemalloc_postfork(void) |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1278 | { |
| 1279 | unsigned i; |
| 1280 | arena_t *larenas[narenas]; |
| 1281 | |
| 1282 | /* Release all mutexes, now that fork() has completed. */ |
| 1283 | |
| Jason Evans | b7924f5 | 2009-06-23 19:01:18 -0700 | [diff] [blame] | 1284 | #ifdef JEMALLOC_DSS |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1285 | malloc_mutex_unlock(&dss_mtx); |
| 1286 | #endif |
| 1287 | |
| 1288 | malloc_mutex_unlock(&huge_mtx); |
| 1289 | |
| 1290 | malloc_mutex_unlock(&base_mtx); |
| 1291 | |
| 1292 | memcpy(larenas, arenas, sizeof(arena_t *) * narenas); |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1293 | malloc_mutex_unlock(&arenas_lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1294 | for (i = 0; i < narenas; i++) { |
| 1295 | if (larenas[i] != NULL) |
| Jason Evans | 3ee7a5c | 2009-12-29 00:09:15 -0800 | [diff] [blame] | 1296 | malloc_mutex_unlock(&larenas[i]->lock); |
| Jason Evans | 289053c | 2009-06-22 12:08:42 -0700 | [diff] [blame] | 1297 | } |
| 1298 | } |