blob: 6db5b014664bd45d51b7d1cd1e24a3c4d309f560 [file] [log] [blame]
Jason Evans289053c2009-06-22 12:08:42 -07001/*-
Jason Evans289053c2009-06-22 12:08:42 -07002 * This allocator implementation is designed to provide scalable performance
3 * for multi-threaded programs on multi-processor systems. The following
4 * features are included for this purpose:
5 *
6 * + Multiple arenas are used if there are multiple CPUs, which reduces lock
7 * contention and cache sloshing.
8 *
9 * + Thread-specific caching is used if there are multiple threads, which
10 * reduces the amount of locking.
11 *
12 * + Cache line sharing between arenas is avoided for internal data
13 * structures.
14 *
15 * + Memory is managed in chunks and runs (chunks can be split into runs),
16 * rather than as individual pages. This provides a constant-time
17 * mechanism for associating allocations with particular arenas.
18 *
19 * Allocation requests are rounded up to the nearest size class, and no record
20 * of the original request size is maintained. Allocations are broken into
Jason Evansb2378162009-12-29 00:09:15 -080021 * categories according to size class. Assuming runtime defaults, 4 KiB pages
Jason Evans289053c2009-06-22 12:08:42 -070022 * and a 16 byte quantum on a 32-bit system, the size classes in each category
23 * are as follows:
24 *
Jason Evansb2378162009-12-29 00:09:15 -080025 * |========================================|
26 * | Category | Subcategory | Size |
27 * |========================================|
28 * | Small | Tiny | 2 |
29 * | | | 4 |
30 * | | | 8 |
31 * | |------------------+----------|
32 * | | Quantum-spaced | 16 |
33 * | | | 32 |
34 * | | | 48 |
35 * | | | ... |
36 * | | | 96 |
37 * | | | 112 |
38 * | | | 128 |
39 * | |------------------+----------|
40 * | | Cacheline-spaced | 192 |
41 * | | | 256 |
42 * | | | 320 |
43 * | | | 384 |
44 * | | | 448 |
45 * | | | 512 |
46 * | |------------------+----------|
47 * | | Sub-page | 760 |
48 * | | | 1024 |
49 * | | | 1280 |
50 * | | | ... |
51 * | | | 3328 |
52 * | | | 3584 |
53 * | | | 3840 |
54 * |========================================|
55 * | Medium | 4 KiB |
56 * | | 6 KiB |
57 * | | 8 KiB |
58 * | | ... |
59 * | | 28 KiB |
60 * | | 30 KiB |
61 * | | 32 KiB |
62 * |========================================|
63 * | Large | 36 KiB |
64 * | | 40 KiB |
65 * | | 44 KiB |
66 * | | ... |
67 * | | 1012 KiB |
68 * | | 1016 KiB |
69 * | | 1020 KiB |
70 * |========================================|
71 * | Huge | 1 MiB |
72 * | | 2 MiB |
73 * | | 3 MiB |
74 * | | ... |
75 * |========================================|
Jason Evans289053c2009-06-22 12:08:42 -070076 *
 * Different mechanisms are used according to category:
Jason Evans289053c2009-06-22 12:08:42 -070078 *
Jason Evansb2378162009-12-29 00:09:15 -080079 * Small/medium : Each size class is segregated into its own set of runs.
80 * Each run maintains a bitmap of which regions are
81 * free/allocated.
Jason Evans289053c2009-06-22 12:08:42 -070082 *
83 * Large : Each allocation is backed by a dedicated run. Metadata are stored
84 * in the associated arena chunk header maps.
85 *
86 * Huge : Each allocation is backed by a dedicated contiguous set of chunks.
87 * Metadata are stored in a separate red-black tree.
88 *
89 *******************************************************************************
90 */
91
Jason Evanse476f8a2010-01-16 09:53:50 -080092#define JEMALLOC_C_
Jason Evansb0fd5012010-01-17 01:49:20 -080093#include "internal/jemalloc_internal.h"
Jason Evans289053c2009-06-22 12:08:42 -070094
/******************************************************************************/
/* Data. */

/* Array of pointers to all extant arenas, and its current length. */
arena_t **arenas;
unsigned narenas;
#ifndef NO_TLS
/* Next arena index handed out round-robin by choose_arena_hard(). */
static unsigned next_arena;
#endif
static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */

#ifndef NO_TLS
/* Per-thread cache of the arena assigned to the calling thread. */
__thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;

#ifdef DYNAMIC_PAGE_SHIFT
/* Page size and derived values, discovered via sysconf(3) during init. */
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

/* Number of CPUs, detected by malloc_ncpus() during initialization. */
unsigned ncpus;

/* Runtime configuration options (parsed in malloc_init_hard()). */
const char *JEMALLOC_P(malloc_options)
    JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;	/* Abort on error; defaults on in debug builds. */
# ifdef JEMALLOC_FILL
bool opt_junk = true;	/* Junk-fill; defaults on in debug builds. */
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
/* SysV semantics for zero-size requests (see size == 0 handling in malloc). */
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
/* Toggled by 'x'/'X' option flags; presumably abort-on-OOM -- behavior
 * defined elsewhere, confirm against the allocation paths. */
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;	/* Zero-fill allocations; toggled by 'z'/'Z'. */
#endif
/* Shift applied to the CPU-derived default arena count ('n'/'N' flags). */
static int opt_narenas_lshift = 0;
150
Jason Evans289053c2009-06-22 12:08:42 -0700151/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -0800152/* Function prototypes for non-inline static functions. */
Jason Evans289053c2009-06-22 12:08:42 -0700153
Jason Evansed1bf452010-01-19 12:11:25 -0800154static void wrtmessage(void *w4opaque, const char *p1, const char *p2,
155 const char *p3, const char *p4);
Jason Evans03c22372010-01-03 12:10:42 -0800156static void stats_print_atexit(void);
Jason Evansc9658dd2009-06-22 14:44:08 -0700157static unsigned malloc_ncpus(void);
Jason Evans289053c2009-06-22 12:08:42 -0700158static bool malloc_init_hard(void);
Jason Evanscc00a152009-06-25 18:06:48 -0700159static void jemalloc_prefork(void);
160static void jemalloc_postfork(void);
Jason Evans289053c2009-06-22 12:08:42 -0700161
Jason Evans289053c2009-06-22 12:08:42 -0700162/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -0800163/* malloc_message() setup. */
Jason Evans289053c2009-06-22 12:08:42 -0700164
#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *w4opaque, const char *p1, const char *p2, const char *p3,
    const char *p4)
{
	const char *pieces[4];
	unsigned i;

	/*
	 * Best-effort write of the four message fragments to stderr, in
	 * order, stopping at the first failed write(2).  Errors are
	 * deliberately swallowed -- there is nowhere to report them.
	 * w4opaque is unused by this default implementation; it exists so
	 * that replacement malloc_message callbacks can carry state.
	 */
	pieces[0] = p1;
	pieces[1] = p2;
	pieces[2] = p3;
	pieces[3] = p4;
	for (i = 0; i < 4; i++) {
		if (write(STDERR_FILENO, pieces[i], strlen(pieces[i])) < 0)
			return;
	}
}
181
/*
 * User-overridable diagnostic callback; defaults to wrtmessage() above.  The
 * void * argument is an opaque cookie passed through to the callback.
 */
void (*JEMALLOC_P(malloc_message))(void *, const char *p1, const char *p2,
    const char *p3, const char *p4) JEMALLOC_ATTR(visibility("default")) =
    wrtmessage;
Jason Evansc9658dd2009-06-22 14:44:08 -0700185
186/******************************************************************************/
187/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800188 * Begin miscellaneous support functions.
Jason Evansb7924f52009-06-23 19:01:18 -0700189 */
190
Jason Evanse476f8a2010-01-16 09:53:50 -0800191/* Create a new arena and insert it into the arenas array at index ind. */
192arena_t *
193arenas_extend(unsigned ind)
Jason Evans289053c2009-06-22 12:08:42 -0700194{
195 arena_t *ret;
196
Jason Evanse476f8a2010-01-16 09:53:50 -0800197 /* Allocate enough space for trailing bins. */
198 ret = (arena_t *)base_alloc(sizeof(arena_t)
199 + (sizeof(arena_bin_t) * (nbins - 1)));
200 if (ret != NULL && arena_new(ret, ind) == false) {
201 arenas[ind] = ret;
202 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -0700203 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800204 /* Only reached if there is an OOM error. */
Jason Evans289053c2009-06-22 12:08:42 -0700205
Jason Evanse476f8a2010-01-16 09:53:50 -0800206 /*
207 * OOM here is quite inconvenient to propagate, since dealing with it
208 * would require a check for failure in the fast path. Instead, punt
209 * by using arenas[0]. In practice, this is an extremely unlikely
210 * failure.
211 */
212 malloc_write4("<jemalloc>", ": Error initializing arena\n", "", "");
213 if (opt_abort)
214 abort();
Jason Evans289053c2009-06-22 12:08:42 -0700215
Jason Evanse476f8a2010-01-16 09:53:50 -0800216 return (arenas[0]);
Jason Evans289053c2009-06-22 12:08:42 -0700217}
218
219#ifndef NO_TLS
220/*
221 * Choose an arena based on a per-thread value (slow-path code only, called
222 * only by choose_arena()).
223 */
Jason Evanse476f8a2010-01-16 09:53:50 -0800224arena_t *
Jason Evans289053c2009-06-22 12:08:42 -0700225choose_arena_hard(void)
226{
227 arena_t *ret;
228
Jason Evans289053c2009-06-22 12:08:42 -0700229 if (narenas > 1) {
Jason Evans3ee7a5c2009-12-29 00:09:15 -0800230 malloc_mutex_lock(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -0700231 if ((ret = arenas[next_arena]) == NULL)
232 ret = arenas_extend(next_arena);
233 next_arena = (next_arena + 1) % narenas;
Jason Evans3ee7a5c2009-12-29 00:09:15 -0800234 malloc_mutex_unlock(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -0700235 } else
236 ret = arenas[0];
237
238 arenas_map = ret;
239
240 return (ret);
241}
242#endif
243
/*
 * Allocate size bytes aligned to alignment.  alignment is assumed to be a
 * power of two (the mask arithmetic below relies on it) -- presumably
 * guaranteed by callers such as posix_memalign; confirm at call sites.
 * Returns NULL on size_t overflow or OOM.
 */
static inline void *
ipalloc(size_t alignment, size_t size)
{
	void *ret;
	size_t ceil_size;

	/*
	 * Round size up to the nearest multiple of alignment.
	 *
	 * This done, we can take advantage of the fact that for each small
	 * size class, every object is aligned at the smallest power of two
	 * that is non-zero in the base two representation of the size. For
	 * example:
	 *
	 *   Size |   Base 2 | Minimum alignment
	 *   -----+----------+------------------
	 *     96 |  1100000 |                32
	 *    144 | 10100000 |                32
	 *    192 | 11000000 |                64
	 *
	 * Depending on runtime settings, it is possible that arena_malloc()
	 * will further round up to a power of two, but that never causes
	 * correctness issues.
	 */
	ceil_size = (size + (alignment - 1)) & (-alignment);
	/*
	 * (ceil_size < size) protects against the combination of maximal
	 * alignment and size greater than maximal alignment.
	 */
	if (ceil_size < size) {
		/* size_t overflow. */
		return (NULL);
	}

	if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE
	    && ceil_size <= arena_maxclass))
		ret = arena_malloc(ceil_size, false);
	else {
		size_t run_size;

		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * permanently; it makes later calculations simpler.
		 */
		alignment = PAGE_CEILING(alignment);
		ceil_size = PAGE_CEILING(size);
		/*
		 * (ceil_size < size) protects against very large sizes within
		 * PAGE_SIZE of SIZE_T_MAX.
		 *
		 * (ceil_size + alignment < ceil_size) protects against the
		 * combination of maximal alignment and ceil_size large enough
		 * to cause overflow.  This is similar to the first overflow
		 * check above, but it needs to be repeated due to the new
		 * ceil_size value, which may now be *equal* to maximal
		 * alignment, whereas before we only detected overflow if the
		 * original size was *greater* than maximal alignment.
		 */
		if (ceil_size < size || ceil_size + alignment < ceil_size) {
			/* size_t overflow. */
			return (NULL);
		}

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (ceil_size >= alignment)
			run_size = ceil_size + alignment - PAGE_SIZE;
		else {
			/*
			 * It is possible that (alignment << 1) will cause
			 * overflow, but it doesn't matter because we also
			 * subtract PAGE_SIZE, which in the case of overflow
			 * leaves us with a very large run_size.  That causes
			 * the first conditional below to fail, which means
			 * that the bogus run_size value never gets used for
			 * anything important.
			 */
			run_size = (alignment << 1) - PAGE_SIZE;
		}

		if (run_size <= arena_maxclass) {
			ret = arena_palloc(choose_arena(), alignment, ceil_size,
			    run_size);
		} else if (alignment <= chunksize)
			ret = huge_malloc(ceil_size, false);
		else
			ret = huge_palloc(alignment, ceil_size);
	}

	assert(((uintptr_t)ret & (alignment - 1)) == 0);
	return (ret);
}
338
Jason Evans03c22372010-01-03 12:10:42 -0800339static void
340stats_print_atexit(void)
341{
342
343#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
344 unsigned i;
345
346 /*
347 * Merge stats from extant threads. This is racy, since individual
348 * threads do not lock when recording tcache stats events. As a
349 * consequence, the final stats may be slightly out of date by the time
350 * they are reported, if other threads continue to allocate.
351 */
352 for (i = 0; i < narenas; i++) {
353 arena_t *arena = arenas[i];
354 if (arena != NULL) {
355 tcache_t *tcache;
356
357 malloc_mutex_lock(&arena->lock);
358 ql_foreach(tcache, &arena->tcache_ql, link) {
359 tcache_stats_merge(tcache, arena);
360 }
361 malloc_mutex_unlock(&arena->lock);
362 }
363 }
364#endif
Jason Evansed1bf452010-01-19 12:11:25 -0800365 JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700366}
367
368static inline void *
369iralloc(void *ptr, size_t size)
370{
371 size_t oldsize;
372
373 assert(ptr != NULL);
374 assert(size != 0);
375
376 oldsize = isalloc(ptr);
377
378 if (size <= arena_maxclass)
379 return (arena_ralloc(ptr, size, oldsize));
380 else
381 return (huge_ralloc(ptr, size, oldsize));
382}
383
Jason Evans289053c2009-06-22 12:08:42 -0700384/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800385 * End miscellaneous support functions.
Jason Evans289053c2009-06-22 12:08:42 -0700386 */
387/******************************************************************************/
388/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800389 * Begin initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700390 */
391
/*
 * Detect the number of online CPUs via sysconf(3).  Falls back to 1 if the
 * query fails, so the return value is always >= 1.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		ret = 1;
	} else {
		/*
		 * The else is essential: the original fell through and
		 * clobbered the fallback with (unsigned)-1 on error.
		 */
		ret = (unsigned)result;
	}

	return (ret);
}
Jason Evansb7924f52009-06-23 19:01:18 -0700407
Jason Evans289053c2009-06-22 12:08:42 -0700408/*
409 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
410 * implementation has to take pains to avoid infinite recursion during
411 * initialization.
412 */
413static inline bool
414malloc_init(void)
415{
416
417 if (malloc_initialized == false)
418 return (malloc_init_hard());
419
420 return (false);
421}
422
423static bool
424malloc_init_hard(void)
425{
426 unsigned i;
427 int linklen;
428 char buf[PATH_MAX + 1];
429 const char *opts;
Jason Evansb7924f52009-06-23 19:01:18 -0700430 arena_t *init_arenas[1];
Jason Evans289053c2009-06-22 12:08:42 -0700431
432 malloc_mutex_lock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700433 if (malloc_initialized || malloc_initializer == pthread_self()) {
Jason Evans289053c2009-06-22 12:08:42 -0700434 /*
435 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -0800436 * acquired init_lock, or this thread is the initializing
437 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -0700438 */
439 malloc_mutex_unlock(&init_lock);
440 return (false);
441 }
Jason Evansb7924f52009-06-23 19:01:18 -0700442 if (malloc_initializer != (unsigned long)0) {
443 /* Busy-wait until the initializing thread completes. */
444 do {
445 malloc_mutex_unlock(&init_lock);
446 CPU_SPINWAIT;
447 malloc_mutex_lock(&init_lock);
448 } while (malloc_initialized == false);
449 return (false);
450 }
Jason Evans289053c2009-06-22 12:08:42 -0700451
Jason Evansb7924f52009-06-23 19:01:18 -0700452#ifdef DYNAMIC_PAGE_SHIFT
Jason Evansc9658dd2009-06-22 14:44:08 -0700453 /* Get page size. */
454 {
455 long result;
456
457 result = sysconf(_SC_PAGESIZE);
458 assert(result != -1);
Jason Evansb7924f52009-06-23 19:01:18 -0700459 pagesize = (unsigned)result;
460
461 /*
462 * We assume that pagesize is a power of 2 when calculating
Jason Evans94ad2b52009-12-29 00:09:15 -0800463 * pagesize_mask and lg_pagesize.
Jason Evansb7924f52009-06-23 19:01:18 -0700464 */
465 assert(((result - 1) & result) == 0);
466 pagesize_mask = result - 1;
Jason Evans94ad2b52009-12-29 00:09:15 -0800467 lg_pagesize = ffs((int)result) - 1;
Jason Evans289053c2009-06-22 12:08:42 -0700468 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700469#endif
Jason Evans289053c2009-06-22 12:08:42 -0700470
471 for (i = 0; i < 3; i++) {
472 unsigned j;
473
474 /* Get runtime configuration. */
475 switch (i) {
476 case 0:
Jason Evans804c9ec2009-06-22 17:44:33 -0700477 if ((linklen = readlink("/etc/jemalloc.conf", buf,
Jason Evans289053c2009-06-22 12:08:42 -0700478 sizeof(buf) - 1)) != -1) {
479 /*
Jason Evans804c9ec2009-06-22 17:44:33 -0700480 * Use the contents of the "/etc/jemalloc.conf"
Jason Evans289053c2009-06-22 12:08:42 -0700481 * symbolic link's name.
482 */
483 buf[linklen] = '\0';
484 opts = buf;
485 } else {
486 /* No configuration specified. */
487 buf[0] = '\0';
488 opts = buf;
489 }
490 break;
491 case 1:
Jason Evansb7924f52009-06-23 19:01:18 -0700492 if ((opts = getenv("JEMALLOC_OPTIONS")) != NULL) {
Jason Evans289053c2009-06-22 12:08:42 -0700493 /*
494 * Do nothing; opts is already initialized to
Jason Evans804c9ec2009-06-22 17:44:33 -0700495 * the value of the JEMALLOC_OPTIONS
496 * environment variable.
Jason Evans289053c2009-06-22 12:08:42 -0700497 */
498 } else {
499 /* No configuration specified. */
500 buf[0] = '\0';
501 opts = buf;
502 }
503 break;
504 case 2:
Jason Evanse476f8a2010-01-16 09:53:50 -0800505 if (JEMALLOC_P(malloc_options) != NULL) {
Jason Evans289053c2009-06-22 12:08:42 -0700506 /*
507 * Use options that were compiled into the
508 * program.
509 */
Jason Evanse476f8a2010-01-16 09:53:50 -0800510 opts = JEMALLOC_P(malloc_options);
Jason Evans289053c2009-06-22 12:08:42 -0700511 } else {
512 /* No configuration specified. */
513 buf[0] = '\0';
514 opts = buf;
515 }
516 break;
517 default:
518 /* NOTREACHED */
519 assert(false);
Jason Evansb7924f52009-06-23 19:01:18 -0700520 buf[0] = '\0';
521 opts = buf;
Jason Evans289053c2009-06-22 12:08:42 -0700522 }
523
524 for (j = 0; opts[j] != '\0'; j++) {
525 unsigned k, nreps;
526 bool nseen;
527
528 /* Parse repetition count, if any. */
529 for (nreps = 0, nseen = false;; j++, nseen = true) {
530 switch (opts[j]) {
531 case '0': case '1': case '2': case '3':
532 case '4': case '5': case '6': case '7':
533 case '8': case '9':
534 nreps *= 10;
535 nreps += opts[j] - '0';
536 break;
537 default:
538 goto MALLOC_OUT;
539 }
540 }
541MALLOC_OUT:
542 if (nseen == false)
543 nreps = 1;
544
545 for (k = 0; k < nreps; k++) {
546 switch (opts[j]) {
547 case 'a':
548 opt_abort = false;
549 break;
550 case 'A':
551 opt_abort = true;
552 break;
Jason Evans289053c2009-06-22 12:08:42 -0700553 case 'c':
Jason Evans94ad2b52009-12-29 00:09:15 -0800554 if (opt_lg_cspace_max - 1 >
555 opt_lg_qspace_max &&
556 opt_lg_cspace_max >
557 LG_CACHELINE)
558 opt_lg_cspace_max--;
Jason Evans289053c2009-06-22 12:08:42 -0700559 break;
560 case 'C':
Jason Evans94ad2b52009-12-29 00:09:15 -0800561 if (opt_lg_cspace_max < PAGE_SHIFT
Jason Evans289053c2009-06-22 12:08:42 -0700562 - 1)
Jason Evans94ad2b52009-12-29 00:09:15 -0800563 opt_lg_cspace_max++;
Jason Evans289053c2009-06-22 12:08:42 -0700564 break;
Jason Evans45c128d2009-12-29 00:09:15 -0800565 case 'd':
566 if (opt_lg_dirty_mult + 1 <
567 (sizeof(size_t) << 3))
568 opt_lg_dirty_mult++;
Jason Evans289053c2009-06-22 12:08:42 -0700569 break;
Jason Evans45c128d2009-12-29 00:09:15 -0800570 case 'D':
571 if (opt_lg_dirty_mult >= 0)
572 opt_lg_dirty_mult--;
Jason Evans289053c2009-06-22 12:08:42 -0700573 break;
Jason Evans84cbbcb2009-12-29 00:09:15 -0800574#ifdef JEMALLOC_TCACHE
Jason Evans289053c2009-06-22 12:08:42 -0700575 case 'g':
Jason Evans3f3ecfb2010-01-03 14:45:26 -0800576 if (opt_lg_tcache_gc_sweep >= 0)
577 opt_lg_tcache_gc_sweep--;
Jason Evans289053c2009-06-22 12:08:42 -0700578 break;
579 case 'G':
Jason Evans3f3ecfb2010-01-03 14:45:26 -0800580 if (opt_lg_tcache_gc_sweep + 1 <
581 (sizeof(size_t) << 3))
582 opt_lg_tcache_gc_sweep++;
Jason Evans84cbbcb2009-12-29 00:09:15 -0800583 break;
584 case 'h':
Jason Evans279e09d2010-01-03 16:16:10 -0800585 if (opt_lg_tcache_nslots > 0)
586 opt_lg_tcache_nslots--;
Jason Evans84cbbcb2009-12-29 00:09:15 -0800587 break;
588 case 'H':
Jason Evans279e09d2010-01-03 16:16:10 -0800589 if (opt_lg_tcache_nslots + 1 <
590 (sizeof(size_t) << 3))
591 opt_lg_tcache_nslots++;
Jason Evans289053c2009-06-22 12:08:42 -0700592 break;
593#endif
Jason Evansb7924f52009-06-23 19:01:18 -0700594#ifdef JEMALLOC_FILL
Jason Evans289053c2009-06-22 12:08:42 -0700595 case 'j':
596 opt_junk = false;
597 break;
598 case 'J':
599 opt_junk = true;
600 break;
Jason Evansb7924f52009-06-23 19:01:18 -0700601#endif
Jason Evans289053c2009-06-22 12:08:42 -0700602 case 'k':
603 /*
604 * Chunks always require at least one
Jason Evansb2378162009-12-29 00:09:15 -0800605 * header page, plus enough room to
606 * hold a run for the largest medium
607 * size class (one page more than the
608 * size).
Jason Evans289053c2009-06-22 12:08:42 -0700609 */
Jason Evans94ad2b52009-12-29 00:09:15 -0800610 if ((1U << (opt_lg_chunk - 1)) >=
Jason Evansb2378162009-12-29 00:09:15 -0800611 (2U << PAGE_SHIFT) + (1U <<
Jason Evans94ad2b52009-12-29 00:09:15 -0800612 opt_lg_medium_max))
613 opt_lg_chunk--;
Jason Evans289053c2009-06-22 12:08:42 -0700614 break;
615 case 'K':
Jason Evans94ad2b52009-12-29 00:09:15 -0800616 if (opt_lg_chunk + 1 <
Jason Evans289053c2009-06-22 12:08:42 -0700617 (sizeof(size_t) << 3))
Jason Evans94ad2b52009-12-29 00:09:15 -0800618 opt_lg_chunk++;
Jason Evans289053c2009-06-22 12:08:42 -0700619 break;
Jason Evansb2378162009-12-29 00:09:15 -0800620 case 'm':
Jason Evans94ad2b52009-12-29 00:09:15 -0800621 if (opt_lg_medium_max > PAGE_SHIFT)
622 opt_lg_medium_max--;
Jason Evansb2378162009-12-29 00:09:15 -0800623 break;
624 case 'M':
Jason Evans94ad2b52009-12-29 00:09:15 -0800625 if (opt_lg_medium_max + 1 <
626 opt_lg_chunk)
627 opt_lg_medium_max++;
Jason Evansb2378162009-12-29 00:09:15 -0800628 break;
Jason Evans289053c2009-06-22 12:08:42 -0700629 case 'n':
630 opt_narenas_lshift--;
631 break;
632 case 'N':
633 opt_narenas_lshift++;
634 break;
635 case 'p':
Jason Evans03c22372010-01-03 12:10:42 -0800636 opt_stats_print = false;
Jason Evans289053c2009-06-22 12:08:42 -0700637 break;
638 case 'P':
Jason Evans03c22372010-01-03 12:10:42 -0800639 opt_stats_print = true;
Jason Evans289053c2009-06-22 12:08:42 -0700640 break;
641 case 'q':
Jason Evans94ad2b52009-12-29 00:09:15 -0800642 if (opt_lg_qspace_max > LG_QUANTUM)
643 opt_lg_qspace_max--;
Jason Evans289053c2009-06-22 12:08:42 -0700644 break;
645 case 'Q':
Jason Evans94ad2b52009-12-29 00:09:15 -0800646 if (opt_lg_qspace_max + 1 <
647 opt_lg_cspace_max)
648 opt_lg_qspace_max++;
Jason Evans289053c2009-06-22 12:08:42 -0700649 break;
Jason Evanse9db6c92010-01-03 16:17:52 -0800650#ifdef JEMALLOC_TCACHE
651 case 's':
652 opt_tcache_sort = false;
653 break;
654 case 'S':
655 opt_tcache_sort = true;
656 break;
657#endif
Jason Evans569432c2009-12-29 00:09:15 -0800658#ifdef JEMALLOC_TRACE
659 case 't':
660 opt_trace = false;
Jason Evans289053c2009-06-22 12:08:42 -0700661 break;
Jason Evans569432c2009-12-29 00:09:15 -0800662 case 'T':
663 opt_trace = true;
Jason Evans289053c2009-06-22 12:08:42 -0700664 break;
Jason Evansb7924f52009-06-23 19:01:18 -0700665#endif
666#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -0700667 case 'v':
668 opt_sysv = false;
669 break;
670 case 'V':
671 opt_sysv = true;
672 break;
Jason Evansb7924f52009-06-23 19:01:18 -0700673#endif
674#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -0700675 case 'x':
676 opt_xmalloc = false;
677 break;
678 case 'X':
679 opt_xmalloc = true;
680 break;
Jason Evansb7924f52009-06-23 19:01:18 -0700681#endif
682#ifdef JEMALLOC_FILL
Jason Evans289053c2009-06-22 12:08:42 -0700683 case 'z':
684 opt_zero = false;
685 break;
686 case 'Z':
687 opt_zero = true;
688 break;
Jason Evansb7924f52009-06-23 19:01:18 -0700689#endif
Jason Evans289053c2009-06-22 12:08:42 -0700690 default: {
691 char cbuf[2];
692
693 cbuf[0] = opts[j];
694 cbuf[1] = '\0';
Jason Evanse476f8a2010-01-16 09:53:50 -0800695 malloc_write4("<jemalloc>",
Jason Evans804c9ec2009-06-22 17:44:33 -0700696 ": Unsupported character "
Jason Evans289053c2009-06-22 12:08:42 -0700697 "in malloc options: '", cbuf,
698 "'\n");
699 }
700 }
701 }
702 }
703 }
704
Jason Evans569432c2009-12-29 00:09:15 -0800705#ifdef JEMALLOC_TRACE
Jason Evanse476f8a2010-01-16 09:53:50 -0800706 if (opt_trace)
707 trace_boot();
Jason Evans569432c2009-12-29 00:09:15 -0800708#endif
Jason Evans03c22372010-01-03 12:10:42 -0800709 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -0700710 /* Print statistics at exit. */
Jason Evans03c22372010-01-03 12:10:42 -0800711 atexit(stats_print_atexit);
Jason Evans289053c2009-06-22 12:08:42 -0700712 }
713
Jason Evansc9658dd2009-06-22 14:44:08 -0700714 /* Register fork handlers. */
Jason Evans804c9ec2009-06-22 17:44:33 -0700715 pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
Jason Evansc9658dd2009-06-22 14:44:08 -0700716
Jason Evanse476f8a2010-01-16 09:53:50 -0800717 if (arena_boot0()) {
Jason Evans289053c2009-06-22 12:08:42 -0700718 malloc_mutex_unlock(&init_lock);
719 return (true);
720 }
721
Jason Evans84cbbcb2009-12-29 00:09:15 -0800722#ifdef JEMALLOC_TCACHE
Jason Evanse476f8a2010-01-16 09:53:50 -0800723 tcache_boot();
Jason Evans84cbbcb2009-12-29 00:09:15 -0800724#endif
725
Jason Evanse476f8a2010-01-16 09:53:50 -0800726 if (chunk_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700727 malloc_mutex_unlock(&init_lock);
728 return (true);
729 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800730 arena_boot1();
731
732 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700733 malloc_mutex_unlock(&init_lock);
734 return (true);
735 }
Jason Evans289053c2009-06-22 12:08:42 -0700736
Jason Evanse476f8a2010-01-16 09:53:50 -0800737 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700738 malloc_mutex_unlock(&init_lock);
739 return (true);
740 }
Jason Evans289053c2009-06-22 12:08:42 -0700741
Jason Evansb7924f52009-06-23 19:01:18 -0700742 /*
743 * Create enough scaffolding to allow recursive allocation in
744 * malloc_ncpus().
745 */
746 narenas = 1;
747 arenas = init_arenas;
748 memset(arenas, 0, sizeof(arena_t *) * narenas);
749
750 /*
751 * Initialize one arena here. The rest are lazily created in
752 * choose_arena_hard().
753 */
754 arenas_extend(0);
755 if (arenas[0] == NULL) {
756 malloc_mutex_unlock(&init_lock);
757 return (true);
758 }
759
760#ifndef NO_TLS
761 /*
762 * Assign the initial arena to the initial thread, in order to avoid
763 * spurious creation of an extra arena if the application switches to
764 * threaded mode.
765 */
766 arenas_map = arenas[0];
767#endif
768
Jason Evans3ee7a5c2009-12-29 00:09:15 -0800769 malloc_mutex_init(&arenas_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700770
771 /* Get number of CPUs. */
772 malloc_initializer = pthread_self();
773 malloc_mutex_unlock(&init_lock);
774 ncpus = malloc_ncpus();
775 malloc_mutex_lock(&init_lock);
776
Jason Evans289053c2009-06-22 12:08:42 -0700777 if (ncpus > 1) {
778 /*
Jason Evans5463a522009-12-29 00:09:15 -0800779 * For SMP systems, create more than one arena per CPU by
780 * default.
Jason Evans289053c2009-06-22 12:08:42 -0700781 */
Jason Evans84cbbcb2009-12-29 00:09:15 -0800782#ifdef JEMALLOC_TCACHE
Jason Evans279e09d2010-01-03 16:16:10 -0800783 if (tcache_nslots) {
Jason Evans5463a522009-12-29 00:09:15 -0800784 /*
785 * Only large object allocation/deallocation is
786 * guaranteed to acquire an arena mutex, so we can get
787 * away with fewer arenas than without thread caching.
788 */
789 opt_narenas_lshift += 1;
790 } else {
791#endif
792 /*
793 * All allocations must acquire an arena mutex, so use
794 * plenty of arenas.
795 */
796 opt_narenas_lshift += 2;
Jason Evans84cbbcb2009-12-29 00:09:15 -0800797#ifdef JEMALLOC_TCACHE
Jason Evans5463a522009-12-29 00:09:15 -0800798 }
799#endif
Jason Evans289053c2009-06-22 12:08:42 -0700800 }
801
802 /* Determine how many arenas to use. */
803 narenas = ncpus;
804 if (opt_narenas_lshift > 0) {
805 if ((narenas << opt_narenas_lshift) > narenas)
806 narenas <<= opt_narenas_lshift;
807 /*
808 * Make sure not to exceed the limits of what base_alloc() can
809 * handle.
810 */
811 if (narenas * sizeof(arena_t *) > chunksize)
812 narenas = chunksize / sizeof(arena_t *);
813 } else if (opt_narenas_lshift < 0) {
814 if ((narenas >> -opt_narenas_lshift) < narenas)
815 narenas >>= -opt_narenas_lshift;
816 /* Make sure there is at least one arena. */
817 if (narenas == 0)
818 narenas = 1;
819 }
Jason Evans289053c2009-06-22 12:08:42 -0700820
821#ifdef NO_TLS
822 if (narenas > 1) {
823 static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19,
824 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83,
825 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
826 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
827 223, 227, 229, 233, 239, 241, 251, 257, 263};
828 unsigned nprimes, parenas;
829
830 /*
831 * Pick a prime number of hash arenas that is more than narenas
832 * so that direct hashing of pthread_self() pointers tends to
833 * spread allocations evenly among the arenas.
834 */
835 assert((narenas & 1) == 0); /* narenas must be even. */
Jason Evans94ad2b52009-12-29 00:09:15 -0800836 nprimes = (sizeof(primes) >> LG_SIZEOF_INT);
Jason Evans289053c2009-06-22 12:08:42 -0700837 parenas = primes[nprimes - 1]; /* In case not enough primes. */
838 for (i = 1; i < nprimes; i++) {
839 if (primes[i] > narenas) {
840 parenas = primes[i];
841 break;
842 }
843 }
844 narenas = parenas;
845 }
846#endif
847
848#ifndef NO_TLS
Jason Evans289053c2009-06-22 12:08:42 -0700849 next_arena = 0;
Jason Evans289053c2009-06-22 12:08:42 -0700850#endif
851
852 /* Allocate and initialize arenas. */
853 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
854 if (arenas == NULL) {
855 malloc_mutex_unlock(&init_lock);
856 return (true);
857 }
858 /*
859 * Zero the array. In practice, this should always be pre-zeroed,
860 * since it was just mmap()ed, but let's be sure.
861 */
862 memset(arenas, 0, sizeof(arena_t *) * narenas);
Jason Evansb7924f52009-06-23 19:01:18 -0700863 /* Copy the pointer to the one arena that was already initialized. */
864 arenas[0] = init_arenas[0];
Jason Evans289053c2009-06-22 12:08:42 -0700865
866 malloc_initialized = true;
867 malloc_mutex_unlock(&init_lock);
868 return (false);
869}
870
871/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800872 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700873 */
874/******************************************************************************/
875/*
876 * Begin malloc(3)-compatible functions.
877 */
878
/*
 * malloc(3)-compatible allocation entry point.  Returns a pointer to at
 * least size bytes, or NULL with errno set to ENOMEM on failure.  With
 * JEMALLOC_SYSV and opt_sysv, malloc(0) returns NULL instead of a
 * minimal allocation.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;

	/* Bootstrap the allocator on first use; failure is reported as OOM. */
	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			/* Standard behavior: treat malloc(0) as malloc(1). */
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			/* opt_xmalloc turns a size-0 request into a fatal error. */
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in malloc(): invalid size 0\n", "",
				    "");
				abort();
			}
# endif
			/* SysV behavior: malloc(0) returns NULL. */
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	ret = imalloc(size);

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* opt_xmalloc promotes allocation failure to abort(). */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in malloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_TRACE
	/* Record the call in the allocation trace, if tracing is enabled. */
	if (opt_trace)
		trace_malloc(ret, size);
#endif
	return (ret);
}
936
/*
 * posix_memalign(3)-compatible entry point.  Stores an allocation of at
 * least size bytes, aligned to alignment, in *memptr.  Returns 0 on
 * success, EINVAL for a bad alignment, or ENOMEM on allocation failure
 * (errno is not modified, per POSIX).
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
	int ret;
	void *result;

	/* Bootstrap failure falls through to the ENOMEM path below. */
	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				/* Standard behavior: round a size-0 request up to 1. */
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				/* opt_xmalloc makes a size-0 request fatal. */
				if (opt_xmalloc) {
					malloc_write4("<jemalloc>",
					    ": Error in posix_memalign(): "
					    "invalid size 0\n", "", "");
					abort();
				}
# endif
				/* SysV behavior: size 0 yields NULL with success. */
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write4("<jemalloc>",
				    ": Error in posix_memalign(): "
				    "invalid alignment\n", "", "");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		result = ipalloc(alignment, size);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* opt_xmalloc promotes allocation failure to abort(). */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in posix_memalign(): out of memory\n",
			    "", "");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	/* Success: hand the allocation back through *memptr. */
	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_TRACE
	/* Record the call in the allocation trace, if tracing is enabled. */
	if (opt_trace)
		trace_posix_memalign(result, alignment, size);
#endif
	return (ret);
}
1013
/*
 * calloc(3)-compatible entry point.  Allocates zeroed storage for num
 * elements of size bytes each, guarding against num * size overflowing
 * size_t.  Returns NULL with errno set to ENOMEM on failure.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		/* Only round up to 1 when an operand was genuinely zero. */
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			/* SysV behavior: a zero-sized request returns NULL. */
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	ret = icalloc(num_size);

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		/* opt_xmalloc promotes allocation failure to abort(). */
		if (opt_xmalloc) {
			malloc_write4("<jemalloc>",
			    ": Error in calloc(): out of memory\n", "",
			    "");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_TRACE
	/* Record the call in the allocation trace, if tracing is enabled. */
	if (opt_trace)
		trace_calloc(ret, num, size);
#endif
	return (ret);
}
1073
Jason Evanse476f8a2010-01-16 09:53:50 -08001074JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001075void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001076JEMALLOC_P(realloc)(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001077{
1078 void *ret;
Jason Evans569432c2009-12-29 00:09:15 -08001079#ifdef JEMALLOC_TRACE
1080 size_t old_size;
1081#endif
Jason Evans289053c2009-06-22 12:08:42 -07001082
1083 if (size == 0) {
Jason Evansb7924f52009-06-23 19:01:18 -07001084#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001085 if (opt_sysv == false)
Jason Evansb7924f52009-06-23 19:01:18 -07001086#endif
Jason Evans289053c2009-06-22 12:08:42 -07001087 size = 1;
Jason Evansb7924f52009-06-23 19:01:18 -07001088#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001089 else {
Jason Evanse476f8a2010-01-16 09:53:50 -08001090 if (ptr != NULL) {
1091#ifdef JEMALLOC_TRACE
1092 if (opt_trace)
1093 old_size = isalloc(ptr);
1094#endif
Jason Evans289053c2009-06-22 12:08:42 -07001095 idalloc(ptr);
Jason Evanse476f8a2010-01-16 09:53:50 -08001096 }
Jason Evans289053c2009-06-22 12:08:42 -07001097 ret = NULL;
1098 goto RETURN;
1099 }
Jason Evansb7924f52009-06-23 19:01:18 -07001100#endif
Jason Evans289053c2009-06-22 12:08:42 -07001101 }
1102
1103 if (ptr != NULL) {
Jason Evansa25d0a82009-11-09 14:57:38 -08001104 assert(malloc_initialized || malloc_initializer ==
1105 pthread_self());
Jason Evans289053c2009-06-22 12:08:42 -07001106
Jason Evans569432c2009-12-29 00:09:15 -08001107#ifdef JEMALLOC_TRACE
1108 if (opt_trace)
1109 old_size = isalloc(ptr);
1110#endif
1111
Jason Evans289053c2009-06-22 12:08:42 -07001112 ret = iralloc(ptr, size);
1113
1114 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001115#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001116 if (opt_xmalloc) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001117 malloc_write4("<jemalloc>",
Jason Evans804c9ec2009-06-22 17:44:33 -07001118 ": Error in realloc(): out of "
Jason Evans289053c2009-06-22 12:08:42 -07001119 "memory\n", "", "");
1120 abort();
1121 }
Jason Evansb7924f52009-06-23 19:01:18 -07001122#endif
Jason Evans289053c2009-06-22 12:08:42 -07001123 errno = ENOMEM;
1124 }
1125 } else {
1126 if (malloc_init())
1127 ret = NULL;
1128 else
1129 ret = imalloc(size);
1130
Jason Evans569432c2009-12-29 00:09:15 -08001131#ifdef JEMALLOC_TRACE
1132 if (opt_trace)
1133 old_size = 0;
1134#endif
1135
Jason Evans289053c2009-06-22 12:08:42 -07001136 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001137#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001138 if (opt_xmalloc) {
Jason Evanse476f8a2010-01-16 09:53:50 -08001139 malloc_write4("<jemalloc>",
Jason Evans804c9ec2009-06-22 17:44:33 -07001140 ": Error in realloc(): out of "
Jason Evans289053c2009-06-22 12:08:42 -07001141 "memory\n", "", "");
1142 abort();
1143 }
Jason Evansb7924f52009-06-23 19:01:18 -07001144#endif
Jason Evans289053c2009-06-22 12:08:42 -07001145 errno = ENOMEM;
1146 }
1147 }
1148
Jason Evansb8f0a652009-06-29 09:41:43 -07001149#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001150RETURN:
Jason Evansb8f0a652009-06-29 09:41:43 -07001151#endif
Jason Evans569432c2009-12-29 00:09:15 -08001152#ifdef JEMALLOC_TRACE
1153 if (opt_trace)
1154 trace_realloc(ret, ptr, size, old_size);
1155#endif
Jason Evans289053c2009-06-22 12:08:42 -07001156 return (ret);
1157}
1158
Jason Evanse476f8a2010-01-16 09:53:50 -08001159JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001160void
Jason Evanse476f8a2010-01-16 09:53:50 -08001161JEMALLOC_P(free)(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001162{
1163
Jason Evans289053c2009-06-22 12:08:42 -07001164 if (ptr != NULL) {
Jason Evansa25d0a82009-11-09 14:57:38 -08001165 assert(malloc_initialized || malloc_initializer ==
1166 pthread_self());
Jason Evans289053c2009-06-22 12:08:42 -07001167
Jason Evans569432c2009-12-29 00:09:15 -08001168#ifdef JEMALLOC_TRACE
1169 if (opt_trace)
1170 trace_free(ptr, isalloc(ptr));
1171#endif
Jason Evans289053c2009-06-22 12:08:42 -07001172 idalloc(ptr);
1173 }
1174}
1175
1176/*
1177 * End malloc(3)-compatible functions.
1178 */
1179/******************************************************************************/
1180/*
1181 * Begin non-standard functions.
1182 */
1183
Jason Evanse476f8a2010-01-16 09:53:50 -08001184JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001185size_t
Jason Evanse476f8a2010-01-16 09:53:50 -08001186JEMALLOC_P(malloc_usable_size)(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001187{
Jason Evans569432c2009-12-29 00:09:15 -08001188 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001189
1190 assert(ptr != NULL);
Jason Evans569432c2009-12-29 00:09:15 -08001191 ret = isalloc(ptr);
Jason Evans289053c2009-06-22 12:08:42 -07001192
Jason Evans569432c2009-12-29 00:09:15 -08001193#ifdef JEMALLOC_TRACE
1194 if (opt_trace)
1195 trace_malloc_usable_size(ret, ptr);
1196#endif
1197 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001198}
1199
#ifdef JEMALLOC_TCACHE
/*
 * Non-standard entry point: tear down the calling thread's tcache,
 * returning its cached objects to the arenas.  A no-op for threads that
 * have no tcache.
 */
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_tcache_flush)(void)
{
	tcache_t *tcache = tcache_tls;

	if (tcache != NULL) {
		/* Destroy first, then clear the TLS slot. */
		tcache_destroy(tcache);
		tcache_tls = NULL;
	}
}
#endif
1215
Jason Evans289053c2009-06-22 12:08:42 -07001216/*
1217 * End non-standard functions.
1218 */
1219/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001220
Jason Evans289053c2009-06-22 12:08:42 -07001221/*
1222 * The following functions are used by threading libraries for protection of
1223 * malloc during fork(). These functions are only called if the program is
1224 * running in threaded mode, so there is no need to check whether the program
1225 * is threaded here.
1226 */
1227
/*
 * Acquire all allocator mutexes in a safe order before fork(), so the
 * child process inherits a consistent heap.  Paired with
 * jemalloc_postfork(), which releases the same set of locks.
 */
static void
jemalloc_prefork(void)
{
	bool again;
	unsigned i, j;
	/* Per-iteration snapshots of the arenas array (VLAs sized by narenas). */
	arena_t *larenas[narenas], *tarenas[narenas];

	/* Acquire all mutexes in a safe order. */

	/*
	 * arenas_lock must be acquired after all of the arena mutexes, in
	 * order to avoid potential deadlock with arena_lock_balance[_hard]().
	 * Since arenas_lock protects the arenas array, the following code has
	 * to race with arenas_extend() callers until it succeeds in locking
	 * all arenas before locking arenas_lock.
	 */
	memset(larenas, 0, sizeof(arena_t *) * narenas);
	do {
		again = false;

		malloc_mutex_lock(&arenas_lock);
		for (i = 0; i < narenas; i++) {
			/* A mismatch means a new arena appeared since last pass. */
			if (arenas[i] != larenas[i]) {
				memcpy(tarenas, arenas, sizeof(arena_t *) *
				    narenas);
				malloc_mutex_unlock(&arenas_lock);
				/* Lock only the arenas not already held. */
				for (j = 0; j < narenas; j++) {
					if (larenas[j] != tarenas[j]) {
						larenas[j] = tarenas[j];
						malloc_mutex_lock(
						    &larenas[j]->lock);
					}
				}
				again = true;
				break;
			}
		}
	} while (again);
	/* Loop exits with arenas_lock held and every arena mutex held. */

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif
}
1275
Jason Evanscc00a152009-06-25 18:06:48 -07001276static void
Jason Evans804c9ec2009-06-22 17:44:33 -07001277jemalloc_postfork(void)
Jason Evans289053c2009-06-22 12:08:42 -07001278{
1279 unsigned i;
1280 arena_t *larenas[narenas];
1281
1282 /* Release all mutexes, now that fork() has completed. */
1283
Jason Evansb7924f52009-06-23 19:01:18 -07001284#ifdef JEMALLOC_DSS
Jason Evans289053c2009-06-22 12:08:42 -07001285 malloc_mutex_unlock(&dss_mtx);
1286#endif
1287
1288 malloc_mutex_unlock(&huge_mtx);
1289
1290 malloc_mutex_unlock(&base_mtx);
1291
1292 memcpy(larenas, arenas, sizeof(arena_t *) * narenas);
Jason Evans3ee7a5c2009-12-29 00:09:15 -08001293 malloc_mutex_unlock(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001294 for (i = 0; i < narenas; i++) {
1295 if (larenas[i] != NULL)
Jason Evans3ee7a5c2009-12-29 00:09:15 -08001296 malloc_mutex_unlock(&larenas[i]->lock);
Jason Evans289053c2009-06-22 12:08:42 -07001297 }
1298}