#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
static arena_t **arenas;
static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

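/*
 * index2size_tab maps each size class index to its usable size.  Each entry
 * is computed from its SIZE_CLASSES tuple as ((1 << lg_grp) + (ndelta <<
 * lg_delta)); e.g. a hypothetical tuple with lg_grp=5, ndelta=3, lg_delta=3
 * would yield 32 + 24 = 56 bytes.
 */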
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
};

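/*
 * size2index_tab supports constant-time size-to-index lookups for small
 * requests.  Each S2B_<lg> macro repeats its index argument once per
 * LG_TINY_MIN-sized quantum spanned by that size class, so lookups of the
 * form size2index_tab[(size-1) >> LG_TINY_MIN] (the shape used by the inline
 * size2index lookup path) map a request size to its class index.
 */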
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p; /* Input pointer (as in realloc(p, s)). */
    size_t s; /* Request size. */
    void *r; /* Result pointer. */
} malloc_utrace_t;

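/*
 * UTRACE() records a malloc_utrace_t event via utrace(2) when opt_utrace is
 * enabled; e.g. je_malloc() below issues UTRACE(0, size, ret).  errno is
 * saved and restored so that tracing never perturbs caller-visible error
 * state.
 */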
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption.  The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

/*
 * The a0*() functions are used instead of i[mcd]alloc() in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access.  These functions are
 * also exposed for use in static binaries on FreeBSD, hence the old-style
 * malloc() API.
 */

arena_t *
a0get(void)
{

    assert(a0 != NULL);
    return (a0);
}

static void *
a0alloc(size_t size, bool zero)
{
    void *ret;

    if (unlikely(malloc_init()))
        return (NULL);

    if (size == 0)
        size = 1;

    if (likely(size <= arena_maxclass))
        ret = arena_malloc(NULL, a0get(), size, zero, false);
    else
        ret = huge_malloc(NULL, a0get(), size, zero, false);

    return (ret);
}

void *
a0malloc(size_t size)
{

    return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

    return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
    arena_chunk_t *chunk;

    if (ptr == NULL)
        return;

    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
    if (likely(chunk != ptr))
        arena_dalloc(NULL, chunk, ptr, false);
    else
        huge_dalloc(NULL, ptr, false);
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
    arena_t *arena;

    /* Expand arenas if necessary. */
    assert(ind <= narenas_total);
    if (ind == narenas_total) {
        unsigned narenas_new = narenas_total + 1;
        arena_t **arenas_new =
            (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
            sizeof(arena_t *)));
        if (arenas_new == NULL)
            return (NULL);
        memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
        arenas_new[ind] = NULL;
        /*
         * Deallocate only if arenas came from a0malloc() (not
         * base_alloc()).
         */
        if (narenas_total != narenas_auto)
            a0free(arenas);
        arenas = arenas_new;
        narenas_total = narenas_new;
    }

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arenas[ind];
    if (arena != NULL) {
        assert(ind < narenas_auto);
        return (arena);
    }

    /* Actually initialize the arena. */
    arena = arenas[ind] = arena_new(ind);
    return (arena);
}

arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arena_init_locked(ind);
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}

unsigned
narenas_total_get(void)
{
    unsigned narenas;

    malloc_mutex_lock(&arenas_lock);
    narenas = narenas_total;
    malloc_mutex_unlock(&arenas_lock);

    return (narenas);
}

static void
arena_bind_locked(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arenas[ind];
    arena->nthreads++;

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{

    malloc_mutex_lock(&arenas_lock);
    arena_bind_locked(tsd, ind);
    malloc_mutex_unlock(&arenas_lock);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    malloc_mutex_lock(&arenas_lock);
    oldarena = arenas[oldind];
    newarena = arenas[newind];
    oldarena->nthreads--;
    newarena->nthreads++;
    malloc_mutex_unlock(&arenas_lock);
    tsd_arena_set(tsd, newarena);
}

unsigned
arena_nbound(unsigned ind)
{
    unsigned nthreads;

    malloc_mutex_lock(&arenas_lock);
    nthreads = arenas[ind]->nthreads;
    malloc_mutex_unlock(&arenas_lock);
    return (nthreads);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arenas[ind];
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
    tsd_arena_set(tsd, NULL);
}

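/*
 * arena_get_hard() is the slow path for looking up an arena by index: it
 * (re)builds the calling thread's arenas_cache copy of the global arenas
 * array, then returns arenas_cache[ind], optionally initializing that arena
 * on demand when init_if_missing is true.
 */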
arena_t *
arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
    arena_t *arena;
    arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
    unsigned narenas_cache = tsd_narenas_cache_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /* Deallocate old cache if it's too small. */
    if (arenas_cache != NULL && narenas_cache < narenas_actual) {
        a0free(arenas_cache);
        arenas_cache = NULL;
        narenas_cache = 0;
        tsd_arenas_cache_set(tsd, arenas_cache);
        tsd_narenas_cache_set(tsd, narenas_cache);
    }

    /* Allocate cache if it's missing. */
    if (arenas_cache == NULL) {
        bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
        assert(ind < narenas_actual || !init_if_missing);
        narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (!*arenas_cache_bypassp) {
            *arenas_cache_bypassp = true;
            arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
                narenas_cache);
            *arenas_cache_bypassp = false;
        } else
            arenas_cache = NULL;
        if (arenas_cache == NULL) {
            /*
             * This function must always tell the truth, even if
             * it's slow, so don't let OOM or recursive allocation
             * avoidance (note arenas_cache_bypass check) get in the
             * way.
             */
            if (ind >= narenas_actual)
                return (NULL);
            malloc_mutex_lock(&arenas_lock);
            arena = arenas[ind];
            malloc_mutex_unlock(&arenas_lock);
            return (arena);
        }
        tsd_arenas_cache_set(tsd, arenas_cache);
        tsd_narenas_cache_set(tsd, narenas_cache);
    }

    /*
     * Copy to cache.  It's possible that the actual number of arenas has
     * increased since narenas_total_get() was called above, but that causes
     * no correctness issues unless two threads concurrently execute the
     * arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */
    malloc_mutex_lock(&arenas_lock);
    memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
    malloc_mutex_unlock(&arenas_lock);
    if (narenas_cache > narenas_actual) {
        memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
            (narenas_cache - narenas_actual));
    }

    /* Read the refreshed cache, and init the arena if necessary. */
    arena = arenas_cache[ind];
    if (init_if_missing && arena == NULL)
        arena = arenas_cache[ind] = arena_init(ind);
    return (arena);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(a0get() != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind_locked(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = a0get();
        arena_bind(tsd, 0);
    }

    return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_cache_cleanup(tsd_t *tsd)
{
    arena_t **arenas_cache;

    arenas_cache = tsd_arenas_cache_get(tsd);
    if (arenas_cache != NULL)
        a0free(arenas_cache);
}

void
narenas_cache_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_cache_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

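/*
 * Parse one "key:value" pair from a conf string of the form
 * "key1:value1,key2:value2,..." (e.g. "narenas:4,lg_chunk:22"; illustrative
 * values).  *opts_p is advanced past the pair; true is returned when nothing
 * remains to parse or the string is malformed.
 */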
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

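    /*
     * Options are read from up to three sources, processed in this order so
     * that later sources override earlier ones: the compiled-in
     * je_malloc_conf string, the name of the /etc/malloc.conf symbolic link,
     * and the MALLOC_CONF environment variable.
     */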
    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n) \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
            if (CONF_MATCH(n)) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                if (cont) \
                    continue; \
            }
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
            if (CONF_MATCH(n)) { \
                uintmax_t um; \
                char *end; \
                \
                set_errno(0); \
                um = malloc_strtoumax(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end -\
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (clip) { \
                    if ((min) != 0 && um < (min)) \
                        o = (min); \
                    else if (um > (max)) \
                        o = (max); \
                    else \
                        o = um; \
                } else { \
                    if (((min) != 0 && um < (min)) \
                        || um > (max)) { \
                        malloc_conf_error( \
                            "Out-of-range " \
                            "conf value", \
                            k, klen, v, vlen); \
                    } else \
                        o = um; \
                } \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (CONF_MATCH(n)) { \
                long l; \
                char *end; \
                \
                set_errno(0); \
                l = strtol(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end -\
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)(min) || l > \
                    (ssize_t)(max)) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (CONF_MATCH(n)) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX, false)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, "junk", true)
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}

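/*
 * malloc_init_hard() bootstraps the allocator under init_lock: it runs the
 * *_boot() routines in dependency order, creates arena 0 with a temporary
 * single-slot arenas array, briefly drops the lock while code that may
 * allocate recursively runs (malloc_ncpus(), pthread_atfork()), and only then
 * sizes and installs the permanent arenas array.
 */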
static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    if (malloc_tsd_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot0();

    malloc_conf_init();

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock)) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_total = narenas_auto = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * arena_choose_hard().
     */
    a0 = arena_init(0);
    if (a0 == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_mutex_unlock(&init_lock);
    /**********************************************************************/
    /* Recursive allocation may follow. */

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads's pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    /* Done recursively allocating. */
    /**********************************************************************/
    malloc_mutex_lock(&init_lock);

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas_auto > chunksize / sizeof(arena_t *)) {
        narenas_auto = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total = narenas_auto;

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas_total);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();

    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

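/*
 * The *_prof_sample() helpers service allocations that heap profiling has
 * chosen to sample.  Requests no larger than SMALL_MAXCLASS are promoted to
 * LARGE_MINCLASS so that per-allocation profiling context can be attached,
 * and arena_prof_promoted() records the originally requested usize.
 */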
static void *
imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = imalloc(tsd, LARGE_MINCLASS);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = imalloc(tsd, usize);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(tsd_t *tsd, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imalloc_prof_sample(tsd, usize, tctx);
    else
        p = imalloc(tsd, usize);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{

    if (unlikely(malloc_init()))
        return (NULL);
    *tsd = tsd_fetch();

    if (config_prof && opt_prof) {
        *usize = s2u(size);
        return (imalloc_prof(*tsd, *usize));
    }

    if (config_stats || (config_valgrind && unlikely(in_valgrind)))
        *usize = s2u(size);
    return (imalloc(*tsd, size));
}

void *
je_malloc(size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (size == 0)
        size = 1;

    ret = imalloc_body(size, &tsd, &usize);
    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
        p = imalloc(tsd, LARGE_MINCLASS);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = ipalloc(tsd, usize, alignment, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imemalign_prof_sample(tsd, alignment, usize, tctx);
    else
        p = ipalloc(tsd, usize, alignment, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
    int ret;
    tsd_t *tsd;
    size_t usize;
    void *result;

    assert(min_alignment != 0);

    if (unlikely(malloc_init())) {
        result = NULL;
        goto label_oom;
    } else {
        tsd = tsd_fetch();
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (unlikely(((alignment - 1) & alignment) != 0
            || (alignment < min_alignment))) {
            if (config_xmalloc && unlikely(opt_xmalloc)) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (unlikely(usize == 0)) {
            result = NULL;
            goto label_oom;
        }

        if (config_prof && opt_prof)
            result = imemalign_prof(tsd, alignment, usize);
        else
            result = ipalloc(tsd, usize, alignment, false);
        if (unlikely(result == NULL))
            goto label_oom;
    }

    *memptr = result;
    ret = 0;
label_return:
    if (config_stats && likely(result != NULL)) {
        assert(usize == isalloc(result, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, result);
    return (ret);
label_oom:
    assert(result == NULL);
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error allocating aligned memory: "
            "out of memory\n");
        abort();
    }
    ret = ENOMEM;
    goto label_return;
}

Jason Evansa5070042011-08-12 13:48:27 -07001407int
Jason Evans0a5489e2012-03-01 17:19:20 -08001408je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -07001409{
Jason Evans122449b2012-04-06 00:35:09 -07001410 int ret = imemalign(memptr, alignment, size, sizeof(void *));
1411 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1412 config_prof), false);
1413 return (ret);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001414}
1415
Jason Evans0a0bbf62012-03-13 12:55:21 -07001416void *
1417je_aligned_alloc(size_t alignment, size_t size)
1418{
1419 void *ret;
1420 int err;
1421
Jason Evans9c640bf2014-09-11 16:20:44 -07001422 if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
Jason Evans0a0bbf62012-03-13 12:55:21 -07001423 ret = NULL;
Mike Hommeya14bce82012-04-30 12:38:26 +02001424 set_errno(err);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001425 }
Jason Evans122449b2012-04-06 00:35:09 -07001426 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1427 false);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001428 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -07001429}
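/*
 * Editor's note: illustrative usage sketch, not part of the original source.
 * It assumes the public (un-prefixed) names are mapped to the je_* entry
 * points above, as in a typical build.  Both aligned-allocation paths funnel
 * through imemalign(); posix_memalign() reports failure through its return
 * value, while aligned_alloc() returns NULL and sets errno.
 */
#if 0
#include <errno.h>
#include <stdlib.h>

static void
aligned_alloc_example(void)
{
	void *p;

	/* Alignment must be a power of two >= sizeof(void *). */
	if (posix_memalign(&p, 64, 1024) == 0)
		free(p);

	/* aligned_alloc() signals an invalid alignment via EINVAL. */
	p = aligned_alloc(4096, 4096);
	if (p == NULL && errno == EINVAL) {
		/* Invalid alignment requested. */
	}
	free(p);
}
#endif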
1430
Jason Evansb2c31662014-01-12 15:05:44 -08001431static void *
Jason Evans5460aa62014-09-22 21:09:23 -07001432icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001433{
1434 void *p;
1435
Jason Evans602c8e02014-08-18 16:22:13 -07001436 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001437 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001438 if (usize <= SMALL_MAXCLASS) {
Jason Evans5460aa62014-09-22 21:09:23 -07001439 p = icalloc(tsd, LARGE_MINCLASS);
Jason Evansb2c31662014-01-12 15:05:44 -08001440 if (p == NULL)
1441 return (NULL);
1442 arena_prof_promoted(p, usize);
1443 } else
Jason Evans5460aa62014-09-22 21:09:23 -07001444 p = icalloc(tsd, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001445
1446 return (p);
1447}
1448
1449JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001450icalloc_prof(tsd_t *tsd, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001451{
1452 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001453 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001454
Jason Evans5460aa62014-09-22 21:09:23 -07001455 tctx = prof_alloc_prep(tsd, usize, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001456 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Jason Evans5460aa62014-09-22 21:09:23 -07001457 p = icalloc_prof_sample(tsd, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001458 else
Jason Evans5460aa62014-09-22 21:09:23 -07001459 p = icalloc(tsd, usize);
Jason Evanscfc57062014-10-30 23:18:45 -07001460 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001461 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001462 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001463 }
Jason Evans602c8e02014-08-18 16:22:13 -07001464 prof_malloc(p, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001465
1466 return (p);
1467}
1468
Jason Evans289053c2009-06-22 12:08:42 -07001469void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001470je_calloc(size_t num, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001471{
1472 void *ret;
Jason Evans5460aa62014-09-22 21:09:23 -07001473 tsd_t *tsd;
Jason Evans289053c2009-06-22 12:08:42 -07001474 size_t num_size;
Jason Evans8694e2e2012-04-23 13:05:32 -07001475 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans289053c2009-06-22 12:08:42 -07001476
Jason Evans029d44c2014-10-04 11:12:53 -07001477 if (unlikely(malloc_init())) {
Jason Evans289053c2009-06-22 12:08:42 -07001478 num_size = 0;
1479 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001480 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001481 }
Jason Evans029d44c2014-10-04 11:12:53 -07001482 tsd = tsd_fetch();
Jason Evans289053c2009-06-22 12:08:42 -07001483
1484 num_size = num * size;
Jason Evans9c640bf2014-09-11 16:20:44 -07001485 if (unlikely(num_size == 0)) {
Jason Evansc90ad712012-02-28 20:31:37 -08001486 if (num == 0 || size == 0)
Jason Evans289053c2009-06-22 12:08:42 -07001487 num_size = 1;
1488 else {
1489 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001490 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001491 }
1492 /*
1493 * Try to avoid division here. We know that it isn't possible to
1494 * overflow during multiplication if neither operand uses any of the
1495 * most significant half of the bits in a size_t.
1496 */
Jason Evans9c640bf2014-09-11 16:20:44 -07001497 } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1498 2))) && (num_size / size != num))) {
Jason Evans289053c2009-06-22 12:08:42 -07001499 /* size_t overflow. */
1500 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001501 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001502 }
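	/*
	 * Editor's note (illustrative, not part of the original source): with
	 * a 64-bit size_t the mask above is SIZE_T_MAX << 32, i.e. the high
	 * half of the bits.  If neither num nor size has any of those bits
	 * set, both operands are < 2^32, so num * size < 2^64 and cannot
	 * wrap; the division check is only paid when at least one operand
	 * uses the high half.
	 */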
1503
Jason Evans7372b152012-02-10 20:22:09 -08001504 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001505 usize = s2u(num_size);
Jason Evans5460aa62014-09-22 21:09:23 -07001506 ret = icalloc_prof(tsd, usize);
Jason Evans7372b152012-02-10 20:22:09 -08001507 } else {
Jason Evans9c640bf2014-09-11 16:20:44 -07001508 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Jason Evans7372b152012-02-10 20:22:09 -08001509 usize = s2u(num_size);
Jason Evans5460aa62014-09-22 21:09:23 -07001510 ret = icalloc(tsd, num_size);
Jason Evans93443682010-10-20 17:39:18 -07001511 }
Jason Evans289053c2009-06-22 12:08:42 -07001512
Jason Evansa1ee7832012-04-10 15:07:44 -07001513label_return:
Jason Evans9c640bf2014-09-11 16:20:44 -07001514 if (unlikely(ret == NULL)) {
1515 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evans698805c2010-03-03 17:45:38 -08001516 malloc_write("<jemalloc>: Error in calloc(): out of "
1517 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001518 abort();
1519 }
Mike Hommeya14bce82012-04-30 12:38:26 +02001520 set_errno(ENOMEM);
Jason Evans289053c2009-06-22 12:08:42 -07001521 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001522 if (config_stats && likely(ret != NULL)) {
Jason Evans122449b2012-04-06 00:35:09 -07001523 assert(usize == isalloc(ret, config_prof));
Jason Evans5460aa62014-09-22 21:09:23 -07001524 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evans93443682010-10-20 17:39:18 -07001525 }
Jason Evansb1476112012-04-05 13:36:17 -07001526 UTRACE(0, num_size, ret);
Jason Evans122449b2012-04-06 00:35:09 -07001527 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
Jason Evans289053c2009-06-22 12:08:42 -07001528 return (ret);
1529}
1530
Jason Evansb2c31662014-01-12 15:05:44 -08001531static void *
Daniel Micayd33f8342014-10-24 13:18:57 -04001532irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
1533 prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001534{
1535 void *p;
1536
Jason Evans602c8e02014-08-18 16:22:13 -07001537 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001538 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001539 if (usize <= SMALL_MAXCLASS) {
Daniel Micayd33f8342014-10-24 13:18:57 -04001540 p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001541 if (p == NULL)
1542 return (NULL);
1543 arena_prof_promoted(p, usize);
1544 } else
Daniel Micayd33f8342014-10-24 13:18:57 -04001545 p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001546
1547 return (p);
1548}
1549
1550JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001551irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001552{
1553 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001554 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001555
Jason Evans602c8e02014-08-18 16:22:13 -07001556 old_tctx = prof_tctx_get(oldptr);
Jason Evans5460aa62014-09-22 21:09:23 -07001557 tctx = prof_alloc_prep(tsd, usize, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001558 if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
Daniel Micayd33f8342014-10-24 13:18:57 -04001559 p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001560 else
Daniel Micayd33f8342014-10-24 13:18:57 -04001561 p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001562 if (p == NULL)
1563 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07001564 prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001565
1566 return (p);
1567}
1568
1569JEMALLOC_INLINE_C void
Jason Evans5460aa62014-09-22 21:09:23 -07001570ifree(tsd_t *tsd, void *ptr, bool try_tcache)
Jason Evansb2c31662014-01-12 15:05:44 -08001571{
1572 size_t usize;
1573 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1574
1575 assert(ptr != NULL);
1576 assert(malloc_initialized || IS_INITIALIZER);
1577
1578 if (config_prof && opt_prof) {
1579 usize = isalloc(ptr, config_prof);
Jason Evans5460aa62014-09-22 21:09:23 -07001580 prof_free(tsd, ptr, usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001581 } else if (config_stats || config_valgrind)
1582 usize = isalloc(ptr, config_prof);
Jason Evans029d44c2014-10-04 11:12:53 -07001583 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001584 *tsd_thread_deallocatedp_get(tsd) += usize;
Jason Evans9c640bf2014-09-11 16:20:44 -07001585 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08001586 rzsize = p2rz(ptr);
Jason Evans5460aa62014-09-22 21:09:23 -07001587 iqalloc(tsd, ptr, try_tcache);
Jason Evansb2c31662014-01-12 15:05:44 -08001588 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1589}
1590
Daniel Micay4cfe5512014-08-28 15:41:48 -04001591JEMALLOC_INLINE_C void
Jason Evans5460aa62014-09-22 21:09:23 -07001592isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
Daniel Micay4cfe5512014-08-28 15:41:48 -04001593{
1594 UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1595
1596 assert(ptr != NULL);
1597 assert(malloc_initialized || IS_INITIALIZER);
1598
1599 if (config_prof && opt_prof)
Jason Evans5460aa62014-09-22 21:09:23 -07001600 prof_free(tsd, ptr, usize);
Jason Evans029d44c2014-10-04 11:12:53 -07001601 if (config_stats)
Jason Evans5460aa62014-09-22 21:09:23 -07001602 *tsd_thread_deallocatedp_get(tsd) += usize;
Jason Evans9c640bf2014-09-11 16:20:44 -07001603 if (config_valgrind && unlikely(in_valgrind))
Daniel Micay4cfe5512014-08-28 15:41:48 -04001604 rzsize = p2rz(ptr);
Jason Evans5460aa62014-09-22 21:09:23 -07001605 isqalloc(tsd, ptr, usize, try_tcache);
Daniel Micay4cfe5512014-08-28 15:41:48 -04001606 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1607}
1608
Jason Evans289053c2009-06-22 12:08:42 -07001609void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001610je_realloc(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001611{
1612 void *ret;
Jason Evans0800afd2014-10-04 14:59:17 -07001613 tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans8694e2e2012-04-23 13:05:32 -07001614 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans66576932013-12-15 16:21:30 -08001615 size_t old_usize = 0;
Jason Evans73692322013-12-10 13:51:52 -08001616 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans6109fe02010-02-10 10:37:56 -08001617
Jason Evans9c640bf2014-09-11 16:20:44 -07001618 if (unlikely(size == 0)) {
Jason Evansf081b882012-02-28 20:24:05 -08001619 if (ptr != NULL) {
Jason Evansb2c31662014-01-12 15:05:44 -08001620 /* realloc(ptr, 0) is equivalent to free(ptr). */
1621 UTRACE(ptr, 0, 0);
Jason Evans029d44c2014-10-04 11:12:53 -07001622 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07001623 ifree(tsd, ptr, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001624 return (NULL);
1625 }
1626 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001627 }
1628
Jason Evans9c640bf2014-09-11 16:20:44 -07001629 if (likely(ptr != NULL)) {
Jason Evans41b6afb2012-02-02 22:04:57 -08001630 assert(malloc_initialized || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08001631 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07001632 tsd = tsd_fetch();
Jason Evans289053c2009-06-22 12:08:42 -07001633
Daniel Micayd33f8342014-10-24 13:18:57 -04001634 old_usize = isalloc(ptr, config_prof);
Jason Evans029d44c2014-10-04 11:12:53 -07001635 if (config_valgrind && unlikely(in_valgrind))
1636 old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
Jason Evansb2c31662014-01-12 15:05:44 -08001637
Jason Evans029d44c2014-10-04 11:12:53 -07001638 if (config_prof && opt_prof) {
1639 usize = s2u(size);
1640 ret = irealloc_prof(tsd, ptr, old_usize, usize);
1641 } else {
1642 if (config_stats || (config_valgrind &&
1643 unlikely(in_valgrind)))
Jason Evans7372b152012-02-10 20:22:09 -08001644 usize = s2u(size);
Daniel Micayd33f8342014-10-24 13:18:57 -04001645 ret = iralloc(tsd, ptr, old_usize, size, 0, false);
Jason Evans029d44c2014-10-04 11:12:53 -07001646 }
Jason Evans289053c2009-06-22 12:08:42 -07001647 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001648 /* realloc(NULL, size) is equivalent to malloc(size). */
Jason Evans5460aa62014-09-22 21:09:23 -07001649 ret = imalloc_body(size, &tsd, &usize);
Jason Evans289053c2009-06-22 12:08:42 -07001650 }
1651
Jason Evans9c640bf2014-09-11 16:20:44 -07001652 if (unlikely(ret == NULL)) {
1653 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansb2c31662014-01-12 15:05:44 -08001654 malloc_write("<jemalloc>: Error in realloc(): "
1655 "out of memory\n");
1656 abort();
1657 }
1658 set_errno(ENOMEM);
1659 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001660 if (config_stats && likely(ret != NULL)) {
Jason Evans122449b2012-04-06 00:35:09 -07001661 assert(usize == isalloc(ret, config_prof));
Jason Evans029d44c2014-10-04 11:12:53 -07001662 *tsd_thread_allocatedp_get(tsd) += usize;
1663 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evans93443682010-10-20 17:39:18 -07001664 }
Jason Evansb1476112012-04-05 13:36:17 -07001665 UTRACE(ptr, size, ret);
Jason Evansbd87b012014-04-15 16:35:08 -07001666 JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1667 old_rzsize, true, false);
Jason Evans289053c2009-06-22 12:08:42 -07001668 return (ret);
1669}
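/*
 * Editor's note: illustrative usage sketch, not part of the original source;
 * it assumes the public realloc name maps to je_realloc above.  It shows the
 * two special cases handled at the top of the function.
 */
#if 0
#include <stdlib.h>

static void
realloc_example(void)
{
	void *p = realloc(NULL, 100);	/* Equivalent to malloc(100). */

	p = realloc(p, 200);		/* Ordinary resize. */
	p = realloc(p, 0);		/* Equivalent to free(p); returns NULL here. */
}
#endif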
1670
1671void
Jason Evans0a5489e2012-03-01 17:19:20 -08001672je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001673{
1674
Jason Evansb1476112012-04-05 13:36:17 -07001675 UTRACE(ptr, 0, 0);
Jason Evans9c640bf2014-09-11 16:20:44 -07001676 if (likely(ptr != NULL))
Jason Evans029d44c2014-10-04 11:12:53 -07001677 ifree(tsd_fetch(), ptr, true);
Jason Evans289053c2009-06-22 12:08:42 -07001678}
1679
1680/*
1681 * End malloc(3)-compatible functions.
1682 */
1683/******************************************************************************/
1684/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001685 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001686 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001687
1688#ifdef JEMALLOC_OVERRIDE_MEMALIGN
Jason Evans6a0d2912010-09-20 16:44:23 -07001689void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001690je_memalign(size_t alignment, size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001691{
Jason Evans9225a192012-03-23 15:39:07 -07001692 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001693 imemalign(&ret, alignment, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001694 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001695 return (ret);
1696}
1697#endif
1698
1699#ifdef JEMALLOC_OVERRIDE_VALLOC
Jason Evans6a0d2912010-09-20 16:44:23 -07001700void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001701je_valloc(size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001702{
Jason Evans9225a192012-03-23 15:39:07 -07001703 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evansae4c7b42012-04-02 07:04:34 -07001704 imemalign(&ret, PAGE, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001705 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001706 return (ret);
1707}
1708#endif
1709
Mike Hommey5c89c502012-03-26 17:46:57 +02001710/*
1711 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1712 * #define je_malloc malloc
1713 */
1714#define malloc_is_malloc 1
1715#define is_malloc_(a) malloc_is_ ## a
1716#define is_malloc(a) is_malloc_(a)
1717
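/*
 * Editor's note (illustrative, not part of the original source): when
 * jemalloc_defs.h contains "#define je_malloc malloc", the test below expands
 * as is_malloc(je_malloc) -> is_malloc_(malloc) -> malloc_is_malloc -> 1;
 * otherwise it expands to the undefined token malloc_is_je_malloc, which the
 * preprocessor evaluates as 0.
 */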
Sara Golemon3e24afa2014-08-18 13:06:39 -07001718#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
Jason Evans4bb09832012-02-29 10:37:27 -08001719/*
1720 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1721 * to inconsistently reference libc's malloc(3)-compatible functions
1722 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1723 *
Mike Hommey3c2ba0d2012-03-27 14:20:13 +02001724 * These definitions interpose hooks in glibc. The functions are actually
Jason Evans4bb09832012-02-29 10:37:27 -08001725 * passed an extra argument for the caller return address, which will be
1726 * ignored.
1727 */
Jason Evansa344dd02014-05-01 15:51:30 -07001728JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1729JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1730JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
Sara Golemon3e24afa2014-08-18 13:06:39 -07001731# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
Jason Evansa344dd02014-05-01 15:51:30 -07001732JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
Mike Hommeyda99e312012-04-30 12:38:29 +02001733 je_memalign;
Sara Golemon3e24afa2014-08-18 13:06:39 -07001734# endif
Jason Evans4bb09832012-02-29 10:37:27 -08001735#endif
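/*
 * Editor's note (illustrative, not part of the original source): with the
 * hook assignments above, code that still reaches the allocator through
 * glibc's legacy __malloc_hook/__free_hook mechanism (for example a library
 * loaded with RTLD_DEEPBIND that binds to libc's malloc) is redirected to the
 * je_*() implementations, so allocation and deallocation stay paired with the
 * same allocator.
 */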
1736
Jason Evans6a0d2912010-09-20 16:44:23 -07001737/*
1738 * End non-standard override functions.
1739 */
1740/******************************************************************************/
1741/*
Jason Evans289053c2009-06-22 12:08:42 -07001742 * Begin non-standard functions.
1743 */
1744
Jason Evans8bb31982014-10-07 23:14:57 -07001745JEMALLOC_ALWAYS_INLINE_C bool
1746imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
Jason Evansb718cf72014-09-07 14:40:19 -07001747 size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
1748{
1749
1750 if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1751 *alignment = 0;
1752 *usize = s2u(size);
1753 } else {
1754 *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1755 *usize = sa2u(size, *alignment);
1756 }
1757 *zero = MALLOCX_ZERO_GET(flags);
1758 if ((flags & MALLOCX_ARENA_MASK) != 0) {
1759 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1760 *try_tcache = false;
Jason Evans8bb31982014-10-07 23:14:57 -07001761 *arena = arena_get(tsd, arena_ind, true, true);
1762 if (unlikely(*arena == NULL))
1763 return (true);
Jason Evansb718cf72014-09-07 14:40:19 -07001764 } else {
1765 *try_tcache = true;
1766 *arena = NULL;
1767 }
Jason Evans8bb31982014-10-07 23:14:57 -07001768 return (false);
Jason Evansb718cf72014-09-07 14:40:19 -07001769}
1770
Jason Evans8bb31982014-10-07 23:14:57 -07001771JEMALLOC_ALWAYS_INLINE_C bool
1772imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1773 size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
Jason Evansb718cf72014-09-07 14:40:19 -07001774{
1775
Jason Evans9c640bf2014-09-11 16:20:44 -07001776 if (likely(flags == 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07001777 *usize = s2u(size);
1778		assert(*usize != 0);
1779 *alignment = 0;
1780 *zero = false;
1781 *try_tcache = true;
1782 *arena = NULL;
Jason Evans8bb31982014-10-07 23:14:57 -07001783 return (false);
Jason Evansb718cf72014-09-07 14:40:19 -07001784 } else {
Jason Evans8bb31982014-10-07 23:14:57 -07001785 return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1786 alignment, zero, try_tcache, arena));
Jason Evansb718cf72014-09-07 14:40:19 -07001787 }
1788}
1789
Jason Evansd82a5e62013-12-12 22:35:52 -08001790JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001791imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1792 bool try_tcache, arena_t *arena)
Jason Evans289053c2009-06-22 12:08:42 -07001793{
Jason Evansd82a5e62013-12-12 22:35:52 -08001794
Jason Evans5460aa62014-09-22 21:09:23 -07001795 if (alignment != 0) {
1796 return (ipalloct(tsd, usize, alignment, zero, try_tcache,
1797 arena));
1798 }
Jason Evansb718cf72014-09-07 14:40:19 -07001799 if (zero)
Jason Evans5460aa62014-09-22 21:09:23 -07001800 return (icalloct(tsd, usize, try_tcache, arena));
1801 return (imalloct(tsd, usize, try_tcache, arena));
Jason Evansb718cf72014-09-07 14:40:19 -07001802}
1803
Jason Evansb718cf72014-09-07 14:40:19 -07001804JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001805imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
1806 size_t alignment, bool zero, bool try_tcache, arena_t *arena)
Jason Evansb718cf72014-09-07 14:40:19 -07001807{
1808
Jason Evans9c640bf2014-09-11 16:20:44 -07001809 if (likely(flags == 0))
Jason Evans5460aa62014-09-22 21:09:23 -07001810 return (imalloc(tsd, size));
1811 return (imallocx_flags(tsd, usize, alignment, zero, try_tcache, arena));
Jason Evansd82a5e62013-12-12 22:35:52 -08001812}
1813
Jason Evansb2c31662014-01-12 15:05:44 -08001814static void *
Jason Evans5460aa62014-09-22 21:09:23 -07001815imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
1816 size_t alignment, bool zero, bool try_tcache, arena_t *arena)
Jason Evansb2c31662014-01-12 15:05:44 -08001817{
1818 void *p;
1819
Jason Evans9b0cbf02014-04-11 14:24:51 -07001820 if (usize <= SMALL_MAXCLASS) {
Jason Evansb718cf72014-09-07 14:40:19 -07001821 assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1822 sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
Jason Evans5460aa62014-09-22 21:09:23 -07001823 p = imalloct(tsd, LARGE_MINCLASS, try_tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08001824 if (p == NULL)
1825 return (NULL);
1826 arena_prof_promoted(p, usize);
Jason Evansb718cf72014-09-07 14:40:19 -07001827 } else {
Jason Evans5460aa62014-09-22 21:09:23 -07001828 p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
1829 zero, try_tcache, arena);
Jason Evansb718cf72014-09-07 14:40:19 -07001830 }
Jason Evansb2c31662014-01-12 15:05:44 -08001831
1832 return (p);
1833}
1834
1835JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001836imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
Jason Evansb2c31662014-01-12 15:05:44 -08001837{
1838 void *p;
Jason Evansb718cf72014-09-07 14:40:19 -07001839 size_t alignment;
1840 bool zero;
1841 bool try_tcache;
1842 arena_t *arena;
1843 prof_tctx_t *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001844
Jason Evans8bb31982014-10-07 23:14:57 -07001845 if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
1846 &zero, &try_tcache, &arena)))
1847 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07001848 tctx = prof_alloc_prep(tsd, *usize, true);
Jason Evans9c640bf2014-09-11 16:20:44 -07001849 if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001850 p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
1851 zero, try_tcache, arena);
Jason Evansb718cf72014-09-07 14:40:19 -07001852 } else if ((uintptr_t)tctx > (uintptr_t)1U) {
Jason Evans5460aa62014-09-22 21:09:23 -07001853 p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
1854 zero, try_tcache, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08001855 } else
Jason Evansb718cf72014-09-07 14:40:19 -07001856 p = NULL;
Jason Evans9c640bf2014-09-11 16:20:44 -07001857 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001858 prof_alloc_rollback(tsd, tctx, true);
Jason Evansb2c31662014-01-12 15:05:44 -08001859 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001860 }
Jason Evansb718cf72014-09-07 14:40:19 -07001861 prof_malloc(p, *usize, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001862
1863 return (p);
1864}
1865
Jason Evansb718cf72014-09-07 14:40:19 -07001866JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001867imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
Jason Evansb718cf72014-09-07 14:40:19 -07001868{
1869 size_t alignment;
1870 bool zero;
1871 bool try_tcache;
1872 arena_t *arena;
1873
Jason Evans9c640bf2014-09-11 16:20:44 -07001874 if (likely(flags == 0)) {
1875 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Jason Evansb718cf72014-09-07 14:40:19 -07001876 *usize = s2u(size);
Jason Evans5460aa62014-09-22 21:09:23 -07001877 return (imalloc(tsd, size));
Jason Evansb718cf72014-09-07 14:40:19 -07001878 }
1879
Jason Evans8bb31982014-10-07 23:14:57 -07001880 if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
1881 &alignment, &zero, &try_tcache, &arena)))
1882 return (NULL);
Jason Evans5460aa62014-09-22 21:09:23 -07001883 return (imallocx_flags(tsd, *usize, alignment, zero, try_tcache,
1884 arena));
Jason Evansb718cf72014-09-07 14:40:19 -07001885}
1886
Jason Evansd82a5e62013-12-12 22:35:52 -08001887void *
1888je_mallocx(size_t size, int flags)
1889{
Jason Evans5460aa62014-09-22 21:09:23 -07001890 tsd_t *tsd;
Jason Evansd82a5e62013-12-12 22:35:52 -08001891 void *p;
1892 size_t usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08001893
1894 assert(size != 0);
1895
Jason Evans029d44c2014-10-04 11:12:53 -07001896 if (unlikely(malloc_init()))
Jason Evansd82a5e62013-12-12 22:35:52 -08001897 goto label_oom;
Jason Evans029d44c2014-10-04 11:12:53 -07001898 tsd = tsd_fetch();
Jason Evansd82a5e62013-12-12 22:35:52 -08001899
Jason Evansb718cf72014-09-07 14:40:19 -07001900 if (config_prof && opt_prof)
Jason Evans5460aa62014-09-22 21:09:23 -07001901 p = imallocx_prof(tsd, size, flags, &usize);
Jason Evansb718cf72014-09-07 14:40:19 -07001902 else
Jason Evans5460aa62014-09-22 21:09:23 -07001903 p = imallocx_no_prof(tsd, size, flags, &usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07001904 if (unlikely(p == NULL))
Jason Evansb2c31662014-01-12 15:05:44 -08001905 goto label_oom;
Jason Evansd82a5e62013-12-12 22:35:52 -08001906
1907 if (config_stats) {
1908 assert(usize == isalloc(p, config_prof));
Jason Evans029d44c2014-10-04 11:12:53 -07001909 *tsd_thread_allocatedp_get(tsd) += usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08001910 }
1911 UTRACE(0, size, p);
Jason Evansb718cf72014-09-07 14:40:19 -07001912 JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
Jason Evansd82a5e62013-12-12 22:35:52 -08001913 return (p);
1914label_oom:
Jason Evans9c640bf2014-09-11 16:20:44 -07001915 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansd82a5e62013-12-12 22:35:52 -08001916 malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
1917 abort();
1918 }
1919 UTRACE(0, size, 0);
1920 return (NULL);
1921}
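/*
 * Editor's note: illustrative usage sketch, not part of the original source;
 * it assumes the public MALLOCX_* macros and un-prefixed mallocx()/dallocx()
 * names from <jemalloc/jemalloc.h>.  It shows how the flags decoded above can
 * be combined by a caller.
 */
#if 0
static void
mallocx_example(void)
{
	/* 4 KiB, zero-filled, aligned to a 64-byte boundary. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

	if (p != NULL)
		dallocx(p, 0);
}
#endif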
1922
Jason Evansb2c31662014-01-12 15:05:44 -08001923static void *
Daniel Micayd33f8342014-10-24 13:18:57 -04001924irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
1925 size_t alignment, size_t usize, bool zero, bool try_tcache_alloc,
1926 bool try_tcache_dalloc, arena_t *arena, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08001927{
1928 void *p;
1929
Jason Evans602c8e02014-08-18 16:22:13 -07001930 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08001931 return (NULL);
Jason Evans9b0cbf02014-04-11 14:24:51 -07001932 if (usize <= SMALL_MAXCLASS) {
Daniel Micayd33f8342014-10-24 13:18:57 -04001933 p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
1934 zero, try_tcache_alloc, try_tcache_dalloc, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08001935 if (p == NULL)
1936 return (NULL);
1937 arena_prof_promoted(p, usize);
1938 } else {
Daniel Micayd33f8342014-10-24 13:18:57 -04001939 p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
Jason Evans5460aa62014-09-22 21:09:23 -07001940 try_tcache_alloc, try_tcache_dalloc, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08001941 }
1942
1943 return (p);
1944}
1945
1946JEMALLOC_ALWAYS_INLINE_C void *
Jason Evans5460aa62014-09-22 21:09:23 -07001947irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
1948 size_t alignment, size_t *usize, bool zero, bool try_tcache_alloc,
1949 bool try_tcache_dalloc, arena_t *arena)
Jason Evansb2c31662014-01-12 15:05:44 -08001950{
1951 void *p;
Jason Evans6e73dc12014-09-09 19:37:26 -07001952 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08001953
Jason Evans602c8e02014-08-18 16:22:13 -07001954 old_tctx = prof_tctx_get(oldptr);
Jason Evans5460aa62014-09-22 21:09:23 -07001955 tctx = prof_alloc_prep(tsd, *usize, false);
Jason Evans9c640bf2014-09-11 16:20:44 -07001956 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Daniel Micayd33f8342014-10-24 13:18:57 -04001957 p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
1958 alignment, *usize, zero, try_tcache_alloc,
1959 try_tcache_dalloc, arena, tctx);
Jason Evans6e73dc12014-09-09 19:37:26 -07001960 } else {
Daniel Micayd33f8342014-10-24 13:18:57 -04001961 p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
Jason Evans5460aa62014-09-22 21:09:23 -07001962 try_tcache_alloc, try_tcache_dalloc, arena);
Jason Evansb2c31662014-01-12 15:05:44 -08001963 }
Jason Evans9c640bf2014-09-11 16:20:44 -07001964 if (unlikely(p == NULL)) {
Jason Evans5460aa62014-09-22 21:09:23 -07001965 prof_alloc_rollback(tsd, tctx, false);
Jason Evansb2c31662014-01-12 15:05:44 -08001966 return (NULL);
Jason Evans6e73dc12014-09-09 19:37:26 -07001967 }
Jason Evansb2c31662014-01-12 15:05:44 -08001968
1969 if (p == oldptr && alignment != 0) {
1970 /*
1971 * The allocation did not move, so it is possible that the size
1972 * class is smaller than would guarantee the requested
1973 * alignment, and that the alignment constraint was
1974 * serendipitously satisfied. Additionally, old_usize may not
1975 * be the same as the current usize because of in-place large
1976 * reallocation. Therefore, query the actual value of usize.
1977 */
1978 *usize = isalloc(p, config_prof);
1979 }
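	/*
	 * Editor's note (illustrative, not part of the original source): for
	 * example, rallocx(p, 40, MALLOCX_ALIGN(64)) precomputes *usize for
	 * the aligned case, but if p already happens to sit on a 64-byte
	 * boundary and is resized in place, the size class actually used may
	 * differ from the precomputed one, hence the isalloc() query above.
	 */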
Jason Evans5460aa62014-09-22 21:09:23 -07001980 prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08001981
1982 return (p);
1983}
1984
Jason Evansd82a5e62013-12-12 22:35:52 -08001985void *
1986je_rallocx(void *ptr, size_t size, int flags)
1987{
1988 void *p;
Jason Evans5460aa62014-09-22 21:09:23 -07001989 tsd_t *tsd;
Jason Evans9c640bf2014-09-11 16:20:44 -07001990 size_t usize;
Daniel Micayd33f8342014-10-24 13:18:57 -04001991 size_t old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08001992 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07001993 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08001994 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08001995 bool try_tcache_alloc, try_tcache_dalloc;
1996 arena_t *arena;
1997
1998 assert(ptr != NULL);
1999 assert(size != 0);
2000 assert(malloc_initialized || IS_INITIALIZER);
2001 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002002 tsd = tsd_fetch();
Jason Evans5460aa62014-09-22 21:09:23 -07002003
Jason Evans9c640bf2014-09-11 16:20:44 -07002004 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07002005 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002006 arena_chunk_t *chunk;
2007 try_tcache_alloc = false;
2008 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evans8bb31982014-10-07 23:14:57 -07002009 arena = arena_get(tsd, arena_ind, true, true);
2010 if (unlikely(arena == NULL))
2011 goto label_oom;
2012 try_tcache_dalloc = (chunk == ptr || chunk->arena != arena);
Jason Evansd82a5e62013-12-12 22:35:52 -08002013 } else {
2014 try_tcache_alloc = true;
2015 try_tcache_dalloc = true;
2016 arena = NULL;
2017 }
2018
Daniel Micayd33f8342014-10-24 13:18:57 -04002019 old_usize = isalloc(ptr, config_prof);
Jason Evans9c640bf2014-09-11 16:20:44 -07002020 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002021 old_rzsize = u2rz(old_usize);
2022
Jason Evansd82a5e62013-12-12 22:35:52 -08002023 if (config_prof && opt_prof) {
Jason Evansb2c31662014-01-12 15:05:44 -08002024 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2025 assert(usize != 0);
Jason Evans5460aa62014-09-22 21:09:23 -07002026 p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2027 zero, try_tcache_alloc, try_tcache_dalloc, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002028 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002029 goto label_oom;
Jason Evansd82a5e62013-12-12 22:35:52 -08002030 } else {
Daniel Micayd33f8342014-10-24 13:18:57 -04002031 p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2032 try_tcache_alloc, try_tcache_dalloc, arena);
Jason Evans9c640bf2014-09-11 16:20:44 -07002033 if (unlikely(p == NULL))
Jason Evansd82a5e62013-12-12 22:35:52 -08002034 goto label_oom;
Jason Evans9c640bf2014-09-11 16:20:44 -07002035 if (config_stats || (config_valgrind && unlikely(in_valgrind)))
Jason Evansd82a5e62013-12-12 22:35:52 -08002036 usize = isalloc(p, config_prof);
2037 }
2038
2039 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002040 *tsd_thread_allocatedp_get(tsd) += usize;
2041 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002042 }
2043 UTRACE(ptr, size, p);
Jason Evansbd87b012014-04-15 16:35:08 -07002044 JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2045 old_rzsize, false, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002046 return (p);
2047label_oom:
Jason Evans9c640bf2014-09-11 16:20:44 -07002048 if (config_xmalloc && unlikely(opt_xmalloc)) {
Jason Evansd82a5e62013-12-12 22:35:52 -08002049 malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2050 abort();
2051 }
2052 UTRACE(ptr, size, 0);
2053 return (NULL);
2054}
2055
Jason Evansb2c31662014-01-12 15:05:44 -08002056JEMALLOC_ALWAYS_INLINE_C size_t
2057ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002058 size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002059{
2060 size_t usize;
2061
Daniel Micayd33f8342014-10-24 13:18:57 -04002062 if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002063 return (old_usize);
2064 usize = isalloc(ptr, config_prof);
2065
2066 return (usize);
2067}
2068
2069static size_t
2070ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002071 size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
Jason Evansb2c31662014-01-12 15:05:44 -08002072{
2073 size_t usize;
2074
Jason Evans602c8e02014-08-18 16:22:13 -07002075 if (tctx == NULL)
Jason Evansb2c31662014-01-12 15:05:44 -08002076 return (old_usize);
2077 /* Use minimum usize to determine whether promotion may happen. */
Jason Evans9b0cbf02014-04-11 14:24:51 -07002078 if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
2079 SMALL_MAXCLASS) {
Daniel Micayd33f8342014-10-24 13:18:57 -04002080 if (ixalloc(ptr, old_usize, SMALL_MAXCLASS+1,
2081 (SMALL_MAXCLASS+1 >= size+extra) ? 0 : size+extra -
2082 (SMALL_MAXCLASS+1), alignment, zero))
Jason Evansb2c31662014-01-12 15:05:44 -08002083 return (old_usize);
2084 usize = isalloc(ptr, config_prof);
Jason Evans155bfa72014-10-05 17:54:10 -07002085 if (max_usize < LARGE_MINCLASS)
Jason Evansb2c31662014-01-12 15:05:44 -08002086 arena_prof_promoted(ptr, usize);
2087 } else {
2088 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
Daniel Micaydc652132014-10-30 23:23:16 -04002089 zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002090 }
2091
2092 return (usize);
2093}
2094
2095JEMALLOC_ALWAYS_INLINE_C size_t
Jason Evans5460aa62014-09-22 21:09:23 -07002096ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
Daniel Micaydc652132014-10-30 23:23:16 -04002097 size_t extra, size_t alignment, bool zero)
Jason Evansb2c31662014-01-12 15:05:44 -08002098{
Jason Evans6e73dc12014-09-09 19:37:26 -07002099 size_t max_usize, usize;
2100 prof_tctx_t *old_tctx, *tctx;
Jason Evansb2c31662014-01-12 15:05:44 -08002101
Jason Evans602c8e02014-08-18 16:22:13 -07002102 old_tctx = prof_tctx_get(ptr);
Jason Evans6e73dc12014-09-09 19:37:26 -07002103 /*
2104 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2105 * Therefore, compute its maximum possible value and use that in
2106 * prof_alloc_prep() to decide whether to capture a backtrace.
2107 * prof_realloc() will use the actual usize to decide whether to sample.
2108 */
2109 max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2110 alignment);
Jason Evans5460aa62014-09-22 21:09:23 -07002111 tctx = prof_alloc_prep(tsd, max_usize, false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002112 if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
Jason Evansb2c31662014-01-12 15:05:44 -08002113 usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002114	    alignment, max_usize, zero, tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002115 } else {
2116 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
Daniel Micaydc652132014-10-30 23:23:16 -04002117 zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002118 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002119 if (unlikely(usize == old_usize)) {
Jason Evans5460aa62014-09-22 21:09:23 -07002120 prof_alloc_rollback(tsd, tctx, false);
Jason Evansb2c31662014-01-12 15:05:44 -08002121 return (usize);
Jason Evans6e73dc12014-09-09 19:37:26 -07002122 }
Jason Evans5460aa62014-09-22 21:09:23 -07002123 prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
Jason Evansb2c31662014-01-12 15:05:44 -08002124
2125 return (usize);
2126}
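/*
 * Editor's note (illustrative, not part of the original source): as a
 * concrete case of the max_usize logic above, xallocx(ptr, 100, 50, 0) can
 * end up anywhere from s2u(100) to s2u(150) depending on how much in-place
 * growth succeeds, so s2u(150) is the value that must drive the sampling
 * decision in prof_alloc_prep().
 */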
2127
Jason Evansd82a5e62013-12-12 22:35:52 -08002128size_t
2129je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2130{
Jason Evans5460aa62014-09-22 21:09:23 -07002131 tsd_t *tsd;
Jason Evans66576932013-12-15 16:21:30 -08002132 size_t usize, old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002133 UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evansb718cf72014-09-07 14:40:19 -07002134 size_t alignment = MALLOCX_ALIGN_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002135 bool zero = flags & MALLOCX_ZERO;
Jason Evansd82a5e62013-12-12 22:35:52 -08002136
2137 assert(ptr != NULL);
2138 assert(size != 0);
2139 assert(SIZE_T_MAX - size >= extra);
2140 assert(malloc_initialized || IS_INITIALIZER);
2141 malloc_thread_init();
Jason Evans029d44c2014-10-04 11:12:53 -07002142 tsd = tsd_fetch();
Jason Evansd82a5e62013-12-12 22:35:52 -08002143
Jason Evansb2c31662014-01-12 15:05:44 -08002144 old_usize = isalloc(ptr, config_prof);
Jason Evans9c640bf2014-09-11 16:20:44 -07002145 if (config_valgrind && unlikely(in_valgrind))
Jason Evansb2c31662014-01-12 15:05:44 -08002146 old_rzsize = u2rz(old_usize);
Jason Evansd82a5e62013-12-12 22:35:52 -08002147
2148 if (config_prof && opt_prof) {
Jason Evans5460aa62014-09-22 21:09:23 -07002149 usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
Daniel Micaydc652132014-10-30 23:23:16 -04002150 alignment, zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002151 } else {
Jason Evansb2c31662014-01-12 15:05:44 -08002152 usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
Daniel Micaydc652132014-10-30 23:23:16 -04002153 zero);
Jason Evansd82a5e62013-12-12 22:35:52 -08002154 }
Jason Evans9c640bf2014-09-11 16:20:44 -07002155 if (unlikely(usize == old_usize))
Jason Evansb2c31662014-01-12 15:05:44 -08002156 goto label_not_resized;
Jason Evansd82a5e62013-12-12 22:35:52 -08002157
2158 if (config_stats) {
Jason Evans5460aa62014-09-22 21:09:23 -07002159 *tsd_thread_allocatedp_get(tsd) += usize;
2160 *tsd_thread_deallocatedp_get(tsd) += old_usize;
Jason Evansd82a5e62013-12-12 22:35:52 -08002161 }
Jason Evansbd87b012014-04-15 16:35:08 -07002162 JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2163 old_rzsize, false, zero);
Jason Evansb2c31662014-01-12 15:05:44 -08002164label_not_resized:
Jason Evansd82a5e62013-12-12 22:35:52 -08002165 UTRACE(ptr, size, ptr);
2166 return (usize);
2167}
2168
2169size_t
2170je_sallocx(const void *ptr, int flags)
2171{
2172 size_t usize;
Jason Evans289053c2009-06-22 12:08:42 -07002173
Jason Evans41b6afb2012-02-02 22:04:57 -08002174 assert(malloc_initialized || IS_INITIALIZER);
Jason Evansbbe29d32013-01-30 15:03:11 -08002175 malloc_thread_init();
Jason Evans8e3c3c62010-09-17 15:46:18 -07002176
Jason Evans7372b152012-02-10 20:22:09 -08002177 if (config_ivsalloc)
Jason Evansd82a5e62013-12-12 22:35:52 -08002178 usize = ivsalloc(ptr, config_prof);
2179 else {
2180 assert(ptr != NULL);
2181 usize = isalloc(ptr, config_prof);
2182 }
Jason Evans289053c2009-06-22 12:08:42 -07002183
Jason Evansd82a5e62013-12-12 22:35:52 -08002184 return (usize);
Jason Evans289053c2009-06-22 12:08:42 -07002185}
2186
Jason Evans4201af02010-01-24 02:53:40 -08002187void
Jason Evansd82a5e62013-12-12 22:35:52 -08002188je_dallocx(void *ptr, int flags)
Jason Evans4201af02010-01-24 02:53:40 -08002189{
Jason Evans8bb31982014-10-07 23:14:57 -07002190 tsd_t *tsd;
Jason Evansd82a5e62013-12-12 22:35:52 -08002191 bool try_tcache;
Jason Evans4201af02010-01-24 02:53:40 -08002192
Jason Evansd82a5e62013-12-12 22:35:52 -08002193 assert(ptr != NULL);
2194 assert(malloc_initialized || IS_INITIALIZER);
2195
Jason Evans8bb31982014-10-07 23:14:57 -07002196 tsd = tsd_fetch();
Jason Evans9c640bf2014-09-11 16:20:44 -07002197 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
Jason Evansb718cf72014-09-07 14:40:19 -07002198 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
Jason Evansd82a5e62013-12-12 22:35:52 -08002199 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evans8bb31982014-10-07 23:14:57 -07002200 arena_t *arena = arena_get(tsd, arena_ind, true, true);
2201 /*
2202 * If arena is NULL, the application passed an arena that has
2203 * never been used before, which is unsupported during
2204 * deallocation.
2205 */
2206 assert(arena != NULL);
2207 try_tcache = (chunk == ptr || chunk->arena != arena);
Jason Evansd82a5e62013-12-12 22:35:52 -08002208 } else
2209 try_tcache = true;
2210
2211 UTRACE(ptr, 0, 0);
Jason Evans029d44c2014-10-04 11:12:53 -07002212	ifree(tsd, ptr, try_tcache);
Jason Evansd82a5e62013-12-12 22:35:52 -08002213}
2214
Jason Evansa2260c92014-09-09 10:29:26 -07002215JEMALLOC_ALWAYS_INLINE_C size_t
2216inallocx(size_t size, int flags)
2217{
2218 size_t usize;
2219
Jason Evans9c640bf2014-09-11 16:20:44 -07002220 if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
Jason Evansa2260c92014-09-09 10:29:26 -07002221 usize = s2u(size);
2222 else
2223 usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2224 assert(usize != 0);
2225 return (usize);
2226}
2227
Daniel Micay4cfe5512014-08-28 15:41:48 -04002228void
2229je_sdallocx(void *ptr, size_t size, int flags)
2230{
Jason Evans8bb31982014-10-07 23:14:57 -07002231 tsd_t *tsd;
Daniel Micay4cfe5512014-08-28 15:41:48 -04002232 bool try_tcache;
Jason Evansa2260c92014-09-09 10:29:26 -07002233 size_t usize;
Daniel Micay4cfe5512014-08-28 15:41:48 -04002234
2235 assert(ptr != NULL);
2236 assert(malloc_initialized || IS_INITIALIZER);
Jason Evansa2260c92014-09-09 10:29:26 -07002237 usize = inallocx(size, flags);
2238 assert(usize == isalloc(ptr, config_prof));
Daniel Micay4cfe5512014-08-28 15:41:48 -04002239
Jason Evans8bb31982014-10-07 23:14:57 -07002240 tsd = tsd_fetch();
Jason Evans9c640bf2014-09-11 16:20:44 -07002241 if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
Daniel Micay4cfe5512014-08-28 15:41:48 -04002242 unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2243 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evans8bb31982014-10-07 23:14:57 -07002244 arena_t *arena = arena_get(tsd, arena_ind, true, true);
2245 /*
2246 * If arena is NULL, the application passed an arena that has
2247 * never been used before, which is unsupported during
2248 * deallocation.
2249 */
2250 try_tcache = (chunk == ptr || chunk->arena != arena);
Daniel Micay4cfe5512014-08-28 15:41:48 -04002251 } else
2252 try_tcache = true;
2253
2254 UTRACE(ptr, 0, 0);
Jason Evans8bb31982014-10-07 23:14:57 -07002255 isfree(tsd, ptr, usize, try_tcache);
Daniel Micay4cfe5512014-08-28 15:41:48 -04002256}
2257
Jason Evansd82a5e62013-12-12 22:35:52 -08002258size_t
2259je_nallocx(size_t size, int flags)
2260{
Jason Evansd82a5e62013-12-12 22:35:52 -08002261
2262 assert(size != 0);
2263
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002264 if (unlikely(malloc_init()))
Jason Evansd82a5e62013-12-12 22:35:52 -08002265 return (0);
2266
Jason Evansa2260c92014-09-09 10:29:26 -07002267 return (inallocx(size, flags));
Jason Evans4201af02010-01-24 02:53:40 -08002268}
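/*
 * Editor's note: illustrative usage sketch, not part of the original source;
 * it assumes the un-prefixed public names.  nallocx() reports the usable size
 * a matching mallocx() call would produce without allocating, and xallocx()
 * attempts an in-place resize, returning the resulting usable size whether or
 * not it grew.
 */
#if 0
static void
sizing_example(void)
{
	size_t usize = nallocx(100, 0);	/* Size-class-rounded, e.g. 112. */
	void *p = mallocx(100, 0);

	if (p != NULL) {
		/* Try to grow in place to at least 200 bytes; never moves. */
		if (xallocx(p, 200, 0, 0) < 200) {
			/* In-place growth failed; usable size unchanged. */
		}
		dallocx(p, 0);
	}
	(void)usize;
}
#endif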
2269
Jason Evans3c234352010-01-27 13:10:55 -08002270int
Jason Evans0a5489e2012-03-01 17:19:20 -08002271je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08002272 size_t newlen)
2273{
2274
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002275 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002276 return (EAGAIN);
2277
Jason Evans3c234352010-01-27 13:10:55 -08002278 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2279}
2280
Jason Evans3c234352010-01-27 13:10:55 -08002281int
Jason Evans0a5489e2012-03-01 17:19:20 -08002282je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08002283{
2284
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002285 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002286 return (EAGAIN);
2287
Jason Evans3c234352010-01-27 13:10:55 -08002288 return (ctl_nametomib(name, mibp, miblenp));
2289}
2290
Jason Evans3c234352010-01-27 13:10:55 -08002291int
Jason Evans0a5489e2012-03-01 17:19:20 -08002292je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2293 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08002294{
2295
Daniel Micay23fdf8b2014-09-09 15:26:05 -04002296 if (unlikely(malloc_init()))
Jason Evans95833312010-01-27 13:45:21 -08002297 return (EAGAIN);
2298
Jason Evans3c234352010-01-27 13:10:55 -08002299 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2300}
2301
Jason Evansd82a5e62013-12-12 22:35:52 -08002302void
2303je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2304 const char *opts)
2305{
2306
2307 stats_print(write_cb, cbopaque, opts);
2308}
2309
2310size_t
2311je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2312{
2313 size_t ret;
2314
2315 assert(malloc_initialized || IS_INITIALIZER);
2316 malloc_thread_init();
2317
2318 if (config_ivsalloc)
2319 ret = ivsalloc(ptr, config_prof);
2320 else
2321 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
2322
2323 return (ret);
2324}
2325
Jason Evans7e77eaf2012-03-02 17:47:37 -08002326/*
2327 * End non-standard functions.
2328 */
2329/******************************************************************************/
2330/*
Jason Evans289053c2009-06-22 12:08:42 -07002331 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07002332 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07002333 */
2334
Jason Evans20f1fc92012-10-09 14:46:22 -07002335/*
2336 * If an application creates a thread before doing any allocation in the main
2337 * thread, then calls fork(2) in the main thread followed by memory allocation
2338 * in the child process, a race can occur that results in deadlock within the
2339 * child: the main thread may have forked while the created thread had
2340 * partially initialized the allocator. Ordinarily jemalloc prevents
2341 * fork/malloc races via the following functions it registers during
2342 * initialization using pthread_atfork(), but of course that does no good if
2343 * the allocator isn't fully initialized at fork time. The following library
Jason Evans9b756772014-10-10 18:19:20 -07002344 * constructor is a partial solution to this problem. It may still be possible
2345 * to trigger the deadlock described above, but doing so would involve forking
2346 * via a library constructor that runs before jemalloc's runs.
Jason Evans20f1fc92012-10-09 14:46:22 -07002347 */
2348JEMALLOC_ATTR(constructor)
2349static void
2350jemalloc_constructor(void)
2351{
2352
2353 malloc_init();
2354}
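/*
 * Editor's note (illustrative, not part of the original source): the
 * pthread_atfork() registration referred to in the comment above is
 * conceptually equivalent to the following call made during initialization,
 * so that the three functions below bracket every fork(2) once the allocator
 * is up:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */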
2355
Jason Evans41b6afb2012-02-02 22:04:57 -08002356#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002357void
Jason Evans804c9ec2009-06-22 17:44:33 -07002358jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002359#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002360JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002361_malloc_prefork(void)
2362#endif
Jason Evans289053c2009-06-22 12:08:42 -07002363{
Jason Evansfbbb6242010-01-24 17:56:48 -08002364 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07002365
Jason Evans58ad1e42012-05-11 17:40:16 -07002366#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans551ebc42014-10-03 10:16:09 -07002367 if (!malloc_initialized)
Jason Evans58ad1e42012-05-11 17:40:16 -07002368 return;
2369#endif
2370 assert(malloc_initialized);
2371
Jason Evans289053c2009-06-22 12:08:42 -07002372 /* Acquire all mutexes in a safe order. */
Jason Evans20f1fc92012-10-09 14:46:22 -07002373 ctl_prefork();
Jason Evans88c222c2013-02-06 11:59:30 -08002374 prof_prefork();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002375 malloc_mutex_prefork(&arenas_lock);
Jason Evans609ae592012-10-11 13:53:15 -07002376 for (i = 0; i < narenas_total; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08002377 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002378 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08002379 }
Jason Evansb5225922012-10-09 16:16:00 -07002380 chunk_prefork();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002381 base_prefork();
2382 huge_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07002383}
2384
Jason Evans41b6afb2012-02-02 22:04:57 -08002385#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07002386void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002387jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08002388#else
Mike Hommeyda99e312012-04-30 12:38:29 +02002389JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08002390_malloc_postfork(void)
2391#endif
Jason Evans289053c2009-06-22 12:08:42 -07002392{
2393 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07002394
Jason Evans58ad1e42012-05-11 17:40:16 -07002395#ifdef JEMALLOC_MUTEX_INIT_CB
Jason Evans551ebc42014-10-03 10:16:09 -07002396 if (!malloc_initialized)
Jason Evans58ad1e42012-05-11 17:40:16 -07002397 return;
2398#endif
2399 assert(malloc_initialized);
2400
Jason Evans289053c2009-06-22 12:08:42 -07002401 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002402 huge_postfork_parent();
2403 base_postfork_parent();
Jason Evansb5225922012-10-09 16:16:00 -07002404 chunk_postfork_parent();
Jason Evans609ae592012-10-11 13:53:15 -07002405 for (i = 0; i < narenas_total; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08002406 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002407 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07002408 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002409 malloc_mutex_postfork_parent(&arenas_lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002410 prof_postfork_parent();
Jason Evans20f1fc92012-10-09 14:46:22 -07002411 ctl_postfork_parent();
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002412}
2413
2414void
2415jemalloc_postfork_child(void)
2416{
2417 unsigned i;
2418
Jason Evans58ad1e42012-05-11 17:40:16 -07002419 assert(malloc_initialized);
2420
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002421 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002422 huge_postfork_child();
2423 base_postfork_child();
Jason Evansb5225922012-10-09 16:16:00 -07002424 chunk_postfork_child();
Jason Evans609ae592012-10-11 13:53:15 -07002425 for (i = 0; i < narenas_total; i++) {
Jason Evans4e2e3dd2012-03-13 16:31:41 -07002426 if (arenas[i] != NULL)
2427 arena_postfork_child(arenas[i]);
2428 }
2429 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002430 prof_postfork_child();
Jason Evans20f1fc92012-10-09 14:46:22 -07002431 ctl_postfork_child();
Jason Evans289053c2009-06-22 12:08:42 -07002432}
Jason Evans2dbecf12010-09-05 10:35:13 -07002433
2434/******************************************************************************/