#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifdef JEMALLOC_STATS
# ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
# else
pthread_key_t thread_allocated_tsd;
# endif
#endif

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size);

/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
	if (result < 0)
		result = errno;
#endif
}

void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

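/*
 * Illustrative sketch (hypothetical application code, not part of this file):
 * a program can redirect allocator diagnostics by assigning its own writer,
 * e.g.
 *
 *	static void
 *	my_write_cb(void *cbopaque, const char *s)
 *	{
 *		fputs(s, stderr);
 *	}
 *	...
 *	JEMALLOC_P(malloc_message) = my_write_cb;
 *
 * The cbopaque argument is ignored by the default wrtmessage() above.
 */
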
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
	    + (sizeof(arena_bin_t) * nbins));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}

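/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 *
 * Either strerror_r() variant leaves the message in ebuf, so callers need
 * not care which one the platform provides.
 */
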
static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			/*
			 * tcache_stats_merge() locks bins, so if any code is
			 * introduced that acquires both arena and bin locks in
			 * the opposite order, deadlocks may result.
			 */
			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
#endif

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; default to claiming one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

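/*
 * Illustrative sketch (hypothetical configuration, using option names that
 * malloc_conf_init() actually handles below): the conf string is a
 * comma-separated list of key:value pairs, e.g.
 *
 *	MALLOC_CONF="abort:true,narenas:4,lg_chunk:22"
 *
 * malloc_conf_next() splits this into the pairs (abort, true), (narenas, 4),
 * and (lg_chunk, 22), each of which is dispatched through one of the
 * CONF_HANDLE_*() macros.
 */
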
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
			CONF_HANDLE_BOOL(junk)
			CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
			CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
			CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
			CONF_HANDLE_BOOL(tcache)
			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
			    (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
			CONF_HANDLE_BOOL(prof)
			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
			CONF_HANDLE_BOOL(prof_active)
			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_accum)
			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_gdump)
			CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
			CONF_HANDLE_BOOL(overcommit)
#endif
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}

		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

#ifdef JEMALLOC_PROF
	prof_boot0();
#endif

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_PROF
	prof_boot1();
#endif

	if (arena_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_TCACHE
	if (tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
	/* Initialize allocation counters before any allocations can occur. */
	if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
	    != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

#ifdef JEMALLOC_PROF
	if (prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
# endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			ret = imalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(size);
#endif
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize
#ifdef JEMALLOC_CC_SILENCE
	    = 0
#endif
	    ;
	void *result;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				if (opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
# endif
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

#ifdef JEMALLOC_PROF
		if (opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= small_maxclass) {
					assert(sa2u(small_maxclass+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(small_maxclass+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
#endif
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
	if (result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
#endif
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.  For example, with a
	 * 32-bit size_t, any num and size that both fit in 16 bits have a
	 * product that fits in 32 bits, so only larger operands need the
	 * division-based check.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= small_maxclass) {
			ret = icalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(num_size);
#endif
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
	size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
	prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
			if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
				old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
				if (opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
#endif
				idalloc(ptr);
			}
#ifdef JEMALLOC_PROF
			else if (opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
#endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= small_maxclass) {
				ret = iralloc(ptr, small_maxclass+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else
#endif
		{
#ifdef JEMALLOC_STATS
			usize = s2u(size);
#endif
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

#ifdef JEMALLOC_PROF
OOM:
#endif
		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
#ifdef JEMALLOC_PROF
		if (opt_prof)
			old_ctx = NULL;
#endif
		if (malloc_init()) {
#ifdef JEMALLOC_PROF
			if (opt_prof)
				cnt = NULL;
#endif
			ret = NULL;
		} else {
#ifdef JEMALLOC_PROF
			if (opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    small_maxclass) {
						ret = imalloc(small_maxclass+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else
#endif
			{
#ifdef JEMALLOC_STATS
				usize = s2u(size);
#endif
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
#endif
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		size_t usize;
#endif

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_STATS
		usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
# ifndef JEMALLOC_STATS
			usize = isalloc(ptr);
# endif
			prof_free(ptr, usize);
		}
#endif
#ifdef JEMALLOC_STATS
		ALLOCATED_ADD(0, usize);
#endif
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    imemalign(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    imemalign(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

Jason Evanse476f8a2010-01-16 09:53:50 -08001499JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001500size_t
Jason Evanse476f8a2010-01-16 09:53:50 -08001501JEMALLOC_P(malloc_usable_size)(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001502{
Jason Evans569432c2009-12-29 00:09:15 -08001503 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001504
Jason Evans8e3c3c62010-09-17 15:46:18 -07001505 assert(malloc_initialized || malloc_initializer == pthread_self());
1506
Jason Evans2dbecf12010-09-05 10:35:13 -07001507#ifdef JEMALLOC_IVSALLOC
1508 ret = ivsalloc(ptr);
1509#else
Jason Evans289053c2009-06-22 12:08:42 -07001510 assert(ptr != NULL);
Jason Evans569432c2009-12-29 00:09:15 -08001511 ret = isalloc(ptr);
Jason Evans2dbecf12010-09-05 10:35:13 -07001512#endif
Jason Evans289053c2009-06-22 12:08:42 -07001513
Jason Evans569432c2009-12-29 00:09:15 -08001514 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001515}
1516
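/*
 * Illustrative sketch (not part of this file): malloc_usable_size() may
 * report more space than was requested, because allocations are rounded up
 * to a size class.  A caller may use the entire reported range:
 */
#if 0
	void *p = JEMALLOC_P(malloc)(100);

	if (p != NULL) {
		/* Usually > 100, due to size class rounding. */
		size_t usable = JEMALLOC_P(malloc_usable_size)(p);
		/* The full usable range may be written safely. */
		memset(p, 0, usable);	/* Assumes <string.h>. */
		JEMALLOC_P(free)(p);
	}
#endif
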
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

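/*
 * Illustrative sketch (not part of this file): passing NULL for write_cb
 * selects the default write callback (stderr).  Characters in opts suppress
 * parts of the report (e.g. "a" omits per-arena statistics) -- behavior
 * assumed from the manual of this era.
 */
#if 0
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);	/* Full report. */
#endif
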
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

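/*
 * Illustrative sketch (not part of this file): reading a statistic by name,
 * and the nametomib/bymib pattern that amortizes name lookup across repeated
 * queries.  "epoch" must be advanced to refresh cached statistics, and
 * "stats.allocated" requires --enable-stats; both names are assumed from the
 * manual.
 */
#if 0
	size_t allocated, len, mib[2], miblen;
	uint64_t epoch;

	/* Refresh cached statistics, then read the total. */
	epoch = 1;
	len = sizeof(epoch);
	JEMALLOC_P(mallctl)("epoch", &epoch, &len, &epoch, len);
	len = sizeof(allocated);
	JEMALLOC_P(mallctl)("stats.allocated", &allocated, &len, NULL, 0);

	/* Equivalent read via a pre-translated MIB. */
	miblen = sizeof(mib) / sizeof(mib[0]);
	JEMALLOC_P(mallctlnametomib)("stats.allocated", mib, &miblen);
	JEMALLOC_P(mallctlbymib)(mib, miblen, &allocated, &len, NULL, 0);
#endif
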
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = p;
#ifdef JEMALLOC_STATS
	assert(usize == isalloc(p));
	ALLOCATED_ADD(usize, 0);
#endif
	return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}

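/*
 * Illustrative sketch (not part of this file): requesting a 64-byte-aligned,
 * zeroed allocation and retrieving its real size in one call.  ALLOCM_ALIGN()
 * encodes lg(alignment) into the low flag bits, per the experimental *allocm
 * API of this era.
 */
#if 0
	void *p;
	size_t rsize;

	if (JEMALLOC_P(allocm)(&p, &rsize, 4096, ALLOCM_ALIGN(64) |
	    ALLOCM_ZERO) != ALLOCM_SUCCESS) {
		/* Handle OOM. */
	}
	/* On success: p is 64-byte-aligned, zero-filled, and rsize >= 4096. */
#endif
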
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t old_size;
#endif
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
#ifdef JEMALLOC_PROF
	if (opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= small_maxclass) {
			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
			    size+extra) ? 0 : size+extra - (small_maxclass+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		old_size = isalloc(p);
#endif
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
			usize = isalloc(q);
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = q;
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(usize, old_size);
#endif
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	return (ALLOCM_ERR_OOM);
}

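/*
 * Illustrative sketch (not part of this file): attempting an in-place resize
 * first.  With ALLOCM_NO_MOVE, rallocm() returns ALLOCM_ERR_NOT_MOVED rather
 * than relocating the object, so the caller can fall back explicitly.  The
 * initial allocation and target size below are assumptions for illustration.
 */
#if 0
	size_t rsize, newsize = 8192;		/* newsize: assumed target. */
	void *p = JEMALLOC_P(malloc)(4096);	/* p: assumed live object. */
	int err;

	err = JEMALLOC_P(rallocm)(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE);
	if (err == ALLOCM_ERR_NOT_MOVED) {
		/* Could not resize in place; permit relocation this time. */
		err = JEMALLOC_P(rallocm)(&p, &rsize, newsize, 0, 0);
	}
#endif
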
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	sz = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	sz = isalloc(ptr);
#endif
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize;
#endif

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
	usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof) {
# ifndef JEMALLOC_STATS
		usize = isalloc(ptr);
# endif
		prof_free(ptr, usize);
	}
#endif
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(0, usize);
#endif
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

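/*
 * Illustrative sketch (not part of this file): sallocm() is the *allocm
 * analog of malloc_usable_size(), and dallocm() the analog of free().
 * Neither function inspects its flags argument in this version.
 */
#if 0
	void *p = JEMALLOC_P(malloc)(100);
	size_t usable;

	if (p != NULL) {
		JEMALLOC_P(sallocm)(p, &usable, 0); /* Real size of p. */
		JEMALLOC_P(dallocm)(p, 0);	    /* Deallocate p. */
	}
#endif
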
/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries to protect malloc
 * during fork().
 */

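/*
 * Illustrative sketch (not part of this file): a typical registration,
 * performed during allocator initialization, so that a child process
 * inherits unlocked (rather than permanently locked) allocator mutexes:
 */
#if 0
	pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
#endif
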
void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
#endif
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
	malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}

/******************************************************************************/