#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

pthread_key_t		arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#endif
pthread_key_t		thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#ifdef NO_TLS
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment);

/******************************************************************************/
/* malloc_message() setup. */

JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}

void	(*je_malloc_message)(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose] == NULL || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
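
/*
 * Illustrative use of buferror() (an example for exposition; this file does
 * not call it): capture errno right after a failing call and render it into
 * a caller-owned buffer.
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 */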

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

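/*
 * A conf string is a comma-separated list of key:value pairs, e.g.
 * "abort:true,narenas:8,stats_print:true" (an illustrative setting, not a
 * recommendation).  The same syntax is accepted from all three sources
 * probed below.
 */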
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

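/*
 * Descriptive note: the boot sequence below is order-sensitive.  Options are
 * parsed before any subsystem consults opt_* values, base/chunk bootstrapping
 * precedes the first base_alloc() call, and a single scaffold arena exists
 * before malloc_ncpus() runs, since that call may allocate recursively.
 */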
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
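		/*
		 * Descriptive note: a sampled request ((uintptr_t)cnt !=
		 * (uintptr_t)1U) of small size is promoted to the smallest
		 * large class so the object gets its own run, which can carry
		 * per-object profiling context; the true usize is then
		 * recorded via arena_prof_promoted().
		 */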
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (enforce_min_alignment && alignment < sizeof(void *))) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, true);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
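/*
 * Unlike posix_memalign(), memalign() and valloc() need not reject alignments
 * smaller than sizeof(void *), hence enforce_min_alignment == false here.
 */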
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, false);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

Jason Evans6a0d2912010-09-20 16:44:23 -07001386JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001387JEMALLOC_ATTR(visibility("default"))
1388int
Jason Evans0a5489e2012-03-01 17:19:20 -08001389je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001390{
1391 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001392 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001393 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1394 & (SIZE_T_MAX-1));
1395 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001396 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001397
1398 assert(ptr != NULL);
1399 assert(size != 0);
1400
1401 if (malloc_init())
1402 goto OOM;
1403
Jason Evans749c2a02011-08-12 18:37:54 -07001404 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001405 if (usize == 0)
1406 goto OOM;
1407
Jason Evans7372b152012-02-10 20:22:09 -08001408 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001409 PROF_ALLOC_PREP(1, usize, cnt);
1410 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001411 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001412 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001413 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001414 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001415 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -07001416 alignment, NULL);
1417 assert(usize_promoted != 0);
1418 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001419 if (p == NULL)
1420 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001421 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001422 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001423 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001424 if (p == NULL)
1425 goto OOM;
1426 }
Jason Evans749c2a02011-08-12 18:37:54 -07001427 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001428 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001429 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001430 if (p == NULL)
1431 goto OOM;
1432 }
Jason Evans7372b152012-02-10 20:22:09 -08001433 if (rsize != NULL)
1434 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001435
1436 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001437 if (config_stats) {
1438 assert(usize == isalloc(p));
1439 ALLOCATED_ADD(usize, 0);
1440 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001441 return (ALLOCM_SUCCESS);
1442OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001443 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001444 malloc_write("<jemalloc>: Error in allocm(): "
1445 "out of memory\n");
1446 abort();
1447 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001448 *ptr = NULL;
1449 return (ALLOCM_ERR_OOM);
1450}
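
/*
 * Usage sketch (editor's addition): allocate a page-aligned, zeroed region
 * and learn its usable size in one call.  ALLOCM_LG_ALIGN() takes the base-2
 * logarithm of the desired alignment, so 12 requests 4096-byte alignment.
 *
 *	void *p;
 *	size_t rsize;
 *
 *	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) ==
 *	    ALLOCM_SUCCESS) {
 *		assert(((uintptr_t)p & 4095) == 0 && rsize >= 4096);
 *		dallocm(p, 0);
 *	}
 */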

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
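
/*
 * Usage sketch (editor's addition; p, rsize, and new_size are assumed to be
 * in scope): attempt to grow an allocation in place, falling back to a moving
 * reallocation only when necessary.  With ALLOCM_NO_MOVE set, rallocm()
 * reports ALLOCM_ERR_NOT_MOVED instead of relocating *ptr, so p remains valid
 * across the failed attempt.
 *
 *	int ret = rallocm(&p, &rsize, new_size, 0, ALLOCM_NO_MOVE);
 *	if (ret == ALLOCM_ERR_NOT_MOVED)
 *		ret = rallocm(&p, &rsize, new_size, 0, 0);
 */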

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}
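
/*
 * Usage sketch (editor's addition; p and requested_size are assumed to be in
 * scope): sallocm() reports the usable size of an existing allocation, which
 * can exceed the requested size because of size-class rounding.
 *
 *	size_t usable;
 *
 *	sallocm(p, &usable, 0);
 *	assert(usable >= requested_size);
 */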

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
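
/*
 * Usage sketch (editor's addition; p is assumed to be in scope): nallocm()
 * computes the size class a request would map to without allocating, which
 * lets a caller request exactly what the allocator would hand back anyway,
 * leaving no unused trailing space.
 *
 *	size_t usize;
 *
 *	if (nallocm(&usize, 500, 0) == ALLOCM_SUCCESS)
 *		allocm(&p, NULL, usize, 0);
 */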

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
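
/*
 * Usage sketch (editor's addition): the pair above is intended to be
 * registered with pthread_atfork(3), so that a forked child never inherits a
 * held allocator mutex; release in the child as well as the parent, since the
 * child's copies of the mutexes are locked at fork time.
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
 */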

/******************************************************************************/