#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

pthread_key_t		arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#endif
pthread_key_t		thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#ifdef NO_TLS
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment);

/******************************************************************************/
/* malloc_message() setup. */

JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}

void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
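
/*
 * Usage sketch (added commentary, not part of the original file): because
 * malloc_message is an assignable function pointer with default visibility,
 * an application can redirect jemalloc's diagnostic output, e.g.:
 *
 *	static void
 *	my_write(void *cbopaque, const char *s)
 *	{
 *		fputs(s, stderr);	// or a hypothetical application log
 *	}
 *	...
 *	JEMALLOC_P(malloc_message) = my_write;
 */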

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB:  It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
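
/*
 * Usage sketch (added commentary, not part of the original file): callers
 * typically pass the saved errno and then emit the formatted message, e.g.:
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 */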

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
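
/*
 * Parsing sketch (added commentary, not part of the original file): given
 * the conf string "lg_chunk:22,narenas:4", successive malloc_conf_next()
 * calls yield (k="lg_chunk", klen=8, v="22", vlen=2), then (k="narenas",
 * klen=7, v="4", vlen=1), and then the caller's loop sees end of input.
 * Keys and values are returned as pointers into the original string, not as
 * NUL-terminated copies, hence the strncmp()/vlen checks below.
 */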

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
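
/*
 * Configuration sketch (added commentary, not part of the original file):
 * the three sources above are processed in order, so later sources override
 * earlier ones.  Assuming a build without JEMALLOC_PREFIX:
 *
 *  0) compiled-in string:   const char *malloc_conf = "lg_chunk:22";
 *  1) symbolic link target: ln -s 'stats_print:true' /etc/malloc.conf
 *     (readlink() above reads the option string out of the link itself)
 *  2) environment variable: MALLOC_CONF="narenas:2,abort:true" ./a.out
 */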

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (enforce_min_alignment && alignment < sizeof(void *))) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, true);
}
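
/*
 * Usage sketch (added commentary, not part of the original file): with
 * enforce_min_alignment true, alignment must be a power of two no smaller
 * than sizeof(void *), per POSIX:
 *
 *	void *p;
 *	int err = posix_memalign(&p, 64, 1024);
 *	// err == 0 on success; EINVAL for e.g. alignment 3 (not a power of
 *	// two) or 2 (smaller than sizeof(void *)).
 */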

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}
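
	/*
	 * Worked example (added commentary, not part of the original file):
	 * with an 8-byte size_t, the mask above is SIZE_T_MAX << 32, i.e. the
	 * high 32 bits.  If both num and size fit in 32 bits, their product
	 * fits in 64 bits and cannot overflow, so the && short-circuits and
	 * the division check is skipped on that common path.
	 */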

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, false);
	return (ret);
}
#endif

#if defined(__GLIBC__) && !defined(__UCLIBC__)
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = JEMALLOC_P(free);

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = JEMALLOC_P(malloc);

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = JEMALLOC_P(realloc);

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) =
    JEMALLOC_P(memalign);
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}
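
/*
 * Note (added commentary, not part of the original file):
 * malloc_usable_size() reports the size of the size class backing the
 * allocation, which may exceed the requested size; e.g. a malloc(100)
 * request is served from a larger class, and the application may use the
 * full returned size.
 */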

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
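
/*
 * Usage sketch (added commentary, not part of the original file): reading
 * the current thread's arena index via the "thread.arena" mallctl named in
 * choose_arena_hard()'s comment above:
 *
 *	unsigned arena_ind;
 *	size_t len = sizeof(arena_ind);
 *	if (mallctl("thread.arena", &arena_ind, &len, NULL, 0) == 0) {
 *		// arena_ind now holds this thread's arena index.
 *	}
 *
 * mallctlnametomib()/mallctlbymib() let callers translate a name to a MIB
 * once and reuse it for repeated lookups of the same control.
 */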
1369
Jason Evans8e3c3c62010-09-17 15:46:18 -07001370JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001371iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001372{
1373
Jason Evans38d92102011-03-23 00:37:29 -07001374 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1375 NULL)));
1376
Jason Evans8e3c3c62010-09-17 15:46:18 -07001377 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001378 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001379 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001380 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001381 else
Jason Evans38d92102011-03-23 00:37:29 -07001382 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001383}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
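
/*
 * A minimal allocm() usage sketch (illustrative, not part of the library):
 * request at least 4096 bytes, 64-byte aligned and zero-filled; rsize
 * receives the usable size actually granted.
 *
 *	void *p;
 *	size_t rsize;
 *
 *	if (JEMALLOC_P(allocm)(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) |
 *	    ALLOCM_ZERO) != ALLOCM_SUCCESS)
 *		... handle ALLOCM_ERR_OOM ...
 *	assert(rsize >= 4096 && ((uintptr_t)p & 63) == 0);
 */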

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
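
/*
 * Sketch of the remaining lifecycle calls (illustrative): sallocm() reports
 * the usable size of an existing allocation and dallocm() frees it; note
 * that, as implemented above, neither consults its flags argument.
 *
 *	size_t sz;
 *
 *	JEMALLOC_P(sallocm)(p, &sz, 0);
 *	assert(sz == rsize);
 *	JEMALLOC_P(dallocm)(p, 0);
 */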

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries to protect malloc
 * during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
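
/*
 * Wiring sketch: jemalloc's initialization registers these hooks itself, and
 * a threading library that bypasses that path would do the equivalent:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork,
 *	    jemalloc_postfork);
 *
 * The prepare hook runs in the parent before fork(), and the same release
 * function runs in both parent and child afterward, so the child starts with
 * every allocator mutex unlocked.
 */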

/******************************************************************************/