#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

pthread_key_t		arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#endif
pthread_key_t		thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#ifdef NO_TLS
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size);

/******************************************************************************/
/* malloc_message() setup. */

JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}

void	(*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
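
/*
 * Note: the fast-path counterpart, choose_arena() (assumed to be an inline in
 * the internal headers, not shown in this file), consults the thread's cached
 * arena via ARENA_GET() and falls back to choose_arena_hard() only on a
 * thread's first allocation, so the locking above stays off the common path.
 */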

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
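
/*
 * Illustrative use (assuming an errno value from a failed call):
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 *
 * The wrapper hides whether the platform's strerror_r() follows the POSIX
 * (int-returning) or GNU (char *-returning) convention.
 */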

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

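/*
 * The counters initialized here back the "thread.allocated" and
 * "thread.deallocated" mallctls; ALLOCATED_ADD() (assumed to be defined in
 * the internal headers) bumps them on the allocation and deallocation paths
 * below.
 */
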
/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
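
/*
 * The accepted grammar is a comma-separated list of key:value pairs, e.g.
 * (an illustrative invocation, not from this file):
 *
 *	MALLOC_CONF="lg_chunk:24,narenas:4,stats_print:true" ./a.out
 *
 * for which successive calls to malloc_conf_next() yield the pairs
 * ("lg_chunk", "24"), ("narenas", "4"), and ("stats_print", "true").
 */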

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
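
/*
 * Note on precedence: the three sources are processed in order (compiled-in
 * malloc_conf, then the /etc/malloc.conf symlink, then the MALLOC_CONF
 * environment variable), so a later source overrides any option also set by
 * an earlier one.
 */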

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
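	/*
	 * (Thus the default is four arenas per CPU; e.g. ncpus == 8 yields
	 * opt_narenas == 32.)
	 */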
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
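		/*
		 * Sampled small requests are promoted to the smallest large
		 * size class (SMALL_MAXCLASS+1) so that a profiling context
		 * can be attached to the allocation; arena_prof_promoted()
		 * records the original usize so that size introspection still
		 * reports the small size.  The same pattern recurs in the
		 * other allocation entry points below.
		 */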
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
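	/*
	 * For example (illustrative), with a 32-bit size_t the mask below is
	 * 0xffff0000: if num and size both fit in 16 bits, their product fits
	 * in 32 bits and cannot wrap.
	 */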
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size);
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

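/*
 * The flags word for the experimental *allocm() API packs the requested
 * alignment and behavior bits together; e.g. (illustrative, assuming the
 * ALLOCM_LG_ALIGN()/ALLOCM_ZERO macros from the public jemalloc header)
 * ALLOCM_LG_ALIGN(4) | ALLOCM_ZERO requests 16-byte-aligned, zeroed memory.
 * The decoding below extracts the low lg-alignment bits and converts them to
 * a byte alignment via the shift.
 */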
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}

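/*
 * Usage sketch (illustrative): grow an allocation in place by up to `extra'
 * bytes.  With ALLOCM_NO_MOVE, rallocm() returns ALLOCM_ERR_NOT_MOVED rather
 * than relocating, so a caller can fall back to a moving resize:
 *
 *	size_t rsize;
 *	if (rallocm(&p, &rsize, size, extra, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		result = rallocm(&p, &rsize, size, extra, 0);
 *
 * As above, the unprefixed name assumes JEMALLOC_P() is an identity macro.
 */
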
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

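/*
 * Usage sketch (illustrative): query the usable size of an existing
 * allocation; the flags argument is accepted but currently unused:
 *
 *	size_t rsize;
 *	sallocm(p, &rsize, 0);
 *	assert(rsize >= size);
 */
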
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

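/*
 * Usage sketch (illustrative): release an allocation obtained via allocm()
 * or rallocm(); flags is likewise currently unused:
 *
 *	dallocm(p, 0);
 */
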
/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
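
/*
 * Registration sketch (illustrative): these handlers are intended to be
 * installed during allocator initialization via pthread_atfork(), so that
 * every allocator mutex is held across fork() and released in both the
 * parent and the child:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork,
 *	    jemalloc_postfork);
 */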

/******************************************************************************/