#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
#endif
pthread_key_t thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_sysv = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#ifdef NO_TLS
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size);

/******************************************************************************/
/* malloc_message() setup. */

JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}

void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
	    + (sizeof(arena_bin_t) * nbins));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
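
/*
 * Example: with narenas == 4 and per-arena thread counts {2, 1, -, 3}, where
 * slot 2 is uninitialized, the scan above ends with choose == 1 and
 * first_null == 2.  Since arenas[1] still has threads assigned and an
 * uninitialized slot exists, arenas_extend(2) is called rather than further
 * loading arena 1.
 */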

/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
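
/*
 * Note: the GNU strerror_r() used under _GNU_SOURCE returns a char * that may
 * point at a static string instead of filling buf, hence the copy-back above;
 * the XSI-conforming variant in the #else branch returns an int and always
 * writes into buf.
 */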

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
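
/*
 * Example: given the conf string "lg_chunk:24,narenas:4", the first call
 * yields k = "lg_chunk" (klen == 8) and v = "24" (vlen == 2) and consumes the
 * comma; the second call yields ("narenas", "4") and leaves *opts_p at the
 * terminating '\0'.
 */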

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

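			/*
			 * Each CONF_HANDLE_* expansion ends in `continue` on a
			 * key match, so the handlers below are tried in order
			 * and at most one consumes a given key/value pair;
			 * control reaches the "Invalid conf pair" error only
			 * when no handler matched.
			 */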
			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_sysv) {
				CONF_HANDLE_BOOL(sysv)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0,
				    LG_PROF_BT_MAX)
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}

		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}
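
/*
 * Note: the three configuration sources above are processed in order
 * (compiled-in malloc_conf, the /etc/malloc.conf symlink target, then the
 * MALLOC_CONF environment variable), so when a key appears in more than one
 * source, the later source wins.
 */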

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	if (arena_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
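	/*
	 * Example: with no narenas override on an 8-CPU system, opt_narenas
	 * becomes 32 (ncpus << 2); per the comment above, over-provisioning
	 * arenas relative to CPUs dilutes lock contention among threads.
	 */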
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
		if (config_sysv == false || opt_sysv == false)
			size = 1;
		else {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
			ret = NULL;
			goto RETURN;
		}
	}

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
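		/*
		 * (uintptr_t)1U is PROF_ALLOC_PREP()'s sentinel for an
		 * unsampled allocation.  For sampled small requests, the
		 * promotion below to small_maxclass+1 (the smallest large
		 * size class) is intended to let profiling metadata be
		 * tracked per allocation; arena_prof_promoted() then records
		 * the real usize.
		 */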
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			ret = imalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}

RETURN:
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
			if (config_sysv == false || opt_sysv == false)
				size = 1;
			else {
				if (config_xmalloc && opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= small_maxclass) {
					assert(sa2u(small_maxclass+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(small_maxclass+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if ((config_sysv == false || opt_sysv == false)
		    && ((num == 0) || (size == 0)))
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}
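	/*
	 * Example of the overflow check above on a 64-bit system:
	 * SIZE_T_MAX << (sizeof(size_t) << 2) masks the upper 32 bits, so if
	 * both num and size fit in 32 bits their product cannot overflow and
	 * the division is skipped entirely; otherwise num_size / size != num
	 * detects wraparound.
	 */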

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= small_maxclass) {
			ret = icalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (config_sysv == false || opt_sysv == false)
			size = 1;
		else {
			if (ptr != NULL) {
				if (config_prof || config_stats)
					old_size = isalloc(ptr);
				if (config_prof && opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
				idalloc(ptr);
			} else if (config_prof && opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
			ret = NULL;
			goto RETURN;
		}
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= small_maxclass) {
				ret = iralloc(ptr, small_maxclass+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    small_maxclass) {
						ret = imalloc(small_maxclass+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size);
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}
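
/*
 * Note on flag decoding in allocm()/rallocm() below: the low bits of `flags`
 * hold the base-2 log of the requested alignment, so `ZU(1) << (flags &
 * ALLOCM_LG_ALIGN_MASK)` recovers the alignment itself, and the subsequent
 * `& (SIZE_T_MAX-1)` turns a decoded alignment of 1 into 0, i.e. "no
 * alignment constraint".  ALLOCM_ZERO and ALLOCM_NO_MOVE are independent bit
 * flags.
 */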
1415
Jason Evans6a0d2912010-09-20 16:44:23 -07001416JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001417JEMALLOC_ATTR(visibility("default"))
1418int
1419JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
1420{
1421 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001422 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001423 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1424 & (SIZE_T_MAX-1));
1425 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001426 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001427
1428 assert(ptr != NULL);
1429 assert(size != 0);
1430
1431 if (malloc_init())
1432 goto OOM;
1433
Jason Evans749c2a02011-08-12 18:37:54 -07001434 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001435 if (usize == 0)
1436 goto OOM;
1437
Jason Evans7372b152012-02-10 20:22:09 -08001438 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001439 PROF_ALLOC_PREP(1, usize, cnt);
1440 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001441 goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
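
#if 0
/*
 * Usage sketch (illustrative application code, not compiled into this
 * file): request a 4096-byte-aligned, zeroed 4 KiB region and learn its
 * usable size in the same call, using ALLOCM_LG_ALIGN() from the public
 * header.
 */
static void *
page_alloc_zeroed(size_t *rsize)
{
	void *p;

	if (JEMALLOC_P(allocm)(&p, rsize, 4096,
	    ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
		return (NULL);
	/* *rsize now reports the usable size, >= 4096. */
	return (p);
}
#endif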

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= small_maxclass) {
			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
			    size+extra) ? 0 : size+extra - (small_maxclass+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
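
#if 0
/*
 * Usage sketch (illustrative application code, not compiled into this
 * file): attempt to grow an allocation in place, and only fall back to a
 * moving reallocation when that fails.  ALLOCM_ERR_NOT_MOVED leaves the
 * original object untouched, so the fallback is safe.  Returns true on
 * error.
 */
static bool
grow_preferably_in_place(void **ptrp, size_t newsize)
{
	size_t rsize;

	if (JEMALLOC_P(rallocm)(ptrp, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
	    ALLOCM_SUCCESS)
		return (false);
	/* In-place resize failed; allow the object to move. */
	return (JEMALLOC_P(rallocm)(ptrp, &rsize, newsize, 0, 0) !=
	    ALLOCM_SUCCESS);
}
#endif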

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

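	/*
	 * With ivsalloc enabled, the pointer is first validated against the
	 * registry of chunks that jemalloc owns, so a foreign pointer yields
	 * a size of 0 instead of tripping an assertion inside isalloc().
	 */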
	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
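
#if 0
/*
 * Usage sketch (illustrative application code, not compiled into this
 * file): because sallocm() reports the usable size that dallocm() will
 * release, the pair supports byte-accurate accounting around frees.
 */
static size_t live_bytes;	/* Hypothetical process-wide counter. */

static void
counted_free(void *p)
{
	size_t usize;

	if (JEMALLOC_P(sallocm)(p, &usize, 0) == ALLOCM_SUCCESS)
		live_bytes -= usize;
	JEMALLOC_P(dallocm)(p, 0);
}
#endif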

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries so that malloc
 * state remains consistent across fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
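
#if 0
/*
 * Registration sketch: the allocator wires these handlers up once during
 * initialization via pthread_atfork(), with the postfork handler running
 * in both the parent and the child so that every mutex acquired in
 * jemalloc_prefork() is released on both sides of the fork.  The error
 * handling shown here is illustrative.
 */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
    jemalloc_postfork) != 0) {
	malloc_write("<jemalloc>: Error in pthread_atfork()\n");
	if (opt_abort)
		abort();
}
#endif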

/******************************************************************************/