#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

pthread_key_t arenas_tsd;
#ifdef JEMALLOC_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifdef JEMALLOC_TLS
__thread thread_allocated_t thread_allocated_tls;
#endif
pthread_key_t thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#ifndef JEMALLOC_TLS
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path. Instead, punt
     * by using arenas[0]. In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    ARENA_SET(ret);

    return (ret);
}
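
/*
 * Illustrative walk-through of the policy above (hypothetical numbers,
 * not part of the library): with narenas == 4, all slots initialized,
 * and per-arena thread counts {3, 1, 2, 4}, the scan settles on
 * choose == 1 and first_null stays at narenas. No slot is free, so the
 * least loaded arena (arenas[1]) is reused rather than a new arena
 * being created.
 */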

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned i;

        /*
         * Merge stats from extant threads. This is racy, since
         * individual threads do not lock when recording tcache stats
         * events. As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0; i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
    thread_allocated_t *thread_allocated = (thread_allocated_t *)
        imalloc(sizeof(thread_allocated_t));
    if (thread_allocated == NULL) {
        static thread_allocated_t static_thread_allocated = {0, 0};
        malloc_write("<jemalloc>: Error allocating TSD;"
            " mallctl(\"thread.{de,}allocated[p]\", ...)"
            " will be inaccurate\n");
        if (opt_abort)
            abort();
        return (&static_thread_allocated);
    }
    pthread_setspecific(thread_allocated_tsd, thread_allocated);
    thread_allocated->allocated = 0;
    thread_allocated->deallocated = 0;
    return (thread_allocated);
}
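
/*
 * Illustrative application-side sketch (assumed public API usage, not
 * part of this file): the counters initialized here back the
 * "thread.allocated" and "thread.deallocated" mallctls, e.g.
 *
 *     uint64_t allocated;
 *     size_t sz = sizeof(allocated);
 *     if (mallctl("thread.allocated", &allocated, &sz, NULL, 0) == 0)
 *         printf("bytes allocated by this thread: %llu\n",
 *             (unsigned long long)allocated);
 */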

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error. */
        ret = 1;
    } else
        ret = (unsigned)result;

    return (ret);
}

static void
arenas_cleanup(void *arg)
{
    arena_t *arena = (arena_t *)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

#ifndef JEMALLOC_TLS
static void
thread_allocated_cleanup(void *arg)
{
    uint64_t *allocated = (uint64_t *)arg;

    if (allocated != NULL)
        idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
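
/*
 * Illustrative sketch of the syntax accepted above (example string
 * assumed, not from this file): an options string is a comma-separated
 * list of key:value pairs, with keys drawn from [A-Za-z0-9_]. Given
 *
 *     "abort:true,narenas:4,lg_chunk:24"
 *
 * successive malloc_conf_next() calls yield ("abort", "true"),
 * ("narenas", "4"), and ("lg_chunk", "24"), then report end of input.
 */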

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
                "/etc/malloc.conf"
#endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                continue; \
            }
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                unsigned long ul; \
                char *end; \
                \
                errno = 0; \
                ul = strtoul(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (ul < min || ul > max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = ul; \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                long l; \
                char *end; \
                \
                errno = 0; \
                l = strtol(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > \
                    (ssize_t)max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, abort)
            /*
             * Chunks always require at least one header page,
             * plus one data page.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, stats_print)
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, junk)
                CONF_HANDLE_BOOL(opt_zero, zero)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, tcache)
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    lg_tcache_max, -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, prof)
                CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
                    "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, prof_active)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    lg_prof_sample, 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    lg_prof_interval, -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
                CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
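
/*
 * Illustrative sketch of supplying an options string (shell usage
 * assumed, not part of this file). The three sources probed above are
 * parsed in order, so the environment variable overrides the symbolic
 * link, which overrides the compiled-in je_malloc_conf:
 *
 *     MALLOC_CONF="stats_print:true,narenas:4" ./a.out
 *     ln -s 'abort:true' /etc/malloc.conf
 */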

static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || malloc_initializer == pthread_self()) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
    if (malloc_initializer != (unsigned long)0) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

#ifdef DYNAMIC_PAGE_SHIFT
    /* Get page size. */
    {
        long result;

        result = sysconf(_SC_PAGESIZE);
        assert(result != -1);
        pagesize = (size_t)result;

        /*
         * We assume that pagesize is a power of 2 when calculating
         * pagesize_mask and lg_pagesize.
         */
        assert(((result - 1) & result) == 0);
        pagesize_mask = result - 1;
        lg_pagesize = ffs((int)result) - 1;
    }
#endif

    if (config_prof)
        prof_boot0();

    malloc_conf_init();

    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (chunk_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

#ifndef JEMALLOC_TLS
    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && pthread_key_create(&thread_allocated_tsd,
        thread_allocated_cleanup) != 0) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
#endif

    if (malloc_mutex_init(&arenas_lock)) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here. The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Assign the initial arena to the initial thread, in order to avoid
     * spurious creation of an extra arena if the application switches to
     * threaded mode.
     */
    ARENA_SET(arenas[0]);
    arenas[0]->nthreads++;

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_initializer = pthread_self();
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated. In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        narenas = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas);
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array. In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
    /* Register the custom zone. At this point it won't be the default. */
    malloc_zone_t *jemalloc_zone = create_zone();
    malloc_zone_register(jemalloc_zone);

    /*
     * Unregister and reregister the default zone. On OSX >= 10.6,
     * unregistering takes the last registered zone and places it at the
     * location of the specified zone. Unregistering the default zone thus
     * makes the last registered one the default. On OSX < 10.6,
     * unregistering shifts all registered zones. The first registered zone
     * then becomes the default.
     */
    do {
        malloc_zone_t *default_zone = malloc_default_zone();
        malloc_zone_unregister(default_zone);
        malloc_zone_register(default_zone);
    } while (malloc_default_zone() != jemalloc_zone);
#endif

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

    if (malloc_init_hard())
        abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize;
    prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;

    if (malloc_init()) {
        ret = NULL;
        goto OOM;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto OOM;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats)
            usize = s2u(size);
        ret = imalloc(size);
    }

OOM:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        errno = ENOMEM;
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, 0);
    }
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto RETURN;
        }

        usize = sa2u(size, alignment, NULL);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto RETURN;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment, NULL) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment, NULL), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto RETURN;
    }

    *memptr = result;
    ret = 0;

RETURN:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result));
        ALLOCATED_ADD(usize, 0);
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

    return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        errno = err;
    }
    return (ret);
}
972
Jason Evans9ad48232010-01-03 11:59:20 -0800973JEMALLOC_ATTR(malloc)
Jason Evanse476f8a2010-01-16 09:53:50 -0800974JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700975void *
Jason Evans0a5489e2012-03-01 17:19:20 -0800976je_calloc(size_t num, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700977{
978 void *ret;
979 size_t num_size;
Jason Evans7372b152012-02-10 20:22:09 -0800980 size_t usize;
Jason Evans6ffbbeb2012-02-13 12:31:30 -0800981 prof_thr_cnt_t *cnt
982#ifdef JEMALLOC_CC_SILENCE
983 = NULL
984#endif
985 ;
Jason Evans289053c2009-06-22 12:08:42 -0700986
987 if (malloc_init()) {
988 num_size = 0;
989 ret = NULL;
990 goto RETURN;
991 }
992
993 num_size = num * size;
994 if (num_size == 0) {
Jason Evansc90ad712012-02-28 20:31:37 -0800995 if (num == 0 || size == 0)
Jason Evans289053c2009-06-22 12:08:42 -0700996 num_size = 1;
997 else {
998 ret = NULL;
999 goto RETURN;
1000 }
1001 /*
1002 * Try to avoid division here. We know that it isn't possible to
1003 * overflow during multiplication if neither operand uses any of the
1004 * most significant half of the bits in a size_t.
1005 */
1006 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
1007 && (num_size / size != num)) {
1008 /* size_t overflow. */
1009 ret = NULL;
1010 goto RETURN;
1011 }
1012
Jason Evans7372b152012-02-10 20:22:09 -08001013 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001014 usize = s2u(num_size);
Jason Evansa5070042011-08-12 13:48:27 -07001015 PROF_ALLOC_PREP(1, usize, cnt);
1016 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -07001017 ret = NULL;
1018 goto RETURN;
1019 }
Jason Evans93443682010-10-20 17:39:18 -07001020 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
Jason Evansb1726102012-02-28 16:50:47 -08001021 <= SMALL_MAXCLASS) {
1022 ret = icalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001023 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001024 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001025 } else
1026 ret = icalloc(num_size);
Jason Evans7372b152012-02-10 20:22:09 -08001027 } else {
1028 if (config_stats)
1029 usize = s2u(num_size);
Jason Evans0b270a92010-03-31 16:45:04 -07001030 ret = icalloc(num_size);
Jason Evans93443682010-10-20 17:39:18 -07001031 }
Jason Evans289053c2009-06-22 12:08:42 -07001032
1033RETURN:
1034 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001035 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001036 malloc_write("<jemalloc>: Error in calloc(): out of "
1037 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001038 abort();
1039 }
1040 errno = ENOMEM;
1041 }
1042
Jason Evans7372b152012-02-10 20:22:09 -08001043 if (config_prof && opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001044 prof_malloc(ret, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001045 if (config_stats && ret != NULL) {
Jason Evans93443682010-10-20 17:39:18 -07001046 assert(usize == isalloc(ret));
1047 ALLOCATED_ADD(usize, 0);
1048 }
Jason Evans289053c2009-06-22 12:08:42 -07001049 return (ret);
1050}
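
/*
 * Worked example of the overflow check above, assuming a 64-bit
 * size_t: for num = 2^32 + 1 and size = 2^32, num * size wraps to
 * 2^32. Both operands have bits in the upper half of size_t, so the
 * mask (SIZE_T_MAX << 32) is non-zero and the division check fires
 * (num_size / size == 1 != num), making calloc() fail cleanly instead
 * of under-allocating.
 */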

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize;
    size_t old_size = 0;
    prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;
    prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            if (config_prof || config_stats)
                old_size = isalloc(ptr);
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            idalloc(ptr);
            ret = NULL;
            goto RETURN;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

        if (config_prof || config_stats)
            old_size = isalloc(ptr);
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto OOM;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats)
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

OOM:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            errno = ENOMEM;
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats)
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            errno = ENOMEM;
        }
    }

RETURN:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret));
        ALLOCATED_ADD(usize, old_size);
    }
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

    if (ptr != NULL) {
        size_t usize;

        assert(malloc_initialized || malloc_initializer ==
            pthread_self());

        if (config_prof && opt_prof) {
            usize = isalloc(ptr);
            prof_free(ptr, usize);
        } else if (config_stats) {
            usize = isalloc(ptr);
        }
        if (config_stats)
            ALLOCATED_ADD(0, usize);
        idalloc(ptr);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;
    imemalign(&ret, alignment, size, 1);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
    void *ret
#ifdef JEMALLOC_CC_SILENCE
        = NULL
#endif
        ;
    imemalign(&ret, PAGE_SIZE, size, 1);
    return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
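
/*
 * Illustrative scenario (assumed, not from this file): a plugin loaded
 * via dlopen("libplugin.so", RTLD_LAZY | RTLD_DEEPBIND) would otherwise
 * bind malloc() to libc inside the plugin while the main program uses
 * jemalloc; routing glibc's hooks through the je_* functions above
 * keeps both views of the heap consistent.
 */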

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || malloc_initializer == pthread_self());

    if (config_ivsalloc)
        ret = ivsalloc(ptr);
    else {
        assert(ptr != NULL);
        ret = isalloc(ptr);
    }

    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
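
/*
 * Illustrative sketch of the name/MIB split (caller-side usage;
 * "arenas.bin.0.size" and nbins are assumed example names): translate
 * the name once, then reuse the MIB in a loop, patching the variable
 * component.
 *
 *     size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
 *     size_t bin_size, len = sizeof(bin_size);
 *     unsigned i;
 *     if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) == 0) {
 *         for (i = 0; i < nbins; i++) {
 *             mib[2] = i;
 *             mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *         }
 *     }
 */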
1337
Jason Evans7e77eaf2012-03-02 17:47:37 -08001338/*
1339 * End non-standard functions.
1340 */
1341/******************************************************************************/
1342/*
1343 * Begin experimental functions.
1344 */
1345#ifdef JEMALLOC_EXPERIMENTAL
1346
Jason Evans8e3c3c62010-09-17 15:46:18 -07001347JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001348iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001349{
1350
Jason Evans38d92102011-03-23 00:37:29 -07001351 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1352 NULL)));
1353
Jason Evans8e3c3c62010-09-17 15:46:18 -07001354 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001355 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001356 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001357 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001358 else
Jason Evans38d92102011-03-23 00:37:29 -07001359 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001360}
1361
Jason Evans6a0d2912010-09-20 16:44:23 -07001362JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001363JEMALLOC_ATTR(visibility("default"))
1364int
Jason Evans0a5489e2012-03-01 17:19:20 -08001365je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001366{
1367 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001368 size_t usize;
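	/*
	 * Decode lg(alignment) from the low flag bits.  The & (SIZE_T_MAX-1)
	 * turns the lg == 0 case (2^0 == 1) into an alignment of 0, i.e. no
	 * alignment constraint, which the alignment != 0 tests rely on.
	 */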
Jason Evans8e3c3c62010-09-17 15:46:18 -07001369 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1370 & (SIZE_T_MAX-1));
1371 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001372 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001373
1374 assert(ptr != NULL);
1375 assert(size != 0);
1376
1377 if (malloc_init())
1378 goto OOM;
1379
Jason Evans749c2a02011-08-12 18:37:54 -07001380 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001381 if (usize == 0)
1382 goto OOM;
1383
Jason Evans7372b152012-02-10 20:22:09 -08001384 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001385 PROF_ALLOC_PREP(1, usize, cnt);
1386 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001387 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001388 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001389 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001390 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001391 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -07001392 alignment, NULL);
1393 assert(usize_promoted != 0);
1394 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001395 if (p == NULL)
1396 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001397 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001398 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001399 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001400 if (p == NULL)
1401 goto OOM;
1402 }
Jason Evans749c2a02011-08-12 18:37:54 -07001403 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001404 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001405 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001406 if (p == NULL)
1407 goto OOM;
1408 }
Jason Evans7372b152012-02-10 20:22:09 -08001409 if (rsize != NULL)
1410 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001411
1412 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001413 if (config_stats) {
1414 assert(usize == isalloc(p));
1415 ALLOCATED_ADD(usize, 0);
1416 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001417 return (ALLOCM_SUCCESS);
1418OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001419 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001420 malloc_write("<jemalloc>: Error in allocm(): "
1421 "out of memory\n");
1422 abort();
1423 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001424 *ptr = NULL;
1425 return (ALLOCM_ERR_OOM);
1426}
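/*
 * Illustrative sketch (not part of the original source): requesting an
 * aligned, zeroed allocation and capturing the real usable size via rsize.
 * ALLOCM_ALIGN() and ALLOCM_ZERO come from the experimental public header:
 */
#if 0
static void
example_allocm(void)	/* hypothetical helper name */
{
	void *p;
	size_t rsize;

	if (allocm(&p, &rsize, 100, ALLOCM_ALIGN(4096) | ALLOCM_ZERO)
	    != ALLOCM_SUCCESS) {
		/* Out of memory. */
		return;
	}
	/* p is 4 KiB-aligned, zero-filled, with rsize >= 100 usable bytes. */
	dallocm(p, 0);
}
#endif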
1427
Jason Evans6a0d2912010-09-20 16:44:23 -07001428JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001429JEMALLOC_ATTR(visibility("default"))
1430int
Jason Evans0a5489e2012-03-01 17:19:20 -08001431je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001432{
1433 void *p, *q;
Jason Evans93443682010-10-20 17:39:18 -07001434 size_t usize;
Jason Evans93443682010-10-20 17:39:18 -07001435 size_t old_size;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001436 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1437 & (SIZE_T_MAX-1));
1438 bool zero = flags & ALLOCM_ZERO;
1439 bool no_move = flags & ALLOCM_NO_MOVE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001440 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001441
1442 assert(ptr != NULL);
1443 assert(*ptr != NULL);
1444 assert(size != 0);
1445 assert(SIZE_T_MAX - size >= extra);
1446 assert(malloc_initialized || malloc_initializer == pthread_self());
1447
1448 p = *ptr;
Jason Evans7372b152012-02-10 20:22:09 -08001449 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001450 /*
1451 * usize isn't knowable before iralloc() returns when extra is
1452 * non-zero. Therefore, compute its maximum possible value and
Jason Evansa5070042011-08-12 13:48:27 -07001453 * use that in PROF_ALLOC_PREP() to decide whether to capture a
Jason Evans93443682010-10-20 17:39:18 -07001454 * backtrace. prof_realloc() will use the actual usize to
1455 * decide whether to sample.
1456 */
1457 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1458 sa2u(size+extra, alignment, NULL);
Jason Evans46405e62011-08-30 23:37:29 -07001459 prof_ctx_t *old_ctx = prof_ctx_get(p);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001460 old_size = isalloc(p);
Jason Evansa5070042011-08-12 13:48:27 -07001461 PROF_ALLOC_PREP(1, max_usize, cnt);
1462 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001463 goto OOM;
Jason Evans183ba502011-08-11 22:51:00 -07001464 /*
1465 * Use minimum usize to determine whether promotion may happen.
1466 */
1467 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1468 && ((alignment == 0) ? s2u(size) : sa2u(size,
Jason Evansb1726102012-02-28 16:50:47 -08001469 alignment, NULL)) <= SMALL_MAXCLASS) {
1470 q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1471 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
Jason Evans8e3c3c62010-09-17 15:46:18 -07001472 alignment, zero, no_move);
1473 if (q == NULL)
1474 goto ERR;
Jason Evans183ba502011-08-11 22:51:00 -07001475 if (max_usize < PAGE_SIZE) {
1476 usize = max_usize;
1477 arena_prof_promoted(q, usize);
Jason Evansb493ce22011-08-12 11:28:47 -07001478 } else
1479 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001480 } else {
1481 q = iralloc(p, size, extra, alignment, zero, no_move);
1482 if (q == NULL)
1483 goto ERR;
Jason Evans93443682010-10-20 17:39:18 -07001484 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001485 }
Jason Evanse4f78462010-10-22 10:45:59 -07001486 prof_realloc(q, usize, cnt, old_size, old_ctx);
Jason Evanseacb8962011-03-23 00:30:30 -07001487 if (rsize != NULL)
1488 *rsize = usize;
Jason Evans7372b152012-02-10 20:22:09 -08001489 } else {
1490 if (config_stats)
1491 old_size = isalloc(p);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001492 q = iralloc(p, size, extra, alignment, zero, no_move);
1493 if (q == NULL)
1494 goto ERR;
Jason Evans7372b152012-02-10 20:22:09 -08001495 if (config_stats)
Jason Evans93443682010-10-20 17:39:18 -07001496 usize = isalloc(q);
Jason Evans7372b152012-02-10 20:22:09 -08001497 if (rsize != NULL) {
1498 if (config_stats == false)
1499 usize = isalloc(q);
1500 *rsize = usize;
Jason Evans93443682010-10-20 17:39:18 -07001501 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001502 }
1503
1504 *ptr = q;
Jason Evans7372b152012-02-10 20:22:09 -08001505 if (config_stats)
1506 ALLOCATED_ADD(usize, old_size);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001507 return (ALLOCM_SUCCESS);
1508ERR:
1509 if (no_move)
1510 return (ALLOCM_ERR_NOT_MOVED);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001511OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001512 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001513 malloc_write("<jemalloc>: Error in rallocm(): "
1514 "out of memory\n");
1515 abort();
1516 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001517 return (ALLOCM_ERR_OOM);
1518}
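/*
 * Illustrative sketch (not part of the original source): ALLOCM_NO_MOVE
 * turns rallocm() into a pure in-place resize attempt, with
 * ALLOCM_ERR_NOT_MOVED distinguishing "could not grow in place" from OOM:
 */
#if 0
static void
example_rallocm(void *p)	/* hypothetical helper name */
{
	size_t rsize;
	int r;

	/* Try to grow p to at least 4096 bytes without moving it. */
	r = rallocm(&p, &rsize, 4096, 0, ALLOCM_NO_MOVE);
	if (r == ALLOCM_ERR_NOT_MOVED) {
		/* Fall back to an allocate-copy-free path. */
	}
}
#endif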
1519
Jason Evans6a0d2912010-09-20 16:44:23 -07001520JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001521JEMALLOC_ATTR(visibility("default"))
1522int
Jason Evans0a5489e2012-03-01 17:19:20 -08001523je_sallocm(const void *ptr, size_t *rsize, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001524{
1525 size_t sz;
1526
1527 assert(malloc_initialized || malloc_initializer == pthread_self());
1528
Jason Evans7372b152012-02-10 20:22:09 -08001529 if (config_ivsalloc)
1530 sz = ivsalloc(ptr);
1531 else {
1532 assert(ptr != NULL);
1533 sz = isalloc(ptr);
1534 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001535 assert(rsize != NULL);
1536 *rsize = sz;
1537
1538 return (ALLOCM_SUCCESS);
1539}
1540
Jason Evans6a0d2912010-09-20 16:44:23 -07001541JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001542JEMALLOC_ATTR(visibility("default"))
1543int
Jason Evans0a5489e2012-03-01 17:19:20 -08001544je_dallocm(void *ptr, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001545{
Jason Evanse4f78462010-10-22 10:45:59 -07001546 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001547
1548 assert(ptr != NULL);
1549 assert(malloc_initialized || malloc_initializer == pthread_self());
1550
Jason Evans7372b152012-02-10 20:22:09 -08001551 if (config_stats)
Jason Evanse4f78462010-10-22 10:45:59 -07001552 usize = isalloc(ptr);
Jason Evans7372b152012-02-10 20:22:09 -08001553 if (config_prof && opt_prof) {
1554 if (config_stats == false)
1555 usize = isalloc(ptr);
Jason Evanse4f78462010-10-22 10:45:59 -07001556 prof_free(ptr, usize);
1557 }
Jason Evans7372b152012-02-10 20:22:09 -08001558 if (config_stats)
1559 ALLOCATED_ADD(0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001560 idalloc(ptr);
1561
1562 return (ALLOCM_SUCCESS);
1563}
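/*
 * Illustrative sketch (not part of the original source): sallocm() is the
 * flags-taking analogue of malloc_usable_size(), and dallocm() the analogue
 * of free(); neither currently interprets its flags argument:
 */
#if 0
static void
example_sallocm_dallocm(void *p)	/* hypothetical helper name */
{
	size_t rsize;

	sallocm(p, &rsize, 0);	/* rsize = usable size backing p. */
	dallocm(p, 0);		/* Deallocate p. */
}
#endif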
1564
Jason Evans7e15dab2012-02-29 12:56:37 -08001565JEMALLOC_ATTR(visibility("default"))
1566int
Jason Evans0a5489e2012-03-01 17:19:20 -08001567je_nallocm(size_t *rsize, size_t size, int flags)
Jason Evans7e15dab2012-02-29 12:56:37 -08001568{
1569 size_t usize;
1570 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1571 & (SIZE_T_MAX-1));
1572
1573 assert(size != 0);
1574
1575 if (malloc_init())
1576 return (ALLOCM_ERR_OOM);
1577
1578 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
1579 if (usize == 0)
1580 return (ALLOCM_ERR_OOM);
1581
1582 if (rsize != NULL)
1583 *rsize = usize;
1584 return (ALLOCM_SUCCESS);
1585}
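/*
 * Illustrative sketch (not part of the original source): nallocm() answers
 * "what would allocm() actually hand back for this request?" without
 * allocating, which is useful for sizing data structures up front:
 */
#if 0
static void
example_nallocm(void)	/* hypothetical helper name */
{
	size_t rsize;

	if (nallocm(&rsize, 1000, ALLOCM_ALIGN(64)) == ALLOCM_SUCCESS) {
		/*
		 * A 1000-byte, 64-byte-aligned request would consume rsize
		 * usable bytes.
		 */
	}
}
#endif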
1586
Jason Evans7e77eaf2012-03-02 17:47:37 -08001587#endif
Jason Evans289053c2009-06-22 12:08:42 -07001588/*
Jason Evans7e77eaf2012-03-02 17:47:37 -08001589 * End experimental functions.
Jason Evans289053c2009-06-22 12:08:42 -07001590 */
1591/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001592
Jason Evans289053c2009-06-22 12:08:42 -07001593/*
1594 * The following functions are used by threading libraries to protect malloc
Jason Evans28177d42010-09-20 11:24:24 -07001595 * during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001596 */
1597
Jason Evans2dbecf12010-09-05 10:35:13 -07001598void
Jason Evans804c9ec2009-06-22 17:44:33 -07001599jemalloc_prefork(void)
Jason Evans289053c2009-06-22 12:08:42 -07001600{
Jason Evansfbbb6242010-01-24 17:56:48 -08001601 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001602
1603 /* Acquire all mutexes in a safe order. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001604 malloc_mutex_prefork(&arenas_lock);
Jason Evansfbbb6242010-01-24 17:56:48 -08001605 for (i = 0; i < narenas; i++) {
1606 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001607 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08001608 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001609 base_prefork();
1610 huge_prefork();
1611 chunk_dss_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07001612}
1613
Jason Evans2dbecf12010-09-05 10:35:13 -07001614void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001615jemalloc_postfork_parent(void)
Jason Evans289053c2009-06-22 12:08:42 -07001616{
1617 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001618
1619 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001620 chunk_dss_postfork_parent();
1621 huge_postfork_parent();
1622 base_postfork_parent();
Jason Evans289053c2009-06-22 12:08:42 -07001623 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001624 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001625 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07001626 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001627 malloc_mutex_postfork_parent(&arenas_lock);
1628}
1629
1630void
1631jemalloc_postfork_child(void)
1632{
1633 unsigned i;
1634
1635 /* Release all mutexes, now that fork() has completed. */
1636 chunk_dss_postfork_child();
1637 huge_postfork_child();
1638 base_postfork_child();
1639 for (i = 0; i < narenas; i++) {
1640 if (arenas[i] != NULL)
1641 arena_postfork_child(arenas[i]);
1642 }
1643 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001644}
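/*
 * Illustrative sketch (not part of the original source): these hooks are
 * typically registered once via pthread_atfork(3) -- jemalloc's own
 * initialization does this on pthreads platforms -- so that no allocator
 * mutex is held, stale, or corrupt in either process after fork():
 */
#if 0
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
#endif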
Jason Evans2dbecf12010-09-05 10:35:13 -07001645
1646/******************************************************************************/