#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

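/*
 * (Descriptive note: the pthread TSD key below is kept even when compiler TLS
 * is available, because only TSD provides a destructor, arenas_cleanup(),
 * that decrements an arena's thread count on thread exit; the __thread
 * variable appears to serve as a faster cache of the same value.)
 */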
pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
#endif
pthread_key_t thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#ifdef NO_TLS
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

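/*
 * (Descriptive note: this is the slow path, taken the first time a thread
 * touches its allocation counters.  On OOM a shared static instance is
 * returned so that the "thread.{de,}allocated" mallctls keep functioning,
 * at the cost of accuracy, as the error message below explains.)
 */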
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

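/*
 * (Descriptive note: a conf string is a comma-separated list of key:value
 * pairs, where keys consist of letters, digits, and underscores.  For
 * example, "lg_chunk:24,narenas:4,stats_print:true" sets three of the
 * options handled in malloc_conf_init() below.)
 */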
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

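	/*
	 * (Descriptive note: the three sources are processed in order, so a
	 * setting from a later source overrides an earlier one: compiled-in
	 * je_malloc_conf first, then the /etc/malloc.conf symlink, then the
	 * MALLOC_CONF environment variable.)
	 */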
	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
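/*
 * (Descriptive note: each CONF_HANDLE_* macro below expands to an if
 * statement that matches the current key against option name n and, on a
 * match, validates and parses the value into option variable o before
 * continuing with the next key/value pair.)
 */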
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = ul;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
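		/*
		 * (Illustrative note: a 4096-byte page yields pagesize_mask
		 * 0xfff and lg_pagesize 12.)
		 */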
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
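		/*
		 * (Descriptive note: PROF_ALLOC_PREP() yields (uintptr_t)1U
		 * for allocations that are not sampled.  Sampled small
		 * requests are promoted to the smallest large size class,
		 * SMALL_MAXCLASS+1, and arena_prof_promoted() records the
		 * requested usize, so the sample can be attributed exactly.)
		 */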
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

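/*
 * (Descriptive note: imemalign() backs posix_memalign() as well as the
 * optional memalign() and valloc() overrides.  enforce_min_alignment
 * distinguishes the POSIX entry point, which must reject alignments smaller
 * than sizeof(void *), from the legacy entry points, which historically
 * accept them.)
 */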
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (enforce_min_alignment && alignment < sizeof(void *))) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, true);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
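	/*
	 * (Illustrative note: with a 64-bit size_t, if num and size are both
	 * below 2^32, their product is at most (2^32 - 1)^2 < 2^64 and
	 * cannot overflow, so the division check below only runs in the rare
	 * case where an operand uses the upper half of its bits.)
	 */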
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, false);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

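	/*
	 * (Descriptive note: when config_ivsalloc is enabled at build time,
	 * ivsalloc() validates that ptr was issued by jemalloc before sizing
	 * it; the plain isalloc() path below assumes a valid pointer.)
	 */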
	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
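	/*
	 * (Descriptive note: the low bits of flags carry the base-2 log of
	 * the requested alignment.  A lg value of 0 decodes to 1, which the
	 * "& (SIZE_T_MAX-1)" masks down to 0, i.e. no alignment constraint.)
	 */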
1346 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001347 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001348
1349 assert(ptr != NULL);
1350 assert(size != 0);
1351
1352 if (malloc_init())
1353 goto OOM;
1354
Jason Evans749c2a02011-08-12 18:37:54 -07001355 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001356 if (usize == 0)
1357 goto OOM;
1358
Jason Evans7372b152012-02-10 20:22:09 -08001359 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001360 PROF_ALLOC_PREP(1, usize, cnt);
1361 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001362 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001363 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001364 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001365 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001366 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -07001367 alignment, NULL);
1368 assert(usize_promoted != 0);
1369 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001370 if (p == NULL)
1371 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001372 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001373 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001374 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001375 if (p == NULL)
1376 goto OOM;
1377 }
Jason Evans749c2a02011-08-12 18:37:54 -07001378 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001379 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001380 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001381 if (p == NULL)
1382 goto OOM;
1383 }
Jason Evans7372b152012-02-10 20:22:09 -08001384 if (rsize != NULL)
1385 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001386
1387 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001388 if (config_stats) {
1389 assert(usize == isalloc(p));
1390 ALLOCATED_ADD(usize, 0);
1391 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001392 return (ALLOCM_SUCCESS);
1393OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001394 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001395 malloc_write("<jemalloc>: Error in allocm(): "
1396 "out of memory\n");
1397 abort();
1398 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001399 *ptr = NULL;
1400 return (ALLOCM_ERR_OOM);
1401}
1402
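/*
 * Usage sketch (illustrative, guarded out of the build): request a
 * page-aligned, zeroed allocation and learn its usable size in one call.
 * ALLOCM_LG_ALIGN() and ALLOCM_ZERO come from the experimental public API;
 * example_aligned_alloc() is a hypothetical caller.
 */
#if 0
static void *
example_aligned_alloc(size_t size, size_t *usize)
{
	void *p;

	/* Alignment is encoded as its base-2 log: 12 -> 4096 bytes. */
	if (je_allocm(&p, usize, size, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}
#endif
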
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}

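/*
 * Usage sketch (illustrative, guarded out of the build): use ALLOCM_NO_MOVE
 * to attempt in-place growth, and retry with a moving reallocation only if
 * rallocm() reports ALLOCM_ERR_NOT_MOVED.  example_grow() is a hypothetical
 * caller.
 */
#if 0
static int
example_grow(void **ptr, size_t size, size_t extra)
{
	size_t usize;
	int ret;

	ret = je_rallocm(ptr, &usize, size, extra, ALLOCM_NO_MOVE);
	if (ret == ALLOCM_ERR_NOT_MOVED)
		ret = je_rallocm(ptr, &usize, size, extra, 0);
	return (ret);
}
#endif
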
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

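/*
 * Usage sketch (illustrative, guarded out of the build): sallocm() reports
 * the usable size of an existing object, which dallocm() then releases; the
 * reported size is at least the requested size, rounded up to a size class.
 */
#if 0
static void
example_lifecycle(void)
{
	void *p;
	size_t usize;

	if (je_allocm(&p, NULL, 42, 0) != ALLOCM_SUCCESS)
		return;
	je_sallocm(p, &usize, 0);	/* usize >= 42. */
	je_dallocm(p, 0);
}
#endif
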
JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

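/*
 * Usage sketch (illustrative, guarded out of the build): nallocm() computes
 * the usable size allocm() would produce for the same size/flags without
 * allocating, so a caller can size a request to a whole size class up front.
 */
#if 0
static size_t
example_round_to_size_class(size_t size)
{
	size_t usize;

	if (je_nallocm(&usize, size, 0) != ALLOCM_SUCCESS)
		return (0);
	return (usize);
}
#endif
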
#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries to protect malloc
 * during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}

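/*
 * Usage sketch (illustrative, guarded out of the build): the intended pairing
 * is jemalloc_prefork() as the pre-fork handler and jemalloc_postfork() in
 * both the parent and the child, e.g. via pthread_atfork(), so that every
 * allocator mutex is held across fork() and released afterward in both
 * processes.
 */
#if 0
#include <pthread.h>

static void
example_register_fork_handlers(void)
{

	pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork);
}
#endif
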
/******************************************************************************/