#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

pthread_key_t		arenas_tsd;
#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
#endif
pthread_key_t		thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static void	arenas_cleanup(void *arg);
#ifdef NO_TLS
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
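
/*
 * Illustrative example (added for exposition, not part of the original
 * source): with narenas == 4, arenas == {a0, a1, NULL, NULL}, and one
 * thread already assigned to each of a0 and a1, the loop above leaves
 * choose == 0 and first_null == 2, so a new arena is initialized at
 * index 2 for the calling thread instead of doubling up on a0.
 */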

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
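
/*
 * Illustrative usage (added for exposition, not part of the original
 * source): the per-thread counters initialized here back the
 * "thread.allocated" and "thread.deallocated" mallctls, e.g.:
 *
 *	uint64_t allocated;
 *	size_t sz = sizeof(allocated);
 *	mallctl("thread.allocated", &allocated, &sz, NULL, 0);
 */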

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
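
/*
 * Illustrative example (added for exposition, not part of the original
 * source): parsing "abort:true,lg_chunk:22" with successive calls to
 * malloc_conf_next() yields (k="abort", klen=5, v="true", vlen=4), then
 * (k="lg_chunk", klen=8, v="22", vlen=2), after which *opts_p points at
 * the terminating '\0' and the caller's while loop ends.
 */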

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				continue; \
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				unsigned long ul; \
				char *end; \
 \
				errno = 0; \
				ul = strtoul(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (ul < min || ul > max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = ul; \
				continue; \
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				long l; \
				char *end; \
 \
				errno = 0; \
				l = strtol(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define	CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
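
/*
 * Illustrative usage (added for exposition, not part of the original
 * source): the three sources above share one syntax, so the following
 * are equivalent ways to set the same options:
 *
 *	je_malloc_conf = "abort:true,narenas:1";	(compiled into the app)
 *	ln -s 'abort:true,narenas:1' /etc/malloc.conf	(symbolic link name)
 *	MALLOC_CONF=abort:true,narenas:1 ./a.out	(environment variable)
 */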

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
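	/*
	 * Illustrative example (added for exposition, not part of the
	 * original source): with 4 KiB pages, sysconf() returns 4096, so
	 * pagesize_mask becomes 0xfff and lg_pagesize becomes 12
	 * (ffs(4096) - 1).
	 */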
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
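	/*
	 * Illustrative example (added for exposition, not part of the
	 * original source): on an 8-CPU machine with no explicit "narenas"
	 * setting, opt_narenas defaults to 8 << 2 == 32 arenas.
	 */
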
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	return (ret);
}
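
/*
 * Note (added for exposition, not part of the original source): the two
 * wrappers above differ only in min_alignment.  posix_memalign(3)
 * requires alignment to be at least sizeof(void *), so, e.g., an
 * alignment of 2 fails there with EINVAL but is accepted by
 * aligned_alloc().
 */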

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}
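	/*
	 * Illustrative example (added for exposition, not part of the
	 * original source): with a 64-bit size_t, the mask above is
	 * SIZE_T_MAX << 32; if both num and size fit in 32 bits, the
	 * product cannot have overflowed and the division check is
	 * skipped entirely.
	 */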
1004
Jason Evans7372b152012-02-10 20:22:09 -08001005 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001006 usize = s2u(num_size);
Jason Evansa5070042011-08-12 13:48:27 -07001007 PROF_ALLOC_PREP(1, usize, cnt);
1008 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -07001009 ret = NULL;
1010 goto RETURN;
1011 }
Jason Evans93443682010-10-20 17:39:18 -07001012 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
Jason Evansb1726102012-02-28 16:50:47 -08001013 <= SMALL_MAXCLASS) {
1014 ret = icalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001015 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001016 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001017 } else
1018 ret = icalloc(num_size);
Jason Evans7372b152012-02-10 20:22:09 -08001019 } else {
1020 if (config_stats)
1021 usize = s2u(num_size);
Jason Evans0b270a92010-03-31 16:45:04 -07001022 ret = icalloc(num_size);
Jason Evans93443682010-10-20 17:39:18 -07001023 }
Jason Evans289053c2009-06-22 12:08:42 -07001024
1025RETURN:
1026 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001027 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001028 malloc_write("<jemalloc>: Error in calloc(): out of "
1029 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001030 abort();
1031 }
1032 errno = ENOMEM;
1033 }
1034
Jason Evans7372b152012-02-10 20:22:09 -08001035 if (config_prof && opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001036 prof_malloc(ret, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001037 if (config_stats && ret != NULL) {
Jason Evans93443682010-10-20 17:39:18 -07001038 assert(usize == isalloc(ret));
1039 ALLOCATED_ADD(usize, 0);
1040 }
Jason Evans289053c2009-06-22 12:08:42 -07001041 return (ret);
1042}
1043
JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, 1);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
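
/*
 * Illustrative usage (added for exposition, not part of the original
 * source; mallctl() is the public name the je_mallctl() definition above
 * is exported under when no prefix is configured):
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);	(refresh cached stats)
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */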

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

Jason Evans6a0d2912010-09-20 16:44:23 -07001354JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001355JEMALLOC_ATTR(visibility("default"))
1356int
Jason Evans0a5489e2012-03-01 17:19:20 -08001357je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001358{
1359 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001360 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001361 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1362 & (SIZE_T_MAX-1));
1363 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001364 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001365
1366 assert(ptr != NULL);
1367 assert(size != 0);
1368
1369 if (malloc_init())
1370 goto OOM;
1371
Jason Evans749c2a02011-08-12 18:37:54 -07001372 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001373 if (usize == 0)
1374 goto OOM;
1375
Jason Evans7372b152012-02-10 20:22:09 -08001376 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001377 PROF_ALLOC_PREP(1, usize, cnt);
1378 if (cnt == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001379 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001380 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001381 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001382 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001383 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -07001384 alignment, NULL);
1385 assert(usize_promoted != 0);
1386 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001387 if (p == NULL)
1388 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001389 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001390 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001391 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001392 if (p == NULL)
1393 goto OOM;
1394 }
Jason Evans749c2a02011-08-12 18:37:54 -07001395 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001396 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001397 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001398 if (p == NULL)
1399 goto OOM;
1400 }
Jason Evans7372b152012-02-10 20:22:09 -08001401 if (rsize != NULL)
1402 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001403
1404 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001405 if (config_stats) {
1406 assert(usize == isalloc(p));
1407 ALLOCATED_ADD(usize, 0);
1408 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001409 return (ALLOCM_SUCCESS);
1410OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001411 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001412 malloc_write("<jemalloc>: Error in allocm(): "
1413 "out of memory\n");
1414 abort();
1415 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001416 *ptr = NULL;
1417 return (ALLOCM_ERR_OOM);
1418}
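
/*
 * Example (illustrative sketch): allocate 4 KiB, zeroed and 64-byte aligned,
 * retrieving the usable size.  ALLOCM_ALIGN() encodes lg(alignment) into the
 * flags word as described above.
 *
 *	void *p;
 *	size_t rsize;
 *
 *	if (je_allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO)
 *	    != ALLOCM_SUCCESS)
 *		abort();
 */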

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
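
/*
 * Example (illustrative sketch): try to grow an allocation in place first,
 * falling back to a moving reallocation only when the in-place attempt
 * reports ALLOCM_ERR_NOT_MOVED (the ERR path above).
 *
 *	size_t rsize;
 *	int r;
 *
 *	r = je_rallocm(&p, &rsize, new_size, 0, ALLOCM_NO_MOVE);
 *	if (r == ALLOCM_ERR_NOT_MOVED)
 *		r = je_rallocm(&p, &rsize, new_size, 0, 0);
 */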

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
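
/*
 * Example (illustrative sketch): a full object lifecycle through the
 * experimental API.  sallocm() reports the same usable size that allocm()
 * returned via rsize.
 *
 *	void *p;
 *	size_t rsize, sz;
 *
 *	je_allocm(&p, &rsize, 100, 0);
 *	je_sallocm(p, &sz, 0);
 *	assert(sz == rsize);
 *	je_dallocm(p, 0);
 */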
1556
Jason Evans7e15dab2012-02-29 12:56:37 -08001557JEMALLOC_ATTR(visibility("default"))
1558int
Jason Evans0a5489e2012-03-01 17:19:20 -08001559je_nallocm(size_t *rsize, size_t size, int flags)
Jason Evans7e15dab2012-02-29 12:56:37 -08001560{
1561 size_t usize;
1562 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1563 & (SIZE_T_MAX-1));
1564
1565 assert(size != 0);
1566
1567 if (malloc_init())
1568 return (ALLOCM_ERR_OOM);
1569
1570 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
1571 if (usize == 0)
1572 return (ALLOCM_ERR_OOM);
1573
1574 if (rsize != NULL)
1575 *rsize = usize;
1576 return (ALLOCM_SUCCESS);
1577}
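
/*
 * Example (illustrative sketch): size fixed-capacity pool slots without
 * touching the heap; nallocm() reports the usable size that allocm() would
 * return for the same (size, flags) pair.
 *
 *	size_t usize;
 *
 *	if (je_nallocm(&usize, 100, ALLOCM_ALIGN(16)) == ALLOCM_SUCCESS) {
 *		... reserve usize bytes per slot ...
 *	}
 */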

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

void
jemalloc_postfork_parent(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}
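
/*
 * Example (illustrative sketch): a threading library wires these hooks up
 * via pthread_atfork() so that no allocator mutex is held across fork();
 * releases happen in the reverse of the acquisition order above.
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */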

/******************************************************************************/