#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
bool opt_redzone = true;
# else
bool opt_junk = false;
bool opt_redzone = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
bool opt_redzone = false;
#endif
size_t opt_quarantine = ZU(0);
bool opt_utrace = false;
bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

typedef struct {
	void *p;	/* Input pointer (as in realloc(p, s)). */
	size_t s;	/* Request size. */
	void *r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
	if (opt_utrace) { \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
	} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
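
/*
 * Usage note: with opt_utrace enabled, je_malloc() below records
 * allocations via UTRACE(0, size, ret) and je_free() records deallocations
 * via UTRACE(ptr, 0, 0); each event reaches the kernel through utrace(2).
 */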

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path. Instead, punt
	 * by using arenas[0]. In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
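
/*
 * Worked example of the policy above (illustrative numbers): with
 * narenas == 4 and per-arena thread counts {3, 1, 2, 0}, a new thread
 * binds to arenas[3], the least loaded arena. If slot 3 were instead
 * uninitialized, it would be the first NULL slot, and arenas_extend(3)
 * would create a fresh arena rather than crowding arenas[1].
 */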

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
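
/*
 * Example conf string accepted by the scanner above (illustrative):
 *
 *	"abort:true,lg_chunk:22,narenas:4"
 *
 * Each call yields one key/value pair: keys match [A-Za-z0-9_]+, ':'
 * terminates the key, and ',' (or end of string) terminates the value.
 */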

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				hit = true; \
			} else \
				hit = false;
#define CONF_HANDLE_BOOL(o, n) { \
			bool hit; \
			CONF_HANDLE_BOOL_HIT(o, n, hit); \
			if (hit) \
				continue; \
}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				uintmax_t um; \
				char *end; \
				\
				errno = 0; \
				um = malloc_strtoumax(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (um < min || um > max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = um; \
				continue; \
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				long l; \
				char *end; \
				\
				errno = 0; \
				l = strtol(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page plus
			 * one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, redzone)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, utrace)
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    valgrind, hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL_HIT
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
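
/*
 * Precedence example (illustrative): if je_malloc_conf is "junk:true" and
 * the environment sets MALLOC_CONF="junk:false", pass 0 sets opt_junk to
 * true and pass 2 overrides it to false; the compiled-in string, the
 * /etc/malloc.conf symlink, and the environment variable are parsed in
 * that order, each overriding earlier sources.
 */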

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here. The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
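
	/* Example: ncpus == 8 yields opt_narenas == 32 (ncpus << 2). */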
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated. In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
		    narenas);
	}
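	/*
	 * E.g., with 4 MiB chunks (an assumed configuration) and 8-byte
	 * pointers, the cap above works out to 524288 arenas, far beyond
	 * any practical setting.
	 */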

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array. In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
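		/*
		 * A power of two has exactly one bit set, so
		 * ((alignment - 1) & alignment) is zero precisely for powers
		 * of two, e.g. 16 & 15 == 0, whereas 12 & 11 == 8.
		 */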
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}
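
/*
 * Note the differing minimum alignments above: je_posix_memalign() passes
 * sizeof(void *) as min_alignment, per POSIX, while je_aligned_alloc() (and
 * the memalign()/valloc() overrides below) pass 1, accepting any power of
 * two.
 */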

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
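	 *
	 * For example, with a 64-bit size_t, (SIZE_T_MAX << 32) masks the
	 * high half; if neither num nor size has any high-half bits set, the
	 * product fits in 64 bits, and the division test is short-circuited.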
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
1039 /* realloc(ptr, 0) is equivalent to free(p). */
Jason Evans122449b2012-04-06 00:35:09 -07001040 if (config_prof) {
1041 old_size = isalloc(ptr, true);
1042 if (config_valgrind && opt_valgrind)
1043 old_rzsize = p2rz(ptr);
1044 } else if (config_stats) {
1045 old_size = isalloc(ptr, false);
1046 if (config_valgrind && opt_valgrind)
1047 old_rzsize = u2rz(old_size);
1048 } else if (config_valgrind && opt_valgrind) {
1049 old_size = isalloc(ptr, false);
1050 old_rzsize = u2rz(old_size);
1051 }
Jason Evansf081b882012-02-28 20:24:05 -08001052 if (config_prof && opt_prof) {
1053 old_ctx = prof_ctx_get(ptr);
Jason Evans6109fe02010-02-10 10:37:56 -08001054 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001055 }
Jason Evans122449b2012-04-06 00:35:09 -07001056 iqalloc(ptr);
Jason Evans289053c2009-06-22 12:08:42 -07001057 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001058 goto label_return;
Jason Evansc90ad712012-02-28 20:31:37 -08001059 } else
1060 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001061 }
1062
1063 if (ptr != NULL) {
Jason Evans41b6afb2012-02-02 22:04:57 -08001064 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans289053c2009-06-22 12:08:42 -07001065
Jason Evans122449b2012-04-06 00:35:09 -07001066 if (config_prof) {
1067 old_size = isalloc(ptr, true);
1068 if (config_valgrind && opt_valgrind)
1069 old_rzsize = p2rz(ptr);
1070 } else if (config_stats) {
1071 old_size = isalloc(ptr, false);
1072 if (config_valgrind && opt_valgrind)
1073 old_rzsize = u2rz(old_size);
1074 } else if (config_valgrind && opt_valgrind) {
1075 old_size = isalloc(ptr, false);
1076 old_rzsize = u2rz(old_size);
1077 }
Jason Evans7372b152012-02-10 20:22:09 -08001078 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001079 usize = s2u(size);
Jason Evans50651562010-04-13 16:13:54 -07001080 old_ctx = prof_ctx_get(ptr);
Jason Evansa5070042011-08-12 13:48:27 -07001081 PROF_ALLOC_PREP(1, usize, cnt);
1082 if (cnt == NULL) {
Jason Evans46405e62011-08-30 23:37:29 -07001083 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001084 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001085 goto label_oom;
Jason Evans6109fe02010-02-10 10:37:56 -08001086 }
Jason Evans0b270a92010-03-31 16:45:04 -07001087 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
Jason Evansb1726102012-02-28 16:50:47 -08001088 usize <= SMALL_MAXCLASS) {
1089 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001090 false, false);
Jason Evans0b270a92010-03-31 16:45:04 -07001091 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001092 arena_prof_promoted(ret, usize);
Jason Evans46405e62011-08-30 23:37:29 -07001093 else
1094 old_ctx = NULL;
1095 } else {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001096 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans46405e62011-08-30 23:37:29 -07001097 if (ret == NULL)
1098 old_ctx = NULL;
1099 }
Jason Evans7372b152012-02-10 20:22:09 -08001100 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001101 if (config_stats || (config_valgrind && opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -08001102 usize = s2u(size);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001103 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans93443682010-10-20 17:39:18 -07001104 }
Jason Evans289053c2009-06-22 12:08:42 -07001105
Jason Evansa1ee7832012-04-10 15:07:44 -07001106label_oom:
Jason Evans289053c2009-06-22 12:08:42 -07001107 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001108 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001109 malloc_write("<jemalloc>: Error in realloc(): "
1110 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001111 abort();
1112 }
1113 errno = ENOMEM;
1114 }
1115 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001116 /* realloc(NULL, size) is equivalent to malloc(size). */
Jason Evans7372b152012-02-10 20:22:09 -08001117 if (config_prof && opt_prof)
Jason Evans50651562010-04-13 16:13:54 -07001118 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001119 if (malloc_init()) {
Jason Evans7372b152012-02-10 20:22:09 -08001120 if (config_prof && opt_prof)
Jason Evans6109fe02010-02-10 10:37:56 -08001121 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001122 ret = NULL;
1123 } else {
Jason Evans7372b152012-02-10 20:22:09 -08001124 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001125 usize = s2u(size);
Jason Evansa5070042011-08-12 13:48:27 -07001126 PROF_ALLOC_PREP(1, usize, cnt);
1127 if (cnt == NULL)
Jason Evans0b270a92010-03-31 16:45:04 -07001128 ret = NULL;
1129 else {
1130 if (prof_promote && (uintptr_t)cnt !=
Jason Evans93443682010-10-20 17:39:18 -07001131 (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001132 SMALL_MAXCLASS) {
1133 ret = imalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001134 if (ret != NULL) {
1135 arena_prof_promoted(ret,
Jason Evans93443682010-10-20 17:39:18 -07001136 usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001137 }
1138 } else
1139 ret = imalloc(size);
1140 }
Jason Evans7372b152012-02-10 20:22:09 -08001141 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001142 if (config_stats || (config_valgrind &&
1143 opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -08001144 usize = s2u(size);
Jason Evans6109fe02010-02-10 10:37:56 -08001145 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -07001146 }
Jason Evans6109fe02010-02-10 10:37:56 -08001147 }
Jason Evans569432c2009-12-29 00:09:15 -08001148
Jason Evans289053c2009-06-22 12:08:42 -07001149 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001150 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001151 malloc_write("<jemalloc>: Error in realloc(): "
1152 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001153 abort();
1154 }
1155 errno = ENOMEM;
1156 }
1157 }
1158
Jason Evansa1ee7832012-04-10 15:07:44 -07001159label_return:
Jason Evans7372b152012-02-10 20:22:09 -08001160 if (config_prof && opt_prof)
Jason Evanse4f78462010-10-22 10:45:59 -07001161 prof_realloc(ret, usize, cnt, old_size, old_ctx);
Jason Evans7372b152012-02-10 20:22:09 -08001162 if (config_stats && ret != NULL) {
Jason Evanscd9a1342012-03-21 18:33:03 -07001163 thread_allocated_t *ta;
Jason Evans122449b2012-04-06 00:35:09 -07001164 assert(usize == isalloc(ret, config_prof));
Jason Evanscd9a1342012-03-21 18:33:03 -07001165 ta = thread_allocated_tsd_get();
1166 ta->allocated += usize;
1167 ta->deallocated += old_size;
Jason Evans93443682010-10-20 17:39:18 -07001168 }
Jason Evansb1476112012-04-05 13:36:17 -07001169 UTRACE(ptr, size, ret);
Jason Evans122449b2012-04-06 00:35:09 -07001170 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
Jason Evans289053c2009-06-22 12:08:42 -07001171 return (ret);
1172}
1173
Jason Evanse476f8a2010-01-16 09:53:50 -08001174JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001175void
Jason Evans0a5489e2012-03-01 17:19:20 -08001176je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001177{
1178
Jason Evansb1476112012-04-05 13:36:17 -07001179 UTRACE(ptr, 0, 0);
Jason Evansf0047372012-04-02 15:18:24 -07001180 if (ptr != NULL) {
1181 size_t usize;
Jason Evans122449b2012-04-06 00:35:09 -07001182 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evanse4f78462010-10-22 10:45:59 -07001183
Jason Evansf0047372012-04-02 15:18:24 -07001184 assert(malloc_initialized || IS_INITIALIZER);
1185
1186 if (config_prof && opt_prof) {
Jason Evans122449b2012-04-06 00:35:09 -07001187 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001188 prof_free(ptr, usize);
Jason Evans122449b2012-04-06 00:35:09 -07001189 } else if (config_stats || config_valgrind)
1190 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001191 if (config_stats)
1192 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans122449b2012-04-06 00:35:09 -07001193 if (config_valgrind && opt_valgrind)
1194 rzsize = p2rz(ptr);
1195 iqalloc(ptr);
1196 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
Jason Evansf0047372012-04-02 15:18:24 -07001197 }
Jason Evans289053c2009-06-22 12:08:42 -07001198}
1199
1200/*
1201 * End malloc(3)-compatible functions.
1202 */
1203/******************************************************************************/
1204/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001205 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001206 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001207
1208#ifdef JEMALLOC_OVERRIDE_MEMALIGN
1209JEMALLOC_ATTR(malloc)
1210JEMALLOC_ATTR(visibility("default"))
1211void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001212je_memalign(size_t alignment, size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001213{
Jason Evans9225a192012-03-23 15:39:07 -07001214 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001215 imemalign(&ret, alignment, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001216 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001217 return (ret);
1218}
1219#endif
1220
1221#ifdef JEMALLOC_OVERRIDE_VALLOC
1222JEMALLOC_ATTR(malloc)
1223JEMALLOC_ATTR(visibility("default"))
1224void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001225je_valloc(size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001226{
Jason Evans9225a192012-03-23 15:39:07 -07001227 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evansae4c7b42012-04-02 07:04:34 -07001228 imemalign(&ret, PAGE, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001229 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001230 return (ret);
1231}
1232#endif
1233
Mike Hommey5c89c502012-03-26 17:46:57 +02001234/*
1235 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1236 * #define je_malloc malloc
1237 */
1238#define malloc_is_malloc 1
1239#define is_malloc_(a) malloc_is_ ## a
1240#define is_malloc(a) is_malloc_(a)
1241
1242#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
Jason Evans4bb09832012-02-29 10:37:27 -08001243/*
1244 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1245 * to inconsistently reference libc's malloc(3)-compatible functions
1246 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1247 *
Mike Hommey3c2ba0d2012-03-27 14:20:13 +02001248 * These definitions interpose hooks in glibc. The functions are actually
Jason Evans4bb09832012-02-29 10:37:27 -08001249 * passed an extra argument for the caller return address, which will be
1250 * ignored.
1251 */
1252JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001253void (* const __free_hook)(void *ptr) = je_free;
Jason Evans4bb09832012-02-29 10:37:27 -08001254
1255JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001256void *(* const __malloc_hook)(size_t size) = je_malloc;
Jason Evans4bb09832012-02-29 10:37:27 -08001257
1258JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001259void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
Jason Evans4bb09832012-02-29 10:37:27 -08001260
1261JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001262void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
Jason Evans4bb09832012-02-29 10:37:27 -08001263#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}
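
/*
 * Usage sketch (illustrative only, not compiled; assumes the unprefixed
 * public name): the reported size may exceed the requested size, and the
 * full extent is safe to use.
 *
 *	void *p = malloc(100);
 *	if (p != NULL) {
 *		size_t sz = malloc_usable_size(p);	(sz >= 100)
 *		memset(p, 0, sz);
 *	}
 */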

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
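
/*
 * Usage sketch (illustrative only, not compiled; my_write_cb and logfile are
 * hypothetical names): passing NULL for write_cb sends output to stderr,
 * while a caller-supplied callback can capture it instead, with cbopaque
 * forwarded as the first argument.
 *
 *	static void
 *	my_write_cb(void *opaque, const char *s)
 *	{
 *		fputs(s, (FILE *)opaque);
 *	}
 *
 *	malloc_stats_print(my_write_cb, logfile, NULL);
 */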

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
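
/*
 * Usage sketch (illustrative only, not compiled): statistics are refreshed
 * only when the "epoch" counter is advanced, so bump it before reading them.
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		printf("allocated: %zu\n", allocated);
 */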

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
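
/*
 * Usage sketch (illustrative only, not compiled): translate a name to a MIB
 * once, then substitute indices and query repeatedly without re-parsing the
 * name string.
 *
 *	unsigned nbins, i;
 *	size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
 *	size_t sz = sizeof(nbins);
 *	mallctl("arenas.nbins", &nbins, &sz, NULL, 0);
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size, len = sizeof(bin_size);
 *		mib[2] = i;
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *		(bin_size is the allocation size of bin i)
 *	}
 */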

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

/* Choose the internal allocation path implied by alignment and zero. */
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
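
/*
 * Usage sketch (illustrative only, not compiled): request zeroed,
 * 64-byte-aligned memory and learn the actual usable size.
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) ==
 *	    ALLOCM_SUCCESS) {
 *		(at least rsize >= 4096 bytes are usable)
 *		dallocm(p, 0);
 *	}
 */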

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
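
/*
 * Usage sketch (illustrative only, not compiled; p and newsize are assumed to
 * exist): attempt an in-place resize first, and only on ALLOCM_ERR_NOT_MOVED
 * fall back to a reallocation that is allowed to move the object.
 *
 *	size_t rsize;
 *	if (rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		rallocm(&p, &rsize, newsize, 0, 0);
 */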

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}
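
/*
 * Usage sketch (illustrative only, not compiled): sallocm() reports the same
 * usable size that allocm()/rallocm() return via rsize.
 *
 *	size_t sz;
 *	sallocm(p, &sz, 0);
 */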

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
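
/*
 * Usage sketch (illustrative only, not compiled): compute the usable size an
 * equivalent allocm() call would produce, without allocating anything.
 *
 *	size_t usize;
 *	if (nallocm(&usize, 4096, ALLOCM_ALIGN(4096)) == ALLOCM_SUCCESS)
 *		(usize matches what allocm() would report via rsize)
 */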

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}
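
/*
 * Registration sketch (the exact hookup is platform-dependent): on systems
 * without JEMALLOC_MUTEX_INIT_CB, these handlers are typically wired up
 * during initialization via
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so that no allocator mutex is held across fork() in the child process.
 */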

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	/*
	 * No overflow check on num * size; these bootstrap functions serve
	 * only small internal requests.
	 */
	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/