blob: 00c2b23cd0e77790e2331633a5d9356d1dee7f71 [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evans289053c2009-06-22 12:08:42 -07003
Jason Evans289053c2009-06-22 12:08:42 -07004/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -08005/* Data. */
Jason Evans289053c2009-06-22 12:08:42 -07006
/*
 * Thread-specific data (TSD) slots: the per-thread arena binding and the
 * per-thread allocation counters (the latter only used when stats are
 * enabled).
 */
malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
/* Debug builds abort on internal errors and junk-fill by default. */
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
size_t opt_quarantine = ZU(0);		/* Quarantine size in bytes; 0 disables. */
bool opt_redzone = false;		/* Pad allocations with redzones. */
bool opt_utrace = false;		/* Trace calls via utrace(2). */
bool opt_valgrind = false;		/* Enable Valgrind integration. */
bool opt_xmalloc = false;		/* Abort instead of returning NULL on OOM. */
bool opt_zero = false;			/* Zero-fill allocations. */
size_t opt_narenas = 0;			/* Requested arena count; 0 = auto. */

unsigned ncpus;

/* Protects arenas[] and per-arena thread counts. */
malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
/* Single-threaded init: a plain flag suffices. */
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
56
/*
 * Record describing one allocator call, passed to the utrace(2) tracing
 * facility when built with JEMALLOC_UTRACE and opt_utrace is enabled.
 */
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
/* Emit a trace record for the call (a, b) --> c, if tracing is enabled. */
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
76
Jason Evans289053c2009-06-22 12:08:42 -070077/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080078/* Function prototypes for non-inline static functions. */
Jason Evans289053c2009-06-22 12:08:42 -070079
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
/* Shared backend for posix_memalign()/memalign()-style entry points. */
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
Jason Evans289053c2009-06-22 12:08:42 -070090
Jason Evans289053c2009-06-22 12:08:42 -070091/******************************************************************************/
Jason Evansc9658dd2009-06-22 14:44:08 -070092/*
Jason Evanse476f8a2010-01-16 09:53:50 -080093 * Begin miscellaneous support functions.
Jason Evansb7924f52009-06-23 19:01:18 -070094 */
95
Jason Evanse476f8a2010-01-16 09:53:50 -080096/* Create a new arena and insert it into the arenas array at index ind. */
97arena_t *
98arenas_extend(unsigned ind)
Jason Evans289053c2009-06-22 12:08:42 -070099{
100 arena_t *ret;
101
Jason Evansb1726102012-02-28 16:50:47 -0800102 ret = (arena_t *)base_alloc(sizeof(arena_t));
Jason Evanse476f8a2010-01-16 09:53:50 -0800103 if (ret != NULL && arena_new(ret, ind) == false) {
104 arenas[ind] = ret;
105 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -0700106 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800107 /* Only reached if there is an OOM error. */
Jason Evans289053c2009-06-22 12:08:42 -0700108
Jason Evanse476f8a2010-01-16 09:53:50 -0800109 /*
110 * OOM here is quite inconvenient to propagate, since dealing with it
111 * would require a check for failure in the fast path. Instead, punt
112 * by using arenas[0]. In practice, this is an extremely unlikely
113 * failure.
114 */
Jason Evans698805c2010-03-03 17:45:38 -0800115 malloc_write("<jemalloc>: Error initializing arena\n");
Jason Evanse476f8a2010-01-16 09:53:50 -0800116 if (opt_abort)
117 abort();
Jason Evans289053c2009-06-22 12:08:42 -0700118
Jason Evanse476f8a2010-01-16 09:53:50 -0800119 return (arenas[0]);
Jason Evans289053c2009-06-22 12:08:42 -0700120}
121
/*
 * Slow path, called only by choose_arena().  Bind the calling thread to an
 * arena: prefer an uninitialized slot, else the least-loaded arena.  Bumps
 * the chosen arena's thread count and caches the binding in TSD.
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;	/* Sentinel: "no empty slot seen". */
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		/* Only one arena exists; everyone shares it. */
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	/* Cache the binding so subsequent calls take the fast path. */
	arenas_tsd_set(&ret);

	return (ret);
}
Jason Evans289053c2009-06-22 12:08:42 -0700181
Jason Evans03c22372010-01-03 12:10:42 -0800182static void
183stats_print_atexit(void)
184{
185
Jason Evans7372b152012-02-10 20:22:09 -0800186 if (config_tcache && config_stats) {
187 unsigned i;
Jason Evans03c22372010-01-03 12:10:42 -0800188
Jason Evans7372b152012-02-10 20:22:09 -0800189 /*
190 * Merge stats from extant threads. This is racy, since
191 * individual threads do not lock when recording tcache stats
192 * events. As a consequence, the final stats may be slightly
193 * out of date by the time they are reported, if other threads
194 * continue to allocate.
195 */
196 for (i = 0; i < narenas; i++) {
197 arena_t *arena = arenas[i];
198 if (arena != NULL) {
199 tcache_t *tcache;
Jason Evans03c22372010-01-03 12:10:42 -0800200
Jason Evans7372b152012-02-10 20:22:09 -0800201 /*
202 * tcache_stats_merge() locks bins, so if any
203 * code is introduced that acquires both arena
204 * and bin locks in the opposite order,
205 * deadlocks may result.
206 */
207 malloc_mutex_lock(&arena->lock);
208 ql_foreach(tcache, &arena->tcache_ql, link) {
209 tcache_stats_merge(tcache, arena);
210 }
211 malloc_mutex_unlock(&arena->lock);
Jason Evans03c22372010-01-03 12:10:42 -0800212 }
Jason Evans03c22372010-01-03 12:10:42 -0800213 }
214 }
Jason Evans0a5489e2012-03-01 17:19:20 -0800215 je_malloc_stats_print(NULL, NULL, NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700216}
217
Jason Evans289053c2009-06-22 12:08:42 -0700218/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800219 * End miscellaneous support functions.
Jason Evans289053c2009-06-22 12:08:42 -0700220 */
221/******************************************************************************/
222/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800223 * Begin initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700224 */
225
/*
 * Return the number of online CPUs, or 1 if the count cannot be determined.
 *
 * Bug fix: the original assigned ret = 1 on sysconf() failure but then
 * unconditionally overwrote it with (unsigned)result, so an error (-1)
 * yielded UINT_MAX instead of the intended fallback of 1.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
Jason Evansb7924f52009-06-23 19:01:18 -0700241
Jason Evanscd9a1342012-03-21 18:33:03 -0700242void
Jason Evans597632b2011-03-18 13:41:33 -0700243arenas_cleanup(void *arg)
244{
Jason Evanscd9a1342012-03-21 18:33:03 -0700245 arena_t *arena = *(arena_t **)arg;
Jason Evans597632b2011-03-18 13:41:33 -0700246
247 malloc_mutex_lock(&arenas_lock);
248 arena->nthreads--;
249 malloc_mutex_unlock(&arenas_lock);
250}
251
Jason Evans289053c2009-06-22 12:08:42 -0700252static inline bool
253malloc_init(void)
254{
255
256 if (malloc_initialized == false)
257 return (malloc_init_hard());
258
259 return (false);
260}
261
/*
 * Extract the next "key:value" pair from the conf string at *opts_p.  On
 * success, store the (non-NUL-terminated) key and value spans via the out
 * parameters, advance *opts_p past the pair (and any trailing comma), and
 * return false.  Return true at end of input or on a malformed string.
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool done;
	const char *cursor = *opts_p;

	/* The key begins at the current read position. */
	*k_p = cursor;

	/* Scan the key: [A-Za-z0-9_]* terminated by ':'. */
	done = false;
	while (done == false) {
		switch (*cursor) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			cursor++;
			break;
		case ':':
			cursor++;
			*klen_p = (uintptr_t)cursor - 1 - (uintptr_t)*k_p;
			*v_p = cursor;
			done = true;
			break;
		case '\0':
			if (cursor != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	/* Scan the value: everything up to ',' or end of string. */
	done = false;
	while (done == false) {
		switch (*cursor) {
		case ',':
			cursor++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*cursor == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)cursor - 1 - (uintptr_t)*v_p;
			done = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)cursor - (uintptr_t)*v_p;
			done = true;
			break;
		default:
			cursor++;
			break;
		}
	}

	*opts_p = cursor;
	return (false);
}
337
/*
 * Report a malformed or out-of-range configuration pair.  k and v are not
 * NUL-terminated, hence the explicit precision arguments.
 */
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}
346
/*
 * Parse run-time options from three sources, in order (later sources take
 * precedence because their assignments happen last):
 *   0) the compiled-in je_malloc_conf string, if any;
 *   1) the name of the /etc/malloc.conf symbolic link, if it exists;
 *   2) the MALLOC_CONF environment variable, if set.
 * Each source is a comma-separated list of key:value pairs; unknown pairs
 * and invalid values are reported via malloc_conf_error() and skipped.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
/*
 * If the key matches n, parse v as "true"/"false" into o and set hit;
 * otherwise clear hit.  Invalid values are reported but leave o unchanged.
 */
#define CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
/* As above, but consume the pair (continue the parse loop) on a match. */
#define CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
/* On key match, parse v as an unsigned integer clamped to [min, max]. */
#define CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
/* On key match, parse v as a signed integer within [min, max]. */
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
/* On key match, copy v (truncated to fit) into the char array o. */
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				/*
				 * Valgrind mode overrides fill options:
				 * junk/zero filling would defeat Valgrind's
				 * tracking, and redzones/quarantine improve
				 * its error detection.
				 */
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			/* No handler consumed the pair: unknown key. */
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
/*
 * NOTE(review): CONF_HANDLE_BOOL_HIT is not #undef'd here and so leaks past
 * this function — confirm whether that is intentional.
 */
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
577
578static bool
579malloc_init_hard(void)
580{
Jason Evansb7924f52009-06-23 19:01:18 -0700581 arena_t *init_arenas[1];
Jason Evans289053c2009-06-22 12:08:42 -0700582
583 malloc_mutex_lock(&init_lock);
Jason Evans41b6afb2012-02-02 22:04:57 -0800584 if (malloc_initialized || IS_INITIALIZER) {
Jason Evans289053c2009-06-22 12:08:42 -0700585 /*
586 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -0800587 * acquired init_lock, or this thread is the initializing
588 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -0700589 */
590 malloc_mutex_unlock(&init_lock);
591 return (false);
592 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800593#ifdef JEMALLOC_THREADED_INIT
Jason Evans02b23122012-04-05 11:06:23 -0700594 if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
Jason Evansb7924f52009-06-23 19:01:18 -0700595 /* Busy-wait until the initializing thread completes. */
596 do {
597 malloc_mutex_unlock(&init_lock);
598 CPU_SPINWAIT;
599 malloc_mutex_lock(&init_lock);
600 } while (malloc_initialized == false);
Jason Evans2541e1b2010-07-22 11:35:59 -0700601 malloc_mutex_unlock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700602 return (false);
603 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800604#endif
605 malloc_initializer = INITIALIZER;
Jason Evans289053c2009-06-22 12:08:42 -0700606
Jason Evanscd9a1342012-03-21 18:33:03 -0700607 malloc_tsd_boot();
Jason Evans7372b152012-02-10 20:22:09 -0800608 if (config_prof)
609 prof_boot0();
Jason Evans289053c2009-06-22 12:08:42 -0700610
Jason Evanse7339702010-10-23 18:37:06 -0700611 malloc_conf_init();
Jason Evans289053c2009-06-22 12:08:42 -0700612
Mike Hommeye77fa592012-03-28 09:53:16 +0200613#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
Jason Evansa0bf2422010-01-29 14:30:41 -0800614 /* Register fork handlers. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -0700615 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
616 jemalloc_postfork_child) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800617 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800618 if (opt_abort)
619 abort();
620 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800621#endif
Jason Evans3c234352010-01-27 13:10:55 -0800622
Jason Evans03c22372010-01-03 12:10:42 -0800623 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -0700624 /* Print statistics at exit. */
Jason Evansa0bf2422010-01-29 14:30:41 -0800625 if (atexit(stats_print_atexit) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800626 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800627 if (opt_abort)
628 abort();
629 }
Jason Evans289053c2009-06-22 12:08:42 -0700630 }
631
Mike Hommeyb8325f92012-04-12 15:15:35 +0200632 if (base_boot()) {
Jason Evansa0bf2422010-01-29 14:30:41 -0800633 malloc_mutex_unlock(&init_lock);
634 return (true);
635 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700636
Mike Hommeyb8325f92012-04-12 15:15:35 +0200637 if (chunk_boot0()) {
Jason Evans3c234352010-01-27 13:10:55 -0800638 malloc_mutex_unlock(&init_lock);
639 return (true);
640 }
641
Jason Evans41b6afb2012-02-02 22:04:57 -0800642 if (ctl_boot()) {
643 malloc_mutex_unlock(&init_lock);
644 return (true);
645 }
646
Jason Evans7372b152012-02-10 20:22:09 -0800647 if (config_prof)
648 prof_boot1();
Jason Evans3383af62010-02-11 08:59:06 -0800649
Jason Evansb1726102012-02-28 16:50:47 -0800650 arena_boot();
Jason Evans289053c2009-06-22 12:08:42 -0700651
Jason Evanscd9a1342012-03-21 18:33:03 -0700652 if (config_tcache && tcache_boot0()) {
Jason Evans84c8eef2011-03-16 10:30:13 -0700653 malloc_mutex_unlock(&init_lock);
654 return (true);
655 }
Jason Evans84cbbcb2009-12-29 00:09:15 -0800656
Jason Evanse476f8a2010-01-16 09:53:50 -0800657 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700658 malloc_mutex_unlock(&init_lock);
659 return (true);
660 }
Jason Evans289053c2009-06-22 12:08:42 -0700661
Jason Evans8e6f8b42011-11-03 18:40:03 -0700662 if (malloc_mutex_init(&arenas_lock))
663 return (true);
664
Jason Evansb7924f52009-06-23 19:01:18 -0700665 /*
666 * Create enough scaffolding to allow recursive allocation in
667 * malloc_ncpus().
668 */
669 narenas = 1;
670 arenas = init_arenas;
671 memset(arenas, 0, sizeof(arena_t *) * narenas);
672
673 /*
674 * Initialize one arena here. The rest are lazily created in
675 * choose_arena_hard().
676 */
677 arenas_extend(0);
678 if (arenas[0] == NULL) {
679 malloc_mutex_unlock(&init_lock);
680 return (true);
681 }
682
Jason Evanscd9a1342012-03-21 18:33:03 -0700683 /* Initialize allocation counters before any allocations can occur. */
684 if (config_stats && thread_allocated_tsd_boot()) {
685 malloc_mutex_unlock(&init_lock);
686 return (true);
687 }
Jason Evansb7924f52009-06-23 19:01:18 -0700688
Jason Evanscd9a1342012-03-21 18:33:03 -0700689 if (arenas_tsd_boot()) {
690 malloc_mutex_unlock(&init_lock);
691 return (true);
692 }
693
694 if (config_tcache && tcache_boot1()) {
695 malloc_mutex_unlock(&init_lock);
696 return (true);
697 }
698
Jason Evans122449b2012-04-06 00:35:09 -0700699 if (config_fill && quarantine_boot()) {
700 malloc_mutex_unlock(&init_lock);
701 return (true);
702 }
703
Jason Evans6da54182012-03-23 18:05:51 -0700704 if (config_prof && prof_boot2()) {
705 malloc_mutex_unlock(&init_lock);
706 return (true);
707 }
708
Jason Evansb7924f52009-06-23 19:01:18 -0700709 /* Get number of CPUs. */
Jason Evansb7924f52009-06-23 19:01:18 -0700710 malloc_mutex_unlock(&init_lock);
711 ncpus = malloc_ncpus();
712 malloc_mutex_lock(&init_lock);
713
Jason Evanscd9a1342012-03-21 18:33:03 -0700714 if (chunk_boot1()) {
715 malloc_mutex_unlock(&init_lock);
716 return (true);
717 }
718
Jason Evans633aaff2012-04-03 08:47:07 -0700719 if (mutex_boot()) {
720 malloc_mutex_unlock(&init_lock);
721 return (true);
722 }
723
Jason Evanse7339702010-10-23 18:37:06 -0700724 if (opt_narenas == 0) {
Jason Evans289053c2009-06-22 12:08:42 -0700725 /*
Jason Evans5463a522009-12-29 00:09:15 -0800726 * For SMP systems, create more than one arena per CPU by
727 * default.
Jason Evans289053c2009-06-22 12:08:42 -0700728 */
Jason Evanse7339702010-10-23 18:37:06 -0700729 if (ncpus > 1)
730 opt_narenas = ncpus << 2;
731 else
732 opt_narenas = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700733 }
Jason Evanse7339702010-10-23 18:37:06 -0700734 narenas = opt_narenas;
735 /*
736 * Make sure that the arenas array can be allocated. In practice, this
737 * limit is enough to allow the allocator to function, but the ctl
738 * machinery will fail to allocate memory at far lower limits.
739 */
740 if (narenas > chunksize / sizeof(arena_t *)) {
Jason Evanse7339702010-10-23 18:37:06 -0700741 narenas = chunksize / sizeof(arena_t *);
Jason Evansd81e4bd2012-03-06 14:57:45 -0800742 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
743 narenas);
Jason Evans289053c2009-06-22 12:08:42 -0700744 }
Jason Evans289053c2009-06-22 12:08:42 -0700745
Jason Evans289053c2009-06-22 12:08:42 -0700746 /* Allocate and initialize arenas. */
747 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
748 if (arenas == NULL) {
749 malloc_mutex_unlock(&init_lock);
750 return (true);
751 }
752 /*
753 * Zero the array. In practice, this should always be pre-zeroed,
754 * since it was just mmap()ed, but let's be sure.
755 */
756 memset(arenas, 0, sizeof(arena_t *) * narenas);
Jason Evansb7924f52009-06-23 19:01:18 -0700757 /* Copy the pointer to the one arena that was already initialized. */
758 arenas[0] = init_arenas[0];
Jason Evans289053c2009-06-22 12:08:42 -0700759
760 malloc_initialized = true;
761 malloc_mutex_unlock(&init_lock);
762 return (false);
763}
764
765/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800766 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700767 */
768/******************************************************************************/
769/*
770 * Begin malloc(3)-compatible functions.
771 */
772
/*
 * malloc(3)-compatible allocation entry point.
 *
 * Returns a pointer to at least size bytes, or NULL with errno == ENOMEM on
 * failure.  size == 0 is treated as a request for 1 byte.  When profiling is
 * enabled, small allocations selected for sampling are promoted to
 * SMALL_MAXCLASS+1 so that arena_prof_promoted() can record the true usable
 * size.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	/* CC_SILENCE: cnt is only meaningfully set when opt_prof is true. */
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	/* malloc(0) must return a unique pointer; allocate 1 byte instead. */
	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		/* 1 == number of frames to ignore in the backtrace. */
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		/*
		 * (uintptr_t)1U is the "not sampled" sentinel -- TODO confirm
		 * against prof.h; sampled small objects are promoted so their
		 * size class can carry a prof context.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		/* usize is only needed for stats accounting and valgrind. */
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}
829
/*
 * Shared implementation for posix_memalign(), aligned_alloc(), memalign(),
 * and valloc().
 *
 * On success stores the allocation through *memptr and returns 0; on failure
 * returns EINVAL (bad alignment, or prof backtrace failure) or ENOMEM.
 * min_alignment is the smallest alignment the calling API permits (e.g.
 * sizeof(void *) for posix_memalign); it must be nonzero.
 */
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		/* sa2u() returns 0 when size+alignment would overflow. */
		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			/* 2 == backtrace frames to ignore (this fn + caller). */
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				/*
				 * Sampled small request: promote to the next
				 * size class so it can carry a prof context.
				 */
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	/* Stats/prof bookkeeping only runs for successful allocations. */
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}
923
Jason Evansa5070042011-08-12 13:48:27 -0700924JEMALLOC_ATTR(nonnull(1))
925JEMALLOC_ATTR(visibility("default"))
926int
Jason Evans0a5489e2012-03-01 17:19:20 -0800927je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -0700928{
Jason Evans122449b2012-04-06 00:35:09 -0700929 int ret = imemalign(memptr, alignment, size, sizeof(void *));
930 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
931 config_prof), false);
932 return (ret);
Jason Evans0a0bbf62012-03-13 12:55:21 -0700933}
934
935JEMALLOC_ATTR(malloc)
936JEMALLOC_ATTR(visibility("default"))
937void *
938je_aligned_alloc(size_t alignment, size_t size)
939{
940 void *ret;
941 int err;
942
943 if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
944 ret = NULL;
945 errno = err;
946 }
Jason Evans122449b2012-04-06 00:35:09 -0700947 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
948 false);
Jason Evans0a0bbf62012-03-13 12:55:21 -0700949 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -0700950}
951
/*
 * calloc(3)-compatible entry point: allocate num*size zeroed bytes.
 *
 * Returns NULL with errno == ENOMEM on allocation failure or when num*size
 * overflows size_t.  num == 0 or size == 0 is treated as a 1-byte request.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			/*
			 * num_size wrapped to exactly 0 despite nonzero
			 * operands: overflow.
			 */
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		/* Sampled small request: promote so it carries a prof ctx. */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	/* Final arg true: tell valgrind the memory is zeroed. */
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
1028
/*
 * realloc(3)-compatible entry point.
 *
 * Semantics: realloc(ptr, 0) frees ptr and returns NULL; realloc(NULL, size)
 * behaves like malloc(size); otherwise the object is resized (possibly moved)
 * and NULL is returned with errno == ENOMEM on failure, leaving ptr valid.
 *
 * Bookkeeping subtleties:
 * - old_size/old_rzsize capture the pre-resize usable/redzone sizes for the
 *   stats, prof, and valgrind hooks; which branch computes them depends on
 *   which subsystems are compiled in/enabled.
 * - old_ctx is the previous prof context; it is cleared whenever the old
 *   object was NOT deallocated (alloc failure) so prof_realloc() does not
 *   decrement a still-live context.
 */
JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(p). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			/* Sampled small request: promote for prof context. */
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}
1176
Jason Evanse476f8a2010-01-16 09:53:50 -08001177JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001178void
Jason Evans0a5489e2012-03-01 17:19:20 -08001179je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001180{
1181
Jason Evansb1476112012-04-05 13:36:17 -07001182 UTRACE(ptr, 0, 0);
Jason Evansf0047372012-04-02 15:18:24 -07001183 if (ptr != NULL) {
1184 size_t usize;
Jason Evans122449b2012-04-06 00:35:09 -07001185 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evanse4f78462010-10-22 10:45:59 -07001186
Jason Evansf0047372012-04-02 15:18:24 -07001187 assert(malloc_initialized || IS_INITIALIZER);
1188
1189 if (config_prof && opt_prof) {
Jason Evans122449b2012-04-06 00:35:09 -07001190 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001191 prof_free(ptr, usize);
Jason Evans122449b2012-04-06 00:35:09 -07001192 } else if (config_stats || config_valgrind)
1193 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001194 if (config_stats)
1195 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans122449b2012-04-06 00:35:09 -07001196 if (config_valgrind && opt_valgrind)
1197 rzsize = p2rz(ptr);
1198 iqalloc(ptr);
1199 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
Jason Evansf0047372012-04-02 15:18:24 -07001200 }
Jason Evans289053c2009-06-22 12:08:42 -07001201}
1202
1203/*
1204 * End malloc(3)-compatible functions.
1205 */
1206/******************************************************************************/
1207/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001208 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001209 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001210
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * Legacy memalign(3) override.  The imemalign() error code is deliberately
 * discarded: on failure p remains NULL, matching memalign() semantics.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *p JEMALLOC_CC_SILENCE_INIT(NULL);

	imemalign(&p, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(p != NULL, p, size, false);
	return (p);
}
#endif
1223
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * Legacy valloc(3) override: page-aligned allocation.  As with memalign(),
 * the error code is discarded and p stays NULL on failure.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *p JEMALLOC_CC_SILENCE_INIT(NULL);

	imemalign(&p, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(p != NULL, p, size, false);
	return (p);
}
#endif
1236
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 *
 * NOTE(review): the hook signatures below omit that extra caller argument;
 * glibc tolerates the mismatch since the hooks are called through function
 * pointers -- confirm against glibc's __malloc_hook declarations.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
1267
Jason Evans6a0d2912010-09-20 16:44:23 -07001268/*
1269 * End non-standard override functions.
1270 */
1271/******************************************************************************/
1272/*
Jason Evans289053c2009-06-22 12:08:42 -07001273 * Begin non-standard functions.
1274 */
1275
Jason Evanse476f8a2010-01-16 09:53:50 -08001276JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001277size_t
Jason Evans0a5489e2012-03-01 17:19:20 -08001278je_malloc_usable_size(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001279{
Jason Evans569432c2009-12-29 00:09:15 -08001280 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001281
Jason Evans41b6afb2012-02-02 22:04:57 -08001282 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001283
Jason Evans7372b152012-02-10 20:22:09 -08001284 if (config_ivsalloc)
Jason Evans122449b2012-04-06 00:35:09 -07001285 ret = ivsalloc(ptr, config_prof);
Jason Evans2465bdf2012-03-26 13:13:55 -07001286 else
Jason Evans122449b2012-04-06 00:35:09 -07001287 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
Jason Evans289053c2009-06-22 12:08:42 -07001288
Jason Evans569432c2009-12-29 00:09:15 -08001289 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001290}
1291
/*
 * Emit allocator statistics through write_cb (with cbopaque passed through
 * as its first argument).  opts selects which sections stats_print()
 * renders; a NULL write_cb presumably falls back to a default sink inside
 * stats_print() -- TODO confirm.
 */
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
1300
Jason Evans3c234352010-01-27 13:10:55 -08001301JEMALLOC_ATTR(visibility("default"))
1302int
Jason Evans0a5489e2012-03-01 17:19:20 -08001303je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08001304 size_t newlen)
1305{
1306
Jason Evans95833312010-01-27 13:45:21 -08001307 if (malloc_init())
1308 return (EAGAIN);
1309
Jason Evans3c234352010-01-27 13:10:55 -08001310 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1311}
1312
1313JEMALLOC_ATTR(visibility("default"))
1314int
Jason Evans0a5489e2012-03-01 17:19:20 -08001315je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08001316{
1317
Jason Evans95833312010-01-27 13:45:21 -08001318 if (malloc_init())
1319 return (EAGAIN);
1320
Jason Evans3c234352010-01-27 13:10:55 -08001321 return (ctl_nametomib(name, mibp, miblenp));
1322}
1323
1324JEMALLOC_ATTR(visibility("default"))
1325int
Jason Evans0a5489e2012-03-01 17:19:20 -08001326je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1327 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08001328{
1329
Jason Evans95833312010-01-27 13:45:21 -08001330 if (malloc_init())
1331 return (EAGAIN);
1332
Jason Evans3c234352010-01-27 13:10:55 -08001333 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1334}
1335
Jason Evans7e77eaf2012-03-02 17:47:37 -08001336/*
1337 * End non-standard functions.
1338 */
1339/******************************************************************************/
1340/*
1341 * Begin experimental functions.
1342 */
1343#ifdef JEMALLOC_EXPERIMENTAL
1344
Jason Evans8e3c3c62010-09-17 15:46:18 -07001345JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001346iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001347{
1348
Jason Evans5ff709c2012-04-11 18:13:45 -07001349 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1350 alignment)));
Jason Evans38d92102011-03-23 00:37:29 -07001351
Jason Evans8e3c3c62010-09-17 15:46:18 -07001352 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001353 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001354 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001355 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001356 else
Jason Evans38d92102011-03-23 00:37:29 -07001357 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001358}
1359
/*
 * Experimental allocm() entry point.
 *
 * Allocates at least size bytes with the alignment and zeroing encoded in
 * flags, stores the result through *ptr, and optionally reports the usable
 * size through *rsize.  Returns ALLOCM_SUCCESS or ALLOCM_ERR_OOM (with
 * *ptr = NULL).  size must be nonzero.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	/*
	 * Decode the requested alignment from the low bits of flags;
	 * ALLOCM_LG_ALIGN_MASK holds lg(alignment), 0 meaning "natural".
	 */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	/* s2u()/sa2u() return 0 on size overflow. */
	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/* Sampled small request: promote so it carries a prof ctx. */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
1428
/*
 * Experimental API: resize the allocation at *ptr to at least size bytes
 * (opportunistically up to size+extra), honoring the alignment, ALLOCM_ZERO,
 * and ALLOCM_NO_MOVE constraints encoded in flags.  On success *ptr is
 * updated (possibly to a new address) and, if rsize is non-NULL, *rsize
 * receives the new usable size.  Returns ALLOCM_SUCCESS,
 * ALLOCM_ERR_NOT_MOVED (ALLOCM_NO_MOVE was set and in-place resize failed),
 * or ALLOCM_ERR_OOM.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	/*
	 * The low bits of flags encode lg(alignment); the mask against
	 * (SIZE_T_MAX-1) maps a lg value of 0 to alignment 0 (unspecified).
	 */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);	/* size+extra must not wrap. */
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		/* Capture old ctx/size before p is potentially freed. */
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			/*
			 * Sampled small request: allocate a large region so
			 * the sample can be tracked, shrinking extra so the
			 * total request stays within size+extra.
			 */
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			/* usize not yet computed when stats are disabled. */
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		/* In-place resize failed; original allocation is untouched. */
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
1538
Jason Evans6a0d2912010-09-20 16:44:23 -07001539JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001540JEMALLOC_ATTR(visibility("default"))
1541int
Jason Evans0a5489e2012-03-01 17:19:20 -08001542je_sallocm(const void *ptr, size_t *rsize, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001543{
1544 size_t sz;
1545
Jason Evans41b6afb2012-02-02 22:04:57 -08001546 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001547
Jason Evans7372b152012-02-10 20:22:09 -08001548 if (config_ivsalloc)
Jason Evans122449b2012-04-06 00:35:09 -07001549 sz = ivsalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001550 else {
1551 assert(ptr != NULL);
Jason Evans122449b2012-04-06 00:35:09 -07001552 sz = isalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001553 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001554 assert(rsize != NULL);
1555 *rsize = sz;
1556
1557 return (ALLOCM_SUCCESS);
1558}
1559
Jason Evans6a0d2912010-09-20 16:44:23 -07001560JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001561JEMALLOC_ATTR(visibility("default"))
1562int
Jason Evans0a5489e2012-03-01 17:19:20 -08001563je_dallocm(void *ptr, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001564{
Jason Evanse4f78462010-10-22 10:45:59 -07001565 size_t usize;
Jason Evans122449b2012-04-06 00:35:09 -07001566 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001567
1568 assert(ptr != NULL);
Jason Evans41b6afb2012-02-02 22:04:57 -08001569 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001570
Jason Evansb1476112012-04-05 13:36:17 -07001571 UTRACE(ptr, 0, 0);
Jason Evans122449b2012-04-06 00:35:09 -07001572 if (config_stats || config_valgrind)
1573 usize = isalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001574 if (config_prof && opt_prof) {
Jason Evans122449b2012-04-06 00:35:09 -07001575 if (config_stats == false && config_valgrind == false)
1576 usize = isalloc(ptr, config_prof);
Jason Evanse4f78462010-10-22 10:45:59 -07001577 prof_free(ptr, usize);
1578 }
Jason Evans7372b152012-02-10 20:22:09 -08001579 if (config_stats)
Jason Evanscd9a1342012-03-21 18:33:03 -07001580 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans122449b2012-04-06 00:35:09 -07001581 if (config_valgrind && opt_valgrind)
1582 rzsize = p2rz(ptr);
1583 iqalloc(ptr);
1584 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001585
1586 return (ALLOCM_SUCCESS);
1587}
1588
Jason Evans7e15dab2012-02-29 12:56:37 -08001589JEMALLOC_ATTR(visibility("default"))
1590int
Jason Evans0a5489e2012-03-01 17:19:20 -08001591je_nallocm(size_t *rsize, size_t size, int flags)
Jason Evans7e15dab2012-02-29 12:56:37 -08001592{
1593 size_t usize;
1594 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1595 & (SIZE_T_MAX-1));
1596
1597 assert(size != 0);
1598
1599 if (malloc_init())
1600 return (ALLOCM_ERR_OOM);
1601
Jason Evans5ff709c2012-04-11 18:13:45 -07001602 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
Jason Evans7e15dab2012-02-29 12:56:37 -08001603 if (usize == 0)
1604 return (ALLOCM_ERR_OOM);
1605
1606 if (rsize != NULL)
1607 *rsize = usize;
1608 return (ALLOCM_SUCCESS);
1609}
1610
Jason Evans7e77eaf2012-03-02 17:47:37 -08001611#endif
Jason Evans289053c2009-06-22 12:08:42 -07001612/*
Jason Evans7e77eaf2012-03-02 17:47:37 -08001613 * End experimental functions.
Jason Evans289053c2009-06-22 12:08:42 -07001614 */
1615/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001616/*
1617 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07001618 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001619 */
1620
Jason Evans41b6afb2012-02-02 22:04:57 -08001621#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001622void
Jason Evans804c9ec2009-06-22 17:44:33 -07001623jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001624#else
Jason Evans86e58582012-04-18 19:01:00 -07001625JEMALLOC_ATTR(visibility("default"))
Jason Evans41b6afb2012-02-02 22:04:57 -08001626void
1627_malloc_prefork(void)
1628#endif
Jason Evans289053c2009-06-22 12:08:42 -07001629{
Jason Evansfbbb6242010-01-24 17:56:48 -08001630 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001631
1632 /* Acquire all mutexes in a safe order. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001633 malloc_mutex_prefork(&arenas_lock);
Jason Evansfbbb6242010-01-24 17:56:48 -08001634 for (i = 0; i < narenas; i++) {
1635 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001636 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08001637 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001638 base_prefork();
1639 huge_prefork();
1640 chunk_dss_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07001641}
1642
Jason Evans41b6afb2012-02-02 22:04:57 -08001643#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001644void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001645jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001646#else
Jason Evans86e58582012-04-18 19:01:00 -07001647JEMALLOC_ATTR(visibility("default"))
Jason Evans41b6afb2012-02-02 22:04:57 -08001648void
1649_malloc_postfork(void)
1650#endif
Jason Evans289053c2009-06-22 12:08:42 -07001651{
1652 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001653
1654 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001655 chunk_dss_postfork_parent();
1656 huge_postfork_parent();
1657 base_postfork_parent();
Jason Evans289053c2009-06-22 12:08:42 -07001658 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001659 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001660 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07001661 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001662 malloc_mutex_postfork_parent(&arenas_lock);
1663}
1664
1665void
1666jemalloc_postfork_child(void)
1667{
1668 unsigned i;
1669
1670 /* Release all mutexes, now that fork() has completed. */
1671 chunk_dss_postfork_child();
1672 huge_postfork_child();
1673 base_postfork_child();
1674 for (i = 0; i < narenas; i++) {
1675 if (arenas[i] != NULL)
1676 arena_postfork_child(arenas[i]);
1677 }
1678 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001679}
Jason Evans2dbecf12010-09-05 10:35:13 -07001680
1681/******************************************************************************/
Jason Evans01b3fe52012-04-03 09:28:00 -07001682/*
1683 * The following functions are used for TLS allocation/deallocation in static
1684 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1685 * is that these avoid accessing TLS variables.
1686 */
1687
1688static void *
1689a0alloc(size_t size, bool zero)
1690{
1691
1692 if (malloc_init())
1693 return (NULL);
1694
1695 if (size == 0)
1696 size = 1;
1697
1698 if (size <= arena_maxclass)
1699 return (arena_malloc(arenas[0], size, zero, false));
1700 else
1701 return (huge_malloc(size, zero));
1702}
1703
/* malloc() analogue for TLS bootstrap; see a0alloc(). */
void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}
1710
1711void *
1712a0calloc(size_t num, size_t size)
1713{
1714
1715 return (a0alloc(num * size, true));
1716}
1717
1718void
1719a0free(void *ptr)
1720{
1721 arena_chunk_t *chunk;
1722
1723 if (ptr == NULL)
1724 return;
1725
1726 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1727 if (chunk != ptr)
1728 arena_dalloc(chunk->arena, chunk, ptr, false);
1729 else
1730 huge_dalloc(ptr, true);
1731}
1732
1733/******************************************************************************/