#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool	malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void
init_init_lock()
{

	malloc_mutex_init(&init_lock);
}
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif

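/*
 * The entry points below emit UTRACE(0, size, ret) for malloc-style calls,
 * UTRACE(ptr, size, ret) for realloc, and UTRACE(ptr, 0, 0) for free, so a
 * utrace(2) consumer can distinguish the three record types by which fields
 * are zero.
 */
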
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume a single CPU. */
		result = 1;
	}
#endif
	ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

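/*
 * malloc_init() is the inline fast-path check used by the allocation entry
 * points below; all of the expensive one-time bootstrap work is factored
 * out into malloc_init_hard().
 */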
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

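/*
 * Parse one "key:value" pair from a conf string such as
 * "abort:true,narenas:8".  The same syntax is accepted from je_malloc_conf,
 * the /etc/malloc.conf symlink target, and the MALLOC_CONF environment
 * variable (see malloc_conf_init() below).
 */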
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

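			/*
			 * Each handler below matches the current key against
			 * one option name and, on a hit, validates the value
			 * and continues to the next conf pair.  For example,
			 * CONF_HANDLE_BOOL(opt_abort, "abort") consumes
			 * "abort:true" by setting opt_abort to true.
			 */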
			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

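/*
 * One-time bootstrap, serialized by init_lock.  The ordering below matters:
 * TSD and option parsing come first, then the per-subsystem *_boot()
 * routines run roughly in dependency order, and only then is the final
 * arenas array sized and allocated.
 */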
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		/* Release init_lock on this failure path as well. */
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

Jason Evanse476f8a2010-01-16 09:53:50 -0800783 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700784 */
785/******************************************************************************/
786/*
787 * Begin malloc(3)-compatible functions.
788 */
789
Jason Evans9ad48232010-01-03 11:59:20 -0800790JEMALLOC_ATTR(malloc)
Jason Evanse476f8a2010-01-16 09:53:50 -0800791JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700792void *
Jason Evans0a5489e2012-03-01 17:19:20 -0800793je_malloc(size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700794{
795 void *ret;
Jason Evans7372b152012-02-10 20:22:09 -0800796 size_t usize;
Jason Evans9225a192012-03-23 15:39:07 -0700797 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700798
799 if (malloc_init()) {
800 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -0700801 goto label_oom;
Jason Evans289053c2009-06-22 12:08:42 -0700802 }
803
Jason Evansc90ad712012-02-28 20:31:37 -0800804 if (size == 0)
805 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700806
Jason Evans7372b152012-02-10 20:22:09 -0800807 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -0700808 usize = s2u(size);
Jason Evansa5070042011-08-12 13:48:27 -0700809 PROF_ALLOC_PREP(1, usize, cnt);
810 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -0700811 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -0700812 goto label_oom;
Jason Evans0b270a92010-03-31 16:45:04 -0700813 }
Jason Evans93443682010-10-20 17:39:18 -0700814 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -0800815 SMALL_MAXCLASS) {
816 ret = imalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -0700817 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700818 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -0700819 } else
820 ret = imalloc(size);
Jason Evans7372b152012-02-10 20:22:09 -0800821 } else {
Jason Evans122449b2012-04-06 00:35:09 -0700822 if (config_stats || (config_valgrind && opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -0800823 usize = s2u(size);
Jason Evans0b270a92010-03-31 16:45:04 -0700824 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -0700825 }
Jason Evans289053c2009-06-22 12:08:42 -0700826
Jason Evansa1ee7832012-04-10 15:07:44 -0700827label_oom:
Jason Evans289053c2009-06-22 12:08:42 -0700828 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -0800829 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -0800830 malloc_write("<jemalloc>: Error in malloc(): "
831 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -0700832 abort();
833 }
834 errno = ENOMEM;
835 }
Jason Evans7372b152012-02-10 20:22:09 -0800836 if (config_prof && opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700837 prof_malloc(ret, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -0800838 if (config_stats && ret != NULL) {
Jason Evans122449b2012-04-06 00:35:09 -0700839 assert(usize == isalloc(ret, config_prof));
Jason Evanscd9a1342012-03-21 18:33:03 -0700840 thread_allocated_tsd_get()->allocated += usize;
Jason Evans93443682010-10-20 17:39:18 -0700841 }
Jason Evansb1476112012-04-05 13:36:17 -0700842 UTRACE(0, size, ret);
Jason Evans122449b2012-04-06 00:35:09 -0700843 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
Jason Evans289053c2009-06-22 12:08:42 -0700844 return (ret);
845}
846
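/*
 * imemalign() backs je_posix_memalign() (min_alignment == sizeof(void *)),
 * je_aligned_alloc() (min_alignment == 1), and the je_memalign()/je_valloc()
 * overrides below.
 */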
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

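/*
 * Expansion example: with "#define je_malloc malloc" in effect,
 * is_malloc(je_malloc) -> is_malloc_(malloc) -> malloc_is_malloc -> 1.
 * Otherwise it expands to malloc_is_je_malloc, which is undefined and thus
 * evaluates to 0 in the #if below.
 */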
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen(), which can make it
 * possible to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The hook functions are
 * actually passed an extra argument for the caller return address, which
 * will be ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
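
/*
 * With the hooks above in place, a glibc-linked program that reaches libc's
 * malloc(3)-compatible entry points is redirected into jemalloc; e.g. a
 * plain malloc(16) call dispatches through __malloc_hook to je_malloc(16).
 */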

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

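/*
 * Example (illustrative only; not part of the library): the usable size
 * reported for an allocation may exceed the requested size because of
 * size-class rounding.
 */
#if 0
static void
example_usable_size(void)
{
	void *p = je_malloc(100);

	if (p != NULL) {
		/* Size-class rounding guarantees at least 100 usable bytes. */
		assert(je_malloc_usable_size(p) >= 100);
		je_free(p);
	}
}
#endif
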
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

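/*
 * Example (illustrative only; not part of the library): typical use of the
 * mallctl*() family.  "epoch" and "stats.allocated" are real mallctl names
 * (the latter requires --enable-stats); the wrapper function itself is
 * hypothetical.
 */
#if 0
static void
example_print_allocated(void)
{
	uint64_t epoch = 1;
	size_t allocated, sz;

	/* Refresh the cached statistics before reading them. */
	je_mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

	sz = sizeof(allocated);
	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		malloc_printf("allocated: %zu\n", allocated);
}
#endif
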
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Sampled small requests ((uintptr_t)cnt != 1U) are promoted
		 * to the smallest large size class so that the allocation can
		 * carry its own profiling context.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

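/*
 * Example (illustrative only; not part of the library): the experimental
 * *allocm() calling convention.  ALLOCM_LG_ALIGN(), ALLOCM_ZERO, and
 * ALLOCM_NO_MOVE are the public flag macros; the wrapper function itself
 * is hypothetical.
 */
#if 0
static void
example_allocm_usage(void)
{
	void *p;
	size_t rsize;

	/* 64-byte-aligned, zeroed allocation of at least 100 bytes. */
	if (je_allocm(&p, &rsize, 100, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		return;

	/* Try to grow in place; ALLOCM_ERR_NOT_MOVED means it would move. */
	if (je_rallocm(&p, &rsize, 200, 0, ALLOCM_NO_MOVE) ==
	    ALLOCM_ERR_NOT_MOVED)
		je_rallocm(&p, &rsize, 200, 0, 0);

	je_dallocm(p, 0);
}
#endif
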
#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

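/*
 * Illustrative sketch (the actual registration happens during
 * initialization, via pthread_atfork() where available):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * The prefork handler acquires every allocator mutex so that no lock is
 * held mid-operation by another thread at fork() time; the postfork
 * handlers then make the mutexes usable again in the parent and child.
 */
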
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_ATTR(visibility("default"))
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/