#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool		malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
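/*
 * MSVC lacks the constructor attribute, so a pointer to _init_init_lock is
 * placed in the ".CRT$XCU" section instead; the CRT walks that section and
 * invokes the function during C initialization, before main().
 */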

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		result = 1;
	}
#endif
	ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
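/*
 * For example, given opts = "junk:true,narenas:4", the first call returns
 * k = "junk" (klen 4) and v = "true" (vlen 4), the second call returns
 * k = "narenas" (klen 7) and v = "4" (vlen 1), and *opts_p is left pointing
 * at the terminating '\0'.
 */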

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

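			/*
			 * Each CONF_HANDLE_*() use below expands to an inline
			 * key comparison; e.g. CONF_HANDLE_BOOL(opt_abort,
			 * "abort") matches the key "abort", parses the value
			 * "true"/"false" into opt_abort, and continues to the
			 * next option.
			 */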
			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
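
/*
 * Example usage (a sketch): the same option string is accepted from any of
 * the three sources probed above, e.g.
 *
 *   MALLOC_CONF="abort:true,lg_chunk:24" ./a.out
 *
 * or a compiled-in je_malloc_conf definition, or the name of the
 * /etc/malloc.conf symbolic link.
 */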

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
997 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
998 && (num_size / size != num)) {
999 /* size_t overflow. */
1000 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001001 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -07001002 }
1003
Jason Evans7372b152012-02-10 20:22:09 -08001004 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001005 usize = s2u(num_size);
Jason Evansa5070042011-08-12 13:48:27 -07001006 PROF_ALLOC_PREP(1, usize, cnt);
1007 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -07001008 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001009 goto label_return;
Jason Evans0b270a92010-03-31 16:45:04 -07001010 }
Jason Evans93443682010-10-20 17:39:18 -07001011 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
Jason Evansb1726102012-02-28 16:50:47 -08001012 <= SMALL_MAXCLASS) {
1013 ret = icalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001014 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001015 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001016 } else
1017 ret = icalloc(num_size);
Jason Evans7372b152012-02-10 20:22:09 -08001018 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001019 if (config_stats || (config_valgrind && opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -08001020 usize = s2u(num_size);
Jason Evans0b270a92010-03-31 16:45:04 -07001021 ret = icalloc(num_size);
Jason Evans93443682010-10-20 17:39:18 -07001022 }
Jason Evans289053c2009-06-22 12:08:42 -07001023
Jason Evansa1ee7832012-04-10 15:07:44 -07001024label_return:
Jason Evans289053c2009-06-22 12:08:42 -07001025 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001026 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001027 malloc_write("<jemalloc>: Error in calloc(): out of "
1028 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001029 abort();
1030 }
Mike Hommeya14bce82012-04-30 12:38:26 +02001031 set_errno(ENOMEM);
Jason Evans289053c2009-06-22 12:08:42 -07001032 }
1033
Jason Evans7372b152012-02-10 20:22:09 -08001034 if (config_prof && opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001035 prof_malloc(ret, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001036 if (config_stats && ret != NULL) {
Jason Evans122449b2012-04-06 00:35:09 -07001037 assert(usize == isalloc(ret, config_prof));
Jason Evanscd9a1342012-03-21 18:33:03 -07001038 thread_allocated_tsd_get()->allocated += usize;
Jason Evans93443682010-10-20 17:39:18 -07001039 }
Jason Evansb1476112012-04-05 13:36:17 -07001040 UTRACE(0, num_size, ret);
Jason Evans122449b2012-04-06 00:35:09 -07001041 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
Jason Evans289053c2009-06-22 12:08:42 -07001042 return (ret);
1043}
1044
1045void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001046je_realloc(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001047{
1048 void *ret;
Jason Evans8694e2e2012-04-23 13:05:32 -07001049 size_t usize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans93443682010-10-20 17:39:18 -07001050 size_t old_size = 0;
Jason Evans122449b2012-04-06 00:35:09 -07001051 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans9225a192012-03-23 15:39:07 -07001052 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1053 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans6109fe02010-02-10 10:37:56 -08001054
Jason Evans289053c2009-06-22 12:08:42 -07001055 if (size == 0) {
Jason Evansf081b882012-02-28 20:24:05 -08001056 if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
Jason Evans122449b2012-04-06 00:35:09 -07001058 if (config_prof) {
1059 old_size = isalloc(ptr, true);
1060 if (config_valgrind && opt_valgrind)
1061 old_rzsize = p2rz(ptr);
1062 } else if (config_stats) {
1063 old_size = isalloc(ptr, false);
1064 if (config_valgrind && opt_valgrind)
1065 old_rzsize = u2rz(old_size);
1066 } else if (config_valgrind && opt_valgrind) {
1067 old_size = isalloc(ptr, false);
1068 old_rzsize = u2rz(old_size);
1069 }
Jason Evansf081b882012-02-28 20:24:05 -08001070 if (config_prof && opt_prof) {
1071 old_ctx = prof_ctx_get(ptr);
Jason Evans6109fe02010-02-10 10:37:56 -08001072 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001073 }
Jason Evans122449b2012-04-06 00:35:09 -07001074 iqalloc(ptr);
Jason Evans289053c2009-06-22 12:08:42 -07001075 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001076 goto label_return;
Jason Evansc90ad712012-02-28 20:31:37 -08001077 } else
1078 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -07001079 }
1080
1081 if (ptr != NULL) {
Jason Evans41b6afb2012-02-02 22:04:57 -08001082 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans289053c2009-06-22 12:08:42 -07001083
Jason Evans122449b2012-04-06 00:35:09 -07001084 if (config_prof) {
1085 old_size = isalloc(ptr, true);
1086 if (config_valgrind && opt_valgrind)
1087 old_rzsize = p2rz(ptr);
1088 } else if (config_stats) {
1089 old_size = isalloc(ptr, false);
1090 if (config_valgrind && opt_valgrind)
1091 old_rzsize = u2rz(old_size);
1092 } else if (config_valgrind && opt_valgrind) {
1093 old_size = isalloc(ptr, false);
1094 old_rzsize = u2rz(old_size);
1095 }
Jason Evans7372b152012-02-10 20:22:09 -08001096 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001097 usize = s2u(size);
Jason Evans50651562010-04-13 16:13:54 -07001098 old_ctx = prof_ctx_get(ptr);
Jason Evansa5070042011-08-12 13:48:27 -07001099 PROF_ALLOC_PREP(1, usize, cnt);
1100 if (cnt == NULL) {
Jason Evans46405e62011-08-30 23:37:29 -07001101 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001102 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -07001103 goto label_oom;
Jason Evans6109fe02010-02-10 10:37:56 -08001104 }
Jason Evans0b270a92010-03-31 16:45:04 -07001105 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
Jason Evansb1726102012-02-28 16:50:47 -08001106 usize <= SMALL_MAXCLASS) {
1107 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001108 false, false);
Jason Evans0b270a92010-03-31 16:45:04 -07001109 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001110 arena_prof_promoted(ret, usize);
Jason Evans46405e62011-08-30 23:37:29 -07001111 else
1112 old_ctx = NULL;
1113 } else {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001114 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans46405e62011-08-30 23:37:29 -07001115 if (ret == NULL)
1116 old_ctx = NULL;
1117 }
Jason Evans7372b152012-02-10 20:22:09 -08001118 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001119 if (config_stats || (config_valgrind && opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -08001120 usize = s2u(size);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001121 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans93443682010-10-20 17:39:18 -07001122 }
Jason Evans289053c2009-06-22 12:08:42 -07001123
Jason Evansa1ee7832012-04-10 15:07:44 -07001124label_oom:
Jason Evans289053c2009-06-22 12:08:42 -07001125 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001126 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001127 malloc_write("<jemalloc>: Error in realloc(): "
1128 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001129 abort();
1130 }
Mike Hommeya14bce82012-04-30 12:38:26 +02001131 set_errno(ENOMEM);
Jason Evans289053c2009-06-22 12:08:42 -07001132 }
1133 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001134 /* realloc(NULL, size) is equivalent to malloc(size). */
Jason Evans7372b152012-02-10 20:22:09 -08001135 if (config_prof && opt_prof)
Jason Evans50651562010-04-13 16:13:54 -07001136 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001137 if (malloc_init()) {
Jason Evans7372b152012-02-10 20:22:09 -08001138 if (config_prof && opt_prof)
Jason Evans6109fe02010-02-10 10:37:56 -08001139 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001140 ret = NULL;
1141 } else {
Jason Evans7372b152012-02-10 20:22:09 -08001142 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001143 usize = s2u(size);
Jason Evansa5070042011-08-12 13:48:27 -07001144 PROF_ALLOC_PREP(1, usize, cnt);
1145 if (cnt == NULL)
Jason Evans0b270a92010-03-31 16:45:04 -07001146 ret = NULL;
1147 else {
1148 if (prof_promote && (uintptr_t)cnt !=
Jason Evans93443682010-10-20 17:39:18 -07001149 (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001150 SMALL_MAXCLASS) {
1151 ret = imalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001152 if (ret != NULL) {
1153 arena_prof_promoted(ret,
Jason Evans93443682010-10-20 17:39:18 -07001154 usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001155 }
1156 } else
1157 ret = imalloc(size);
1158 }
Jason Evans7372b152012-02-10 20:22:09 -08001159 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001160 if (config_stats || (config_valgrind &&
1161 opt_valgrind))
Jason Evans7372b152012-02-10 20:22:09 -08001162 usize = s2u(size);
Jason Evans6109fe02010-02-10 10:37:56 -08001163 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -07001164 }
Jason Evans6109fe02010-02-10 10:37:56 -08001165 }
Jason Evans569432c2009-12-29 00:09:15 -08001166
Jason Evans289053c2009-06-22 12:08:42 -07001167 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001168 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001169 malloc_write("<jemalloc>: Error in realloc(): "
1170 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001171 abort();
1172 }
Mike Hommeya14bce82012-04-30 12:38:26 +02001173 set_errno(ENOMEM);
Jason Evans289053c2009-06-22 12:08:42 -07001174 }
1175 }
1176
Jason Evansa1ee7832012-04-10 15:07:44 -07001177label_return:
Jason Evans7372b152012-02-10 20:22:09 -08001178 if (config_prof && opt_prof)
Jason Evanse4f78462010-10-22 10:45:59 -07001179 prof_realloc(ret, usize, cnt, old_size, old_ctx);
Jason Evans7372b152012-02-10 20:22:09 -08001180 if (config_stats && ret != NULL) {
Jason Evanscd9a1342012-03-21 18:33:03 -07001181 thread_allocated_t *ta;
Jason Evans122449b2012-04-06 00:35:09 -07001182 assert(usize == isalloc(ret, config_prof));
Jason Evanscd9a1342012-03-21 18:33:03 -07001183 ta = thread_allocated_tsd_get();
1184 ta->allocated += usize;
1185 ta->deallocated += old_size;
Jason Evans93443682010-10-20 17:39:18 -07001186 }
Jason Evansb1476112012-04-05 13:36:17 -07001187 UTRACE(ptr, size, ret);
Jason Evans122449b2012-04-06 00:35:09 -07001188 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
Jason Evans289053c2009-06-22 12:08:42 -07001189 return (ret);
1190}
1191
1192void
Jason Evans0a5489e2012-03-01 17:19:20 -08001193je_free(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001194{
1195
Jason Evansb1476112012-04-05 13:36:17 -07001196 UTRACE(ptr, 0, 0);
Jason Evansf0047372012-04-02 15:18:24 -07001197 if (ptr != NULL) {
1198 size_t usize;
Jason Evans122449b2012-04-06 00:35:09 -07001199 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evanse4f78462010-10-22 10:45:59 -07001200
Jason Evansf0047372012-04-02 15:18:24 -07001201 assert(malloc_initialized || IS_INITIALIZER);
1202
1203 if (config_prof && opt_prof) {
Jason Evans122449b2012-04-06 00:35:09 -07001204 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001205 prof_free(ptr, usize);
Jason Evans122449b2012-04-06 00:35:09 -07001206 } else if (config_stats || config_valgrind)
1207 usize = isalloc(ptr, config_prof);
Jason Evansf0047372012-04-02 15:18:24 -07001208 if (config_stats)
1209 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans122449b2012-04-06 00:35:09 -07001210 if (config_valgrind && opt_valgrind)
1211 rzsize = p2rz(ptr);
1212 iqalloc(ptr);
1213 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
Jason Evansf0047372012-04-02 15:18:24 -07001214 }
Jason Evans289053c2009-06-22 12:08:42 -07001215}
1216
1217/*
1218 * End malloc(3)-compatible functions.
1219 */
1220/******************************************************************************/
1221/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001222 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001223 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001224
1225#ifdef JEMALLOC_OVERRIDE_MEMALIGN
Jason Evans6a0d2912010-09-20 16:44:23 -07001226void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001227je_memalign(size_t alignment, size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001228{
Jason Evans9225a192012-03-23 15:39:07 -07001229 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans0a0bbf62012-03-13 12:55:21 -07001230 imemalign(&ret, alignment, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001231 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001232 return (ret);
1233}
1234#endif
1235
1236#ifdef JEMALLOC_OVERRIDE_VALLOC
Jason Evans6a0d2912010-09-20 16:44:23 -07001237void *
Jason Evans0a5489e2012-03-01 17:19:20 -08001238je_valloc(size_t size)
Jason Evans6a0d2912010-09-20 16:44:23 -07001239{
Jason Evans9225a192012-03-23 15:39:07 -07001240 void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evansae4c7b42012-04-02 07:04:34 -07001241 imemalign(&ret, PAGE, size, 1);
Jason Evans122449b2012-04-06 00:35:09 -07001242 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
Jason Evans6a0d2912010-09-20 16:44:23 -07001243 return (ret);
1244}
1245#endif
1246
Mike Hommey5c89c502012-03-26 17:46:57 +02001247/*
1248 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1249 * #define je_malloc malloc
1250 */
1251#define malloc_is_malloc 1
1252#define is_malloc_(a) malloc_is_ ## a
1253#define is_malloc(a) is_malloc_(a)
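/*
 * Worked expansion (illustration only): with "#define je_malloc malloc" in
 * effect, the argument is macro-expanded before the paste, so
 *
 *	is_malloc(je_malloc) --> is_malloc_(malloc) --> malloc_is_malloc --> 1
 *
 * With a prefixed je_malloc, the paste instead produces an identifier such
 * as malloc_is_jep_malloc (hypothetical prefix), which is undefined and
 * therefore evaluates to 0 in the #if below.
 */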
1254
1255#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
Jason Evans4bb09832012-02-29 10:37:27 -08001256/*
 1257 * glibc provides the RTLD_DEEPBIND flag for dlopen(3), which can cause a
 1258 * process to reference libc's malloc(3)-compatible functions inconsistently
1259 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1260 *
Mike Hommey3c2ba0d2012-03-27 14:20:13 +02001261 * These definitions interpose hooks in glibc. The functions are actually
Jason Evans4bb09832012-02-29 10:37:27 -08001262 * passed an extra argument for the caller return address, which will be
1263 * ignored.
1264 */
Mike Hommeyda99e312012-04-30 12:38:29 +02001265JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
1266JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
1267JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
1268 je_realloc;
1269JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
1270 je_memalign;
Jason Evans4bb09832012-02-29 10:37:27 -08001271#endif
1272
Jason Evans6a0d2912010-09-20 16:44:23 -07001273/*
1274 * End non-standard override functions.
1275 */
1276/******************************************************************************/
1277/*
Jason Evans289053c2009-06-22 12:08:42 -07001278 * Begin non-standard functions.
1279 */
1280
1281size_t
Jason Evans0a5489e2012-03-01 17:19:20 -08001282je_malloc_usable_size(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001283{
Jason Evans569432c2009-12-29 00:09:15 -08001284 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001285
Jason Evans41b6afb2012-02-02 22:04:57 -08001286 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001287
Jason Evans7372b152012-02-10 20:22:09 -08001288 if (config_ivsalloc)
Jason Evans122449b2012-04-06 00:35:09 -07001289 ret = ivsalloc(ptr, config_prof);
Jason Evans2465bdf2012-03-26 13:13:55 -07001290 else
Jason Evans122449b2012-04-06 00:35:09 -07001291 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
Jason Evans289053c2009-06-22 12:08:42 -07001292
Jason Evans569432c2009-12-29 00:09:15 -08001293 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001294}
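/*
 * Usage sketch (illustration only, not compiled): the reported usable size
 * may exceed the requested size, and the application may use the full
 * extent.
 */
#if 0
	char *buf = je_malloc(100);
	size_t n = je_malloc_usable_size(buf);	/* n >= 100 if buf != NULL. */
	/* All n bytes of buf may be written, not just the 100 requested. */
#endif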
1295
Jason Evans4201af02010-01-24 02:53:40 -08001296void
Jason Evans0a5489e2012-03-01 17:19:20 -08001297je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1298 const char *opts)
Jason Evans4201af02010-01-24 02:53:40 -08001299{
1300
Jason Evans698805c2010-03-03 17:45:38 -08001301 stats_print(write_cb, cbopaque, opts);
Jason Evans4201af02010-01-24 02:53:40 -08001302}
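/*
 * Usage sketch (illustration only, not compiled): passing NULL for write_cb
 * selects the default output channel (malloc_write() to stderr), while a
 * custom callback can redirect the report.  FILE/fputs assume <stdio.h>,
 * and the opts characters that suppress report sections are defined by
 * stats_print(), not here.
 */
#if 0
static void
stats_to_stream(void *cbopaque, const char *s)
{

	fputs(s, (FILE *)cbopaque);
}

	/* ... */
	je_malloc_stats_print(NULL, NULL, NULL);		/* stderr */
	je_malloc_stats_print(stats_to_stream, stdout, NULL);	/* stdout */
#endif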
1303
Jason Evans3c234352010-01-27 13:10:55 -08001304int
Jason Evans0a5489e2012-03-01 17:19:20 -08001305je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08001306 size_t newlen)
1307{
1308
Jason Evans95833312010-01-27 13:45:21 -08001309 if (malloc_init())
1310 return (EAGAIN);
1311
Jason Evans3c234352010-01-27 13:10:55 -08001312 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1313}
1314
Jason Evans3c234352010-01-27 13:10:55 -08001315int
Jason Evans0a5489e2012-03-01 17:19:20 -08001316je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08001317{
1318
Jason Evans95833312010-01-27 13:45:21 -08001319 if (malloc_init())
1320 return (EAGAIN);
1321
Jason Evans3c234352010-01-27 13:10:55 -08001322 return (ctl_nametomib(name, mibp, miblenp));
1323}
1324
Jason Evans3c234352010-01-27 13:10:55 -08001325int
Jason Evans0a5489e2012-03-01 17:19:20 -08001326je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1327 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08001328{
1329
Jason Evans95833312010-01-27 13:45:21 -08001330 if (malloc_init())
1331 return (EAGAIN);
1332
Jason Evans3c234352010-01-27 13:10:55 -08001333 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1334}
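/*
 * Usage sketch (illustration only, not compiled): refresh the statistics
 * epoch, then read a counter; for repeated queries, translating the name to
 * a MIB once via mallctlnametomib() amortizes the string lookup.  The
 * "epoch" and "stats.allocated" names are defined by the ctl code, not in
 * this file.
 */
#if 0
static size_t
example_stats_allocated(void)
{
	uint64_t epoch = 1;
	size_t allocated, mib[2], miblen = 2, sz;

	/* Flush cached statistics. */
	sz = sizeof(epoch);
	je_mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Resolve the name once; reuse the MIB for later queries. */
	if (je_mallctlnametomib("stats.allocated", mib, &miblen) != 0)
		return (0);
	sz = sizeof(allocated);
	if (je_mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) != 0)
		return (0);
	return (allocated);
}
#endif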
1335
Jason Evans7e77eaf2012-03-02 17:47:37 -08001336/*
1337 * End non-standard functions.
1338 */
1339/******************************************************************************/
1340/*
1341 * Begin experimental functions.
1342 */
1343#ifdef JEMALLOC_EXPERIMENTAL
1344
Jason Evans8e3c3c62010-09-17 15:46:18 -07001345JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001346iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001347{
1348
Jason Evans5ff709c2012-04-11 18:13:45 -07001349 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1350 alignment)));
Jason Evans38d92102011-03-23 00:37:29 -07001351
Jason Evans8e3c3c62010-09-17 15:46:18 -07001352 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001353 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001354 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001355 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001356 else
Jason Evans38d92102011-03-23 00:37:29 -07001357 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001358}
1359
Jason Evans8e3c3c62010-09-17 15:46:18 -07001360int
Jason Evans0a5489e2012-03-01 17:19:20 -08001361je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001362{
1363 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001364 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001365 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1366 & (SIZE_T_MAX-1));
1367 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001368
1369 assert(ptr != NULL);
1370 assert(size != 0);
1371
1372 if (malloc_init())
Jason Evansa1ee7832012-04-10 15:07:44 -07001373 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001374
Jason Evans5ff709c2012-04-11 18:13:45 -07001375 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
Jason Evans38d92102011-03-23 00:37:29 -07001376 if (usize == 0)
Jason Evansa1ee7832012-04-10 15:07:44 -07001377 goto label_oom;
Jason Evans38d92102011-03-23 00:37:29 -07001378
Jason Evans7372b152012-02-10 20:22:09 -08001379 if (config_prof && opt_prof) {
Jason Evans3fb50b02012-04-25 13:13:44 -07001380 prof_thr_cnt_t *cnt;
1381
Jason Evansa5070042011-08-12 13:48:27 -07001382 PROF_ALLOC_PREP(1, usize, cnt);
1383 if (cnt == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001384 goto label_oom;
Jason Evans93443682010-10-20 17:39:18 -07001385 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001386 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001387 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001388 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans5ff709c2012-04-11 18:13:45 -07001389 alignment);
Jason Evans38d92102011-03-23 00:37:29 -07001390 assert(usize_promoted != 0);
1391 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001392 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001393 goto label_oom;
Jason Evans93443682010-10-20 17:39:18 -07001394 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001395 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001396 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001397 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001398 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001399 }
Jason Evans749c2a02011-08-12 18:37:54 -07001400 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001401 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001402 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001403 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001404 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001405 }
Jason Evans7372b152012-02-10 20:22:09 -08001406 if (rsize != NULL)
1407 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001408
1409 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001410 if (config_stats) {
Jason Evans122449b2012-04-06 00:35:09 -07001411 assert(usize == isalloc(p, config_prof));
Jason Evanscd9a1342012-03-21 18:33:03 -07001412 thread_allocated_tsd_get()->allocated += usize;
Jason Evans7372b152012-02-10 20:22:09 -08001413 }
Jason Evansb1476112012-04-05 13:36:17 -07001414 UTRACE(0, size, p);
Jason Evans122449b2012-04-06 00:35:09 -07001415 JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001416 return (ALLOCM_SUCCESS);
Jason Evansa1ee7832012-04-10 15:07:44 -07001417label_oom:
Jason Evans7372b152012-02-10 20:22:09 -08001418 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001419 malloc_write("<jemalloc>: Error in allocm(): "
1420 "out of memory\n");
1421 abort();
1422 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001423 *ptr = NULL;
Jason Evansb1476112012-04-05 13:36:17 -07001424 UTRACE(0, size, 0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001425 return (ALLOCM_ERR_OOM);
1426}
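/*
 * Usage sketch (illustration only, not compiled): request a zeroed,
 * 64-byte-aligned allocation and learn its usable size in one call.
 * ALLOCM_LG_ALIGN() is assumed from the public experimental header; this
 * file only references ALLOCM_LG_ALIGN_MASK.
 */
#if 0
	void *p;
	size_t usize;

	if (je_allocm(&p, &usize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		p = NULL;
	/* On success, usize >= 4096 bytes are usable and zero-filled. */
#endif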
1427
Jason Evans8e3c3c62010-09-17 15:46:18 -07001428int
Jason Evans0a5489e2012-03-01 17:19:20 -08001429je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001430{
1431 void *p, *q;
Jason Evans93443682010-10-20 17:39:18 -07001432 size_t usize;
Jason Evans93443682010-10-20 17:39:18 -07001433 size_t old_size;
Jason Evans122449b2012-04-06 00:35:09 -07001434 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001435 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1436 & (SIZE_T_MAX-1));
1437 bool zero = flags & ALLOCM_ZERO;
1438 bool no_move = flags & ALLOCM_NO_MOVE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001439
1440 assert(ptr != NULL);
1441 assert(*ptr != NULL);
1442 assert(size != 0);
1443 assert(SIZE_T_MAX - size >= extra);
Jason Evans41b6afb2012-02-02 22:04:57 -08001444 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001445
1446 p = *ptr;
Jason Evans7372b152012-02-10 20:22:09 -08001447 if (config_prof && opt_prof) {
Jason Evans3fb50b02012-04-25 13:13:44 -07001448 prof_thr_cnt_t *cnt;
1449
Jason Evans93443682010-10-20 17:39:18 -07001450 /*
1451 * usize isn't knowable before iralloc() returns when extra is
1452 * non-zero. Therefore, compute its maximum possible value and
Jason Evansa5070042011-08-12 13:48:27 -07001453 * use that in PROF_ALLOC_PREP() to decide whether to capture a
Jason Evans93443682010-10-20 17:39:18 -07001454 * backtrace. prof_realloc() will use the actual usize to
1455 * decide whether to sample.
1456 */
1457 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
Jason Evans5ff709c2012-04-11 18:13:45 -07001458 sa2u(size+extra, alignment);
Jason Evans46405e62011-08-30 23:37:29 -07001459 prof_ctx_t *old_ctx = prof_ctx_get(p);
Jason Evans122449b2012-04-06 00:35:09 -07001460 old_size = isalloc(p, true);
1461 if (config_valgrind && opt_valgrind)
1462 old_rzsize = p2rz(p);
Jason Evansa5070042011-08-12 13:48:27 -07001463 PROF_ALLOC_PREP(1, max_usize, cnt);
1464 if (cnt == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001465 goto label_oom;
Jason Evans183ba502011-08-11 22:51:00 -07001466 /*
1467 * Use minimum usize to determine whether promotion may happen.
1468 */
1469 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
Jason Evans5ff709c2012-04-11 18:13:45 -07001470 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1471 <= SMALL_MAXCLASS) {
Jason Evansb1726102012-02-28 16:50:47 -08001472 q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1473 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
Jason Evans8e3c3c62010-09-17 15:46:18 -07001474 alignment, zero, no_move);
1475 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001476 goto label_err;
Jason Evansae4c7b42012-04-02 07:04:34 -07001477 if (max_usize < PAGE) {
Jason Evans183ba502011-08-11 22:51:00 -07001478 usize = max_usize;
1479 arena_prof_promoted(q, usize);
Jason Evansb493ce22011-08-12 11:28:47 -07001480 } else
Jason Evans122449b2012-04-06 00:35:09 -07001481 usize = isalloc(q, config_prof);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001482 } else {
1483 q = iralloc(p, size, extra, alignment, zero, no_move);
1484 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001485 goto label_err;
Jason Evans122449b2012-04-06 00:35:09 -07001486 usize = isalloc(q, config_prof);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001487 }
Jason Evanse4f78462010-10-22 10:45:59 -07001488 prof_realloc(q, usize, cnt, old_size, old_ctx);
Jason Evanseacb8962011-03-23 00:30:30 -07001489 if (rsize != NULL)
1490 *rsize = usize;
Jason Evans7372b152012-02-10 20:22:09 -08001491 } else {
Jason Evans122449b2012-04-06 00:35:09 -07001492 if (config_stats) {
1493 old_size = isalloc(p, false);
1494 if (config_valgrind && opt_valgrind)
1495 old_rzsize = u2rz(old_size);
1496 } else if (config_valgrind && opt_valgrind) {
1497 old_size = isalloc(p, false);
1498 old_rzsize = u2rz(old_size);
1499 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001500 q = iralloc(p, size, extra, alignment, zero, no_move);
1501 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001502 goto label_err;
Jason Evans7372b152012-02-10 20:22:09 -08001503 if (config_stats)
Jason Evans122449b2012-04-06 00:35:09 -07001504 usize = isalloc(q, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001505 if (rsize != NULL) {
1506 if (config_stats == false)
Jason Evans122449b2012-04-06 00:35:09 -07001507 usize = isalloc(q, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001508 *rsize = usize;
Jason Evans93443682010-10-20 17:39:18 -07001509 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001510 }
1511
1512 *ptr = q;
Jason Evanscd9a1342012-03-21 18:33:03 -07001513 if (config_stats) {
1514 thread_allocated_t *ta;
1515 ta = thread_allocated_tsd_get();
1516 ta->allocated += usize;
1517 ta->deallocated += old_size;
1518 }
Jason Evansb1476112012-04-05 13:36:17 -07001519 UTRACE(p, size, q);
Jason Evans122449b2012-04-06 00:35:09 -07001520 JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001521 return (ALLOCM_SUCCESS);
Jason Evansa1ee7832012-04-10 15:07:44 -07001522label_err:
Jason Evansb1476112012-04-05 13:36:17 -07001523 if (no_move) {
1524 UTRACE(p, size, q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001525 return (ALLOCM_ERR_NOT_MOVED);
Jason Evansb1476112012-04-05 13:36:17 -07001526 }
Jason Evansa1ee7832012-04-10 15:07:44 -07001527label_oom:
Jason Evans7372b152012-02-10 20:22:09 -08001528 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001529 malloc_write("<jemalloc>: Error in rallocm(): "
1530 "out of memory\n");
1531 abort();
1532 }
Jason Evansb1476112012-04-05 13:36:17 -07001533 UTRACE(p, size, 0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001534 return (ALLOCM_ERR_OOM);
1535}
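/*
 * Usage sketch (illustration only, not compiled): attempt to grow an
 * allocation in place before accepting a moving reallocation, keying off
 * the ALLOCM_ERR_NOT_MOVED result produced above.
 */
#if 0
	int r = je_rallocm(&p, &usize, newsize, 0, ALLOCM_NO_MOVE);
	if (r == ALLOCM_ERR_NOT_MOVED)
		r = je_rallocm(&p, &usize, newsize, 0, 0);
	/* On any error, *ptr (p here) is left unmodified. */
#endif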
1536
Jason Evans8e3c3c62010-09-17 15:46:18 -07001537int
Jason Evans0a5489e2012-03-01 17:19:20 -08001538je_sallocm(const void *ptr, size_t *rsize, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001539{
1540 size_t sz;
1541
Jason Evans41b6afb2012-02-02 22:04:57 -08001542 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001543
Jason Evans7372b152012-02-10 20:22:09 -08001544 if (config_ivsalloc)
Jason Evans122449b2012-04-06 00:35:09 -07001545 sz = ivsalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001546 else {
1547 assert(ptr != NULL);
Jason Evans122449b2012-04-06 00:35:09 -07001548 sz = isalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001549 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001550 assert(rsize != NULL);
1551 *rsize = sz;
1552
1553 return (ALLOCM_SUCCESS);
1554}
1555
Jason Evans8e3c3c62010-09-17 15:46:18 -07001556int
Jason Evans0a5489e2012-03-01 17:19:20 -08001557je_dallocm(void *ptr, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001558{
Jason Evanse4f78462010-10-22 10:45:59 -07001559 size_t usize;
Jason Evans122449b2012-04-06 00:35:09 -07001560 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001561
1562 assert(ptr != NULL);
Jason Evans41b6afb2012-02-02 22:04:57 -08001563 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001564
Jason Evansb1476112012-04-05 13:36:17 -07001565 UTRACE(ptr, 0, 0);
Jason Evans122449b2012-04-06 00:35:09 -07001566 if (config_stats || config_valgrind)
1567 usize = isalloc(ptr, config_prof);
Jason Evans7372b152012-02-10 20:22:09 -08001568 if (config_prof && opt_prof) {
Jason Evans122449b2012-04-06 00:35:09 -07001569 if (config_stats == false && config_valgrind == false)
1570 usize = isalloc(ptr, config_prof);
Jason Evanse4f78462010-10-22 10:45:59 -07001571 prof_free(ptr, usize);
1572 }
Jason Evans7372b152012-02-10 20:22:09 -08001573 if (config_stats)
Jason Evanscd9a1342012-03-21 18:33:03 -07001574 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans122449b2012-04-06 00:35:09 -07001575 if (config_valgrind && opt_valgrind)
1576 rzsize = p2rz(ptr);
1577 iqalloc(ptr);
1578 JEMALLOC_VALGRIND_FREE(ptr, rzsize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001579
1580 return (ALLOCM_SUCCESS);
1581}
1582
Jason Evans7e15dab2012-02-29 12:56:37 -08001583int
Jason Evans0a5489e2012-03-01 17:19:20 -08001584je_nallocm(size_t *rsize, size_t size, int flags)
Jason Evans7e15dab2012-02-29 12:56:37 -08001585{
1586 size_t usize;
1587 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1588 & (SIZE_T_MAX-1));
1589
1590 assert(size != 0);
1591
1592 if (malloc_init())
1593 return (ALLOCM_ERR_OOM);
1594
Jason Evans5ff709c2012-04-11 18:13:45 -07001595 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
Jason Evans7e15dab2012-02-29 12:56:37 -08001596 if (usize == 0)
1597 return (ALLOCM_ERR_OOM);
1598
1599 if (rsize != NULL)
1600 *rsize = usize;
1601 return (ALLOCM_SUCCESS);
1602}
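/*
 * Usage sketch (illustration only, not compiled): nallocm() reports the
 * usable size that an allocm() call with identical size and flags would
 * produce, without allocating; this is useful for sizing bookkeeping up
 * front.
 */
#if 0
	size_t usize;

	if (je_nallocm(&usize, 4096, 0) == ALLOCM_SUCCESS) {
		/* je_allocm(..., 4096, 0) would report this same usize. */
	}
#endif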
1603
Jason Evans7e77eaf2012-03-02 17:47:37 -08001604#endif
Jason Evans289053c2009-06-22 12:08:42 -07001605/*
Jason Evans7e77eaf2012-03-02 17:47:37 -08001606 * End experimental functions.
Jason Evans289053c2009-06-22 12:08:42 -07001607 */
1608/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001609/*
 1610 * The following functions are used by threading libraries to protect
Jason Evans28177d42010-09-20 11:24:24 -07001611 * malloc's internal state across fork().
Jason Evans289053c2009-06-22 12:08:42 -07001612 */
1613
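/*
 * Registration sketch (illustration only): on pthread platforms these hooks
 * are typically installed during malloc initialization, e.g.
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas JEMALLOC_MUTEX_INIT_CB platforms have the threading library call
 * _malloc_prefork()/_malloc_postfork() directly.
 */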
Jason Evans41b6afb2012-02-02 22:04:57 -08001614#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001615void
Jason Evans804c9ec2009-06-22 17:44:33 -07001616jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001617#else
Mike Hommeyda99e312012-04-30 12:38:29 +02001618JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08001619_malloc_prefork(void)
1620#endif
Jason Evans289053c2009-06-22 12:08:42 -07001621{
Jason Evansfbbb6242010-01-24 17:56:48 -08001622 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001623
1624 /* Acquire all mutexes in a safe order. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001625 malloc_mutex_prefork(&arenas_lock);
Jason Evansfbbb6242010-01-24 17:56:48 -08001626 for (i = 0; i < narenas; i++) {
1627 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001628 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08001629 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001630 base_prefork();
1631 huge_prefork();
1632 chunk_dss_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07001633}
1634
Jason Evans41b6afb2012-02-02 22:04:57 -08001635#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001636void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001637jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001638#else
Mike Hommeyda99e312012-04-30 12:38:29 +02001639JEMALLOC_EXPORT void
Jason Evans41b6afb2012-02-02 22:04:57 -08001640_malloc_postfork(void)
1641#endif
Jason Evans289053c2009-06-22 12:08:42 -07001642{
1643 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001644
1645 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001646 chunk_dss_postfork_parent();
1647 huge_postfork_parent();
1648 base_postfork_parent();
Jason Evans289053c2009-06-22 12:08:42 -07001649 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001650 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001651 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07001652 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001653 malloc_mutex_postfork_parent(&arenas_lock);
1654}
1655
1656void
1657jemalloc_postfork_child(void)
1658{
1659 unsigned i;
1660
1661 /* Release all mutexes, now that fork() has completed. */
1662 chunk_dss_postfork_child();
1663 huge_postfork_child();
1664 base_postfork_child();
1665 for (i = 0; i < narenas; i++) {
1666 if (arenas[i] != NULL)
1667 arena_postfork_child(arenas[i]);
1668 }
1669 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001670}
Jason Evans2dbecf12010-09-05 10:35:13 -07001671
1672/******************************************************************************/
Jason Evans01b3fe52012-04-03 09:28:00 -07001673/*
1674 * The following functions are used for TLS allocation/deallocation in static
1675 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1676 * is that these avoid accessing TLS variables.
1677 */
1678
1679static void *
1680a0alloc(size_t size, bool zero)
1681{
1682
1683 if (malloc_init())
1684 return (NULL);
1685
1686 if (size == 0)
1687 size = 1;
1688
1689 if (size <= arena_maxclass)
1690 return (arena_malloc(arenas[0], size, zero, false));
1691 else
1692 return (huge_malloc(size, zero));
1693}
1694
1695void *
1696a0malloc(size_t size)
1697{
1698
1699 return (a0alloc(size, false));
1700}
1701
1702void *
 1703a0calloc(size_t num, size_t size)
 1704{
 1705
	/* Guard against size_t overflow in num * size. */
	if (num != 0 && SIZE_T_MAX / num < size)
		return (NULL);
 1706	return (a0alloc(num * size, true));
 1707}
1708
1709void
1710a0free(void *ptr)
1711{
1712 arena_chunk_t *chunk;
1713
1714 if (ptr == NULL)
1715 return;
1716
1717 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1718 if (chunk != ptr)
1719 arena_dalloc(chunk->arena, chunk, ptr, false);
1720 else
1721 huge_dalloc(ptr, true);
1722}
1723
1724/******************************************************************************/