#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
#else
static bool malloc_initializer = false;
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

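/*
 * Illustrative sketch (not part of the original source): the selection
 * policy implemented by choose_arena_hard() above, distilled into a
 * standalone helper.  One pass tracks both the least-loaded initialized
 * arena and the first empty slot; an idle arena wins outright, and a new
 * arena is created only when every initialized one is busy.  The helper
 * name is hypothetical, and the caller is assumed to hold arenas_lock.
 */
#if 0
static unsigned
choose_arena_policy_sketch(arena_t **slots, unsigned n)
{
	unsigned i, choose = 0, first_null = n;

	for (i = 1; i < n; i++) {
		if (slots[i] != NULL) {
			/* Track the least-loaded initialized arena. */
			if (slots[i]->nthreads < slots[choose]->nthreads)
				choose = i;
		} else if (first_null == n) {
			/* Remember the first uninitialized slot. */
			first_null = i;
		}
	}
	if (slots[choose]->nthreads == 0 || first_null == n)
		return (choose);	/* Idle arena, or no room to grow. */
	return (first_null);		/* Lazily create a fresh arena here. */
}
#endif
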
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

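/*
 * Illustrative sketch (not part of the original source): walking a conf
 * string with malloc_conf_next().  Each successful call yields one key/value
 * pair; k and v point into the input and are not NUL-terminated, so klen and
 * vlen delimit them.  The loop mirrors the one in malloc_conf_init() below;
 * the function name and option string are hypothetical.
 */
#if 0
static void
conf_walk_example(void)
{
	const char *opts = "abort:true,lg_chunk:22";
	const char *k, *v;
	size_t klen, vlen;

	while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
	    &vlen) == false) {
		/*
		 * First iteration:  k -> "abort",    klen == 5,
		 *                   v -> "true",     vlen == 4.
		 * Second iteration: k -> "lg_chunk", klen == 8,
		 *                   v -> "22",       vlen == 2.
		 */
	}
}
#endif
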
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				continue; \
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				uintmax_t um; \
				char *end; \
				\
				errno = 0; \
				um = malloc_strtoumax(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (um < min || um > max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = um; \
				continue; \
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				long l; \
				char *end; \
				\
				errno = 0; \
				l = strtol(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

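/*
 * Illustrative note (not part of the original source): the three sources
 * above are read in order, so later sources override earlier ones:
 * compiled-in je_malloc_conf, then the name of the /etc/malloc.conf
 * symbolic link, then the MALLOC_CONF environment variable (names gain the
 * configured prefix, if any).  A sketch of all three, assuming an
 * unprefixed build:
 */
#if 0
/* 1. Compiled into the application (lowest priority): */
const char *malloc_conf = "narenas:2,lg_chunk:24";

/*
 * 2. System-wide, encoded in the symlink target:
 *        $ ln -s 'abort:true' /etc/malloc.conf
 *
 * 3. Per process, highest priority:
 *        $ MALLOC_CONF='stats_print:true,junk:true' ./app
 */
#endif
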
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#ifndef JEMALLOC_MUTEX_INIT_CB
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone.  At this point it won't be the default. */
	malloc_zone_t *jemalloc_zone = create_zone();
	malloc_zone_register(jemalloc_zone);

	/*
	 * Unregister and reregister the default zone.  On OSX >= 10.6,
	 * unregistering takes the last registered zone and places it at the
	 * location of the specified zone.  Unregistering the default zone thus
	 * makes the last registered one the default.  On OSX < 10.6,
	 * unregistering shifts all registered zones.  The first registered
	 * zone then becomes the default.
	 */
	do {
		malloc_zone_t *default_zone = malloc_default_zone();
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);
	} while (malloc_default_zone() != jemalloc_zone);
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	return (ret);
}

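/*
 * Illustrative sketch (not part of the original source): the alignment
 * contract imemalign() enforces for the two wrappers above.
 * posix_memalign() requires a power-of-2 alignment no smaller than
 * sizeof(void *), while aligned_alloc() accepts any nonzero power of 2.
 * The function name is hypothetical; an unprefixed build is assumed.
 */
#if 0
static void
alignment_contract_example(void)
{
	void *p;

	if (posix_memalign(&p, 64, 1024) == 0) {
		/* Success: 64 is a power of 2 and >= sizeof(void *). */
		free(p);
	}
	/* EINVAL: 3 is not a power of 2. */
	assert(posix_memalign(&p, 3, 1024) == EINVAL);
	/* EINVAL on LP64: 2 is a power of 2 but < sizeof(void *). */
	assert(posix_memalign(&p, 2, 1024) == EINVAL);
}
#endif
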
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

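/*
 * Illustrative sketch (not part of the original source): je_calloc()'s
 * overflow test as a standalone predicate.  If neither operand touches the
 * upper half of size_t's bits, the product cannot overflow, so the division
 * is skipped on that common path.  SIZE_MAX stands in for the internal
 * SIZE_T_MAX; the function name is hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t num, size_t size)
{
	size_t num_size = num * size;

	if (num_size == 0)
		return (false);	/* Also guards the division below. */
	/* High-half mask, e.g. 0xffffffff00000000 for 64-bit size_t. */
	if (((num | size) & (SIZE_MAX << (sizeof(size_t) << 2))) == 0)
		return (false);	/* Both operands fit in the low half. */
	return (num_size / size != num);
}
#endif
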
JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ret);
}

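/*
 * Illustrative note (not part of the original source): the two special cases
 * je_realloc() documents above, seen from the caller's side.  The function
 * name is hypothetical; an unprefixed build is assumed.
 */
#if 0
static void
realloc_edge_cases_example(void)
{
	void *p = realloc(NULL, 64);	/* Behaves like malloc(64). */

	p = realloc(p, 0);		/* Behaves like free(p) here... */
	assert(p == NULL);		/* ...and returns NULL. */
}
#endif
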
JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE_SIZE, size, 1);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

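/*
 * Illustrative note (not part of the original source): malloc_usable_size()
 * reports the size class actually backing an allocation, which may exceed
 * the requested size, and every reported byte is usable.  The function name
 * is hypothetical; an unprefixed build is assumed.
 */
#if 0
static void
usable_size_example(void)
{
	char *p = malloc(5);
	size_t sz = malloc_usable_size(p);	/* sz >= 5 (full size class). */

	memset(p, 0, sz);	/* Safe: all sz bytes belong to p. */
	free(p);
}
#endif
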
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

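/*
 * Illustrative sketch (not part of the original source): typical mallctl()
 * usage.  Statistics require a build configured with --enable-stats, and
 * reads reflect the most recent "epoch" refresh.  The function name is
 * hypothetical; an unprefixed build is assumed.
 */
#if 0
static void
mallctl_example(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t allocated, mib[2], miblen = 2;

	/* Refresh the cached statistics snapshot. */
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		/* allocated == bytes currently allocated by the app. */
	}

	/* The MIB variants avoid repeated name lookups in hot paths. */
	mallctlnametomib("stats.allocated", mib, &miblen);
	mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
}
#endif
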
Jason Evans7e77eaf2012-03-02 17:47:37 -08001277/*
1278 * End non-standard functions.
1279 */
1280/******************************************************************************/
1281/*
1282 * Begin experimental functions.
1283 */
1284#ifdef JEMALLOC_EXPERIMENTAL
1285
Jason Evans8e3c3c62010-09-17 15:46:18 -07001286JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001287iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001288{
1289
Jason Evans38d92102011-03-23 00:37:29 -07001290 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1291 NULL)));
1292
Jason Evans8e3c3c62010-09-17 15:46:18 -07001293 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001294 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001295 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001296 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001297 else
Jason Evans38d92102011-03-23 00:37:29 -07001298 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001299}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
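
/*
 * Usage sketch (hypothetical caller; assumes the default unprefixed public
 * names): allocate 4 KiB, 64-byte-aligned and zeroed, and recover the real
 * usable size:
 *
 *	void *p;
 *	size_t rsize;
 *
 *	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		... out of memory ...
 *	assert(rsize >= 4096);
 *	dallocm(p, 0);
 */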

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
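
/*
 * Usage sketch (hypothetical caller): attempt an in-place resize first; on
 * ALLOCM_ERR_NOT_MOVED the allocation is untouched, and a moving resize can
 * be tried instead:
 *
 *	if (rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED)
 *		err = rallocm(&p, &rsize, newsize, 0, 0);
 */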

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}
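
/*
 * Usage sketch (hypothetical caller): query the usable size of an existing
 * allocation, which may exceed the originally requested size:
 *
 *	size_t sz;
 *
 *	sallocm(p, &sz, 0);
 */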

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
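
/*
 * Usage sketch (hypothetical caller): compute what allocm() would report as
 * the usable size for a page-aligned request, without allocating:
 *
 *	size_t usize;
 *
 *	if (nallocm(&usize, 4096, ALLOCM_LG_ALIGN(12)) == ALLOCM_SUCCESS)
 *		... usize is the usable size allocm() would provide ...
 */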

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

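/*
 * Registration sketch (an assumption about the surrounding build, not code
 * in this file): without JEMALLOC_MUTEX_INIT_CB, the allocator registers
 * these handlers itself during initialization, along the lines of
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas with JEMALLOC_MUTEX_INIT_CB (e.g. FreeBSD libc), the threading
 * library calls the _malloc_prefork()/_malloc_postfork() entry points
 * directly around fork().
 */
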
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/