#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
    arena_t *ret;

    ret = (arena_t *)base_alloc(sizeof(arena_t));
    if (ret != NULL && arena_new(ret, ind) == false) {
        arenas[ind] = ret;
        return (ret);
    }
    /* Only reached if there is an OOM error. */

    /*
     * OOM here is quite inconvenient to propagate, since dealing with it
     * would require a check for failure in the fast path.  Instead, punt
     * by using arenas[0].  In practice, this is an extremely unlikely
     * failure.
     */
    malloc_write("<jemalloc>: Error initializing arena\n");
    if (opt_abort)
        abort();

    return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
    arena_t *ret;

    if (narenas > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas;
        malloc_mutex_lock(&arenas_lock);
        assert(arenas[0] != NULL);
        for (i = 1; i < narenas; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0 || first_null == narenas) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            ret = arenas_extend(first_null);
        }
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arenas[0];
        malloc_mutex_lock(&arenas_lock);
        ret->nthreads++;
        malloc_mutex_unlock(&arenas_lock);
    }

    arenas_tsd_set(&ret);

    return (ret);
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0; i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
    unsigned ret;
    long result;

    result = sysconf(_SC_NPROCESSORS_ONLN);
    if (result == -1) {
        /* Error; arbitrarily assume one CPU. */
        ret = 1;
    } else
        ret = (unsigned)result;

    return (ret);
}

void
arenas_cleanup(void *arg)
{
    arena_t *arena = *(arena_t **)arg;

    malloc_mutex_lock(&arenas_lock);
    arena->nthreads--;
    malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

    if (malloc_initialized == false)
        return (malloc_init_hard());

    return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; accept == false;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; accept == false;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
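
/*
 * Usage sketch (illustrative only, not part of the allocator): given the
 * option string syntax parsed above ("key:value,key:value"), a loop such as
 *
 *     const char *opts = "abort:true,narenas:4";
 *     const char *k, *v;
 *     size_t klen, vlen;
 *
 *     while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
 *         &vlen) == false)
 *         malloc_printf("%.*s -> %.*s\n", (int)klen, k, (int)vlen, v);
 *
 * visits ("abort", "true"), then ("narenas", "4").  Both keys are real
 * options handled by malloc_conf_init() below.
 */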

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen;
            const char *linkname =
#ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
                "/etc/malloc.conf"
#endif
                ;

            if ((linklen = readlink(linkname, buf,
                sizeof(buf) - 1)) != -1) {
                /*
                 * Use the contents of the "/etc/malloc.conf"
                 * symbolic link's name.
                 */
                buf[linklen] = '\0';
                opts = buf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            /* NOTREACHED */
            assert(false);
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
            &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                if (strncmp("true", v, vlen) == 0 && \
                    vlen == sizeof("true")-1) \
                    o = true; \
                else if (strncmp("false", v, vlen) == \
                    0 && vlen == sizeof("false")-1) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                continue; \
            }
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                uintmax_t um; \
                char *end; \
 \
                errno = 0; \
                um = malloc_strtoumax(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (um < min || um > max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = um; \
                continue; \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                long l; \
                char *end; \
 \
                errno = 0; \
                l = strtol(v, &end, 0); \
                if (errno != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)min || l > \
                    (ssize_t)max) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (sizeof(#n)-1 == klen && strncmp(#n, k, \
                klen) == 0) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, abort)
            /*
             * Chunks always require at least one header page,
             * plus one data page.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
                (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, stats_print)
            if (config_fill) {
                CONF_HANDLE_BOOL(opt_junk, junk)
                CONF_HANDLE_BOOL(opt_zero, zero)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, tcache)
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    lg_tcache_max, -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, prof)
                CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
                    "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, prof_active)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
                    lg_prof_sample, 0,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    lg_prof_interval, -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
                CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
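
/*
 * Example (illustrative): each configuration source above feeds the same
 * parser, so e.g. running a program as
 *
 *     MALLOC_CONF=abort:true,lg_chunk:24 ./a.out
 *
 * (or the JEMALLOC_CPREFIX-prefixed equivalent when JEMALLOC_PREFIX is
 * defined) sets opt_abort and opt_lg_chunk.  Sources are processed in
 * order, so the environment variable overrides both the compiled-in
 * je_malloc_conf string and the /etc/malloc.conf symbolic link.
 */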

static bool
malloc_init_hard(void)
{
    arena_t *init_arenas[1];

    malloc_mutex_lock(&init_lock);
    if (malloc_initialized || IS_INITIALIZER) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (malloc_initialized == false);
        malloc_mutex_unlock(&init_lock);
        return (false);
    }
#endif
    malloc_initializer = INITIALIZER;

    malloc_tsd_boot();
    if (config_prof)
        prof_boot0();

    malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
    /* Register fork handlers. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }

    if (chunk_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (base_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (ctl_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof)
        prof_boot1();

    arena_boot();

    if (config_tcache && tcache_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (huge_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_mutex_init(&arenas_lock)) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas = 1;
    arenas = init_arenas;
    memset(arenas, 0, sizeof(arena_t *) * narenas);

    /*
     * Initialize one arena here.  The rest are lazily created in
     * choose_arena_hard().
     */
    arenas_extend(0);
    if (arenas[0] == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Initialize allocation counters before any allocations can occur. */
    if (config_stats && thread_allocated_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (arenas_tsd_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_tcache && tcache_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    /* Get number of CPUs. */
    malloc_mutex_unlock(&init_lock);
    ncpus = malloc_ncpus();
    malloc_mutex_lock(&init_lock);

    if (chunk_boot1()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (mutex_boot()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas > chunksize / sizeof(arena_t *)) {
        narenas = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas);
    }

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
    if (arenas == NULL) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = init_arenas[0];

    malloc_initialized = true;
    malloc_mutex_unlock(&init_lock);
    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
    void *ret;
    size_t usize;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        ret = NULL;
        goto OOM;
    }

    if (size == 0)
        size = 1;

    if (config_prof && opt_prof) {
        usize = s2u(size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto OOM;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            /*
             * Promote a sampled small request to the smallest
             * large size class, so that the heap profiler can
             * track it individually; arena_prof_promoted()
             * records the originally requested usize.
             */
            ret = imalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = imalloc(size);
    } else {
        if (config_stats)
            usize = s2u(size);
        ret = imalloc(size);
    }

OOM:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        errno = ENOMEM;
    }
    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret));
        thread_allocated_tsd_get()->allocated += usize;
    }
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
    int ret;
    size_t usize;
    void *result;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    assert(min_alignment != 0);

    if (malloc_init())
        result = NULL;
    else {
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (((alignment - 1) & alignment) != 0
            || (alignment < min_alignment)) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto RETURN;
        }

        usize = sa2u(size, alignment, NULL);
        if (usize == 0) {
            result = NULL;
            ret = ENOMEM;
            goto RETURN;
        }

        if (config_prof && opt_prof) {
            PROF_ALLOC_PREP(2, usize, cnt);
            if (cnt == NULL) {
                result = NULL;
                ret = EINVAL;
            } else {
                if (prof_promote && (uintptr_t)cnt !=
                    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
                    assert(sa2u(SMALL_MAXCLASS+1,
                        alignment, NULL) != 0);
                    result = ipalloc(sa2u(SMALL_MAXCLASS+1,
                        alignment, NULL), alignment, false);
                    if (result != NULL) {
                        arena_prof_promoted(result,
                            usize);
                    }
                } else {
                    result = ipalloc(usize, alignment,
                        false);
                }
            }
        } else
            result = ipalloc(usize, alignment, false);
    }

    if (result == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error allocating aligned "
                "memory: out of memory\n");
            abort();
        }
        ret = ENOMEM;
        goto RETURN;
    }

    *memptr = result;
    ret = 0;

RETURN:
    if (config_stats && result != NULL) {
        assert(usize == isalloc(result));
        thread_allocated_tsd_get()->allocated += usize;
    }
    if (config_prof && opt_prof && result != NULL)
        prof_malloc(result, usize, cnt);
    return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

    return imemalign(memptr, alignment, size, sizeof(void *));
}
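
/*
 * Example (standard POSIX usage): je_posix_memalign() is the
 * posix_memalign(3) entry point when jemalloc is built without a function
 * prefix.  The alignment must be a power of two and at least
 * sizeof(void *):
 *
 *     void *p;
 *     int err = posix_memalign(&p, 64, 1024);
 *     if (err == 0)
 *         free(p);
 *
 * On failure the return value is EINVAL or ENOMEM and, per the code
 * above, *memptr is not assigned.
 */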

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
        ret = NULL;
        errno = err;
    }
    return (ret);
}
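
/*
 * Note (illustrative): C11 requires aligned_alloc() callers to pass a size
 * that is an integral multiple of the alignment, e.g.
 *
 *     void *p = aligned_alloc(64, 1024);
 *
 * The implementation above does not reject a non-multiple size; it only
 * validates the alignment itself (any power of two, via a min_alignment
 * of 1).
 */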

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
    void *ret;
    size_t num_size;
    size_t usize;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

    if (malloc_init()) {
        num_size = 0;
        ret = NULL;
        goto RETURN;
    }

    num_size = num * size;
    if (num_size == 0) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto RETURN;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t (e.g. both num and
     * size below 2^32 when size_t is 64 bits wide).
     */
    } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
        && (num_size / size != num)) {
        /* size_t overflow. */
        ret = NULL;
        goto RETURN;
    }

    if (config_prof && opt_prof) {
        usize = s2u(num_size);
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL) {
            ret = NULL;
            goto RETURN;
        }
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
            <= SMALL_MAXCLASS) {
            ret = icalloc(SMALL_MAXCLASS+1);
            if (ret != NULL)
                arena_prof_promoted(ret, usize);
        } else
            ret = icalloc(num_size);
    } else {
        if (config_stats)
            usize = s2u(num_size);
        ret = icalloc(num_size);
    }

RETURN:
    if (ret == NULL) {
        if (config_xmalloc && opt_xmalloc) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        errno = ENOMEM;
    }

    if (config_prof && opt_prof && ret != NULL)
        prof_malloc(ret, usize, cnt);
    if (config_stats && ret != NULL) {
        assert(usize == isalloc(ret));
        thread_allocated_tsd_get()->allocated += usize;
    }
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
    void *ret;
    size_t usize;
    size_t old_size = 0;
    prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
    prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

    if (size == 0) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            if (config_prof || config_stats)
                old_size = isalloc(ptr);
            if (config_prof && opt_prof) {
                old_ctx = prof_ctx_get(ptr);
                cnt = NULL;
            }
            idalloc(ptr);
            ret = NULL;
            goto RETURN;
        } else
            size = 1;
    }

    if (ptr != NULL) {
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof || config_stats)
            old_size = isalloc(ptr);
        if (config_prof && opt_prof) {
            usize = s2u(size);
            old_ctx = prof_ctx_get(ptr);
            PROF_ALLOC_PREP(1, usize, cnt);
            if (cnt == NULL) {
                old_ctx = NULL;
                ret = NULL;
                goto OOM;
            }
            if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
                usize <= SMALL_MAXCLASS) {
                ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
                    false, false);
                if (ret != NULL)
                    arena_prof_promoted(ret, usize);
                else
                    old_ctx = NULL;
            } else {
                ret = iralloc(ptr, size, 0, 0, false, false);
                if (ret == NULL)
                    old_ctx = NULL;
            }
        } else {
            if (config_stats)
                usize = s2u(size);
            ret = iralloc(ptr, size, 0, 0, false, false);
        }

OOM:
        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            errno = ENOMEM;
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (config_prof && opt_prof)
            old_ctx = NULL;
        if (malloc_init()) {
            if (config_prof && opt_prof)
                cnt = NULL;
            ret = NULL;
        } else {
            if (config_prof && opt_prof) {
                usize = s2u(size);
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                    ret = NULL;
                else {
                    if (prof_promote && (uintptr_t)cnt !=
                        (uintptr_t)1U && usize <=
                        SMALL_MAXCLASS) {
                        ret = imalloc(SMALL_MAXCLASS+1);
                        if (ret != NULL) {
                            arena_prof_promoted(ret,
                                usize);
                        }
                    } else
                        ret = imalloc(size);
                }
            } else {
                if (config_stats)
                    usize = s2u(size);
                ret = imalloc(size);
            }
        }

        if (ret == NULL) {
            if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in realloc(): "
                    "out of memory\n");
                abort();
            }
            errno = ENOMEM;
        }
    }

RETURN:
    if (config_prof && opt_prof)
        prof_realloc(ret, usize, cnt, old_size, old_ctx);
    if (config_stats && ret != NULL) {
        thread_allocated_t *ta;
        assert(usize == isalloc(ret));
        ta = thread_allocated_tsd_get();
        ta->allocated += usize;
        ta->deallocated += old_size;
    }
    return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

    if (ptr != NULL) {
        size_t usize;

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_prof && opt_prof) {
            usize = isalloc(ptr);
            prof_free(ptr, usize);
        } else if (config_stats) {
            usize = isalloc(ptr);
        }
        if (config_stats)
            thread_allocated_tsd_get()->deallocated += usize;
        idalloc(ptr);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, alignment, size, 1);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    imemalign(&ret, PAGE, size, 1);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
    size_t ret;

    assert(malloc_initialized || IS_INITIALIZER);

    if (config_ivsalloc)
        ret = ivsalloc(ptr);
    else
        ret = (ptr != NULL) ? isalloc(ptr) : 0;

    return (ret);
}
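
/*
 * Example (illustrative): the reported usable size may exceed the
 * requested size because of size-class rounding:
 *
 *     void *p = malloc(100);
 *     size_t n = malloc_usable_size(p);
 *     assert(p == NULL || n >= 100);
 *
 * When built with config_ivsalloc, ivsalloc() additionally validates that
 * the pointer was issued by this allocator, returning 0 otherwise.
 */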

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

    stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

    if (malloc_init())
        return (EAGAIN);

    return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
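
/*
 * Example (illustrative): the mallctl*() functions navigate a tree of
 * named controls.  A one-off read can use the string form:
 *
 *     unsigned n;
 *     size_t sz = sizeof(n);
 *     if (mallctl("arenas.narenas", &n, &sz, NULL, 0) == 0)
 *         malloc_printf("%u arenas\n", n);
 *
 * Repeated lookups can translate the name once and reuse the MIB:
 *
 *     size_t mib[2], miblen = 2;
 *     mallctlnametomib("arenas.narenas", mib, &miblen);
 *     mallctlbymib(mib, miblen, &n, &sz, NULL, 0);
 *
 * See ctl.c for the full control tree.
 */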

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

    assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
        NULL)));

    if (alignment != 0)
        return (ipalloc(usize, alignment, zero));
    else if (zero)
        return (icalloc(usize));
    else
        return (imalloc(usize));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;
    size_t usize;
    size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
        & (SIZE_T_MAX-1));
    bool zero = flags & ALLOCM_ZERO;
    prof_thr_cnt_t *cnt;

    assert(ptr != NULL);
    assert(size != 0);

    if (malloc_init())
        goto OOM;

    usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
    if (usize == 0)
        goto OOM;

    if (config_prof && opt_prof) {
        PROF_ALLOC_PREP(1, usize, cnt);
        if (cnt == NULL)
            goto OOM;
        if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
            SMALL_MAXCLASS) {
            size_t usize_promoted = (alignment == 0) ?
                s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                alignment, NULL);
            assert(usize_promoted != 0);
            p = iallocm(usize_promoted, alignment, zero);
            if (p == NULL)
                goto OOM;
            arena_prof_promoted(p, usize);
        } else {
            p = iallocm(usize, alignment, zero);
            if (p == NULL)
                goto OOM;
        }
        prof_malloc(p, usize, cnt);
    } else {
        p = iallocm(usize, alignment, zero);
        if (p == NULL)
            goto OOM;
    }
    if (rsize != NULL)
        *rsize = usize;

    *ptr = p;
    if (config_stats) {
        assert(usize == isalloc(p));
        thread_allocated_tsd_get()->allocated += usize;
    }
    return (ALLOCM_SUCCESS);
OOM:
    if (config_xmalloc && opt_xmalloc) {
        malloc_write("<jemalloc>: Error in allocm(): "
            "out of memory\n");
        abort();
    }
    *ptr = NULL;
    return (ALLOCM_ERR_OOM);
}
1323
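/*
 * Illustrative sketch (not part of the source proper): allocm() decodes the
 * requested alignment from the low flag bits, so a caller wanting a 4 KiB
 * object on a 64-byte boundary (lg 6), zero-filled, with its usable size
 * reported back, would write roughly:
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO)
 *	    != ALLOCM_SUCCESS)
 *		...handle ALLOCM_ERR_OOM...
 *	assert(rsize >= 4096);
 */
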
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}

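/*
 * Illustrative sketch: rallocm() with ALLOCM_NO_MOVE attempts an in-place
 * resize and leaves *ptr untouched on failure, so a caller can fall back to
 * an explicit allocate/copy/free cycle:
 *
 *	size_t rsize;
 *	if (rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE)
 *	    != ALLOCM_SUCCESS)
 *		...p is intact at its old size; relocate manually...
 */
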
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}

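/*
 * Illustrative sketch: sallocm() reports the usable size of a live object,
 * which pairs naturally with dallocm() for callers that track their own
 * byte counts:
 *
 *	size_t usize;
 *	sallocm(p, &usize, 0);
 *	...use up to usize bytes...
 *	dallocm(p, 0);
 */
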
JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

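/*
 * Illustrative sketch: nallocm() reports the size class a request would be
 * rounded up to without allocating anything, so a caller can size a buffer
 * to the class boundary up front (nelems and elem_t are hypothetical):
 *
 *	size_t usize;
 *	if (nallocm(&usize, nelems * sizeof(elem_t), 0) == ALLOCM_SUCCESS)
 *		...request usize bytes and use all of them...
 */
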
#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

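/*
 * Note (sketch of the wiring, for configurations without
 * JEMALLOC_MUTEX_INIT_CB): these hooks are registered during initialization,
 * roughly as follows, so that no allocator mutex is held across fork() and
 * both processes resume with consistent lock state:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */
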
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Reinitialize all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

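/*
 * Sketch of the distinction (for illustration only): imalloc() selects an
 * arena through thread-specific data, which is unusable while the threading
 * library is still bootstrapping its own TLS image, whereas a0alloc()
 * hard-wires arenas[0]:
 *
 *	void *tls_image = a0malloc(tls_image_size);	(no TLS reads)
 *	...
 *	a0free(tls_image);
 *
 * (tls_image and tls_image_size are hypothetical caller-side names.)
 */
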
static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{
	size_t num_size = num * size;

	/* Refuse requests for which num * size overflows size_t. */
	if (num != 0 && num_size / num != size)
		return (NULL);
	return (a0alloc(num_size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/