#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
#else
static bool malloc_initializer = false;
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

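		/*
		 * Illustrative example (not from the original source): with
		 * narenas == 4, thread counts {3, 1, 2} in slots 0-2, and
		 * slot 3 still NULL, the loop above leaves choose == 1 and
		 * first_null == 3; since arenas[1]->nthreads != 0, the branch
		 * below initializes a new arena in slot 3 rather than sharing
		 * arenas[1].
		 */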
		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; fall back to a single CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
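
/*
 * Example (illustrative, not part of the original source): given the conf
 * string "abort:true,narenas:4", successive malloc_conf_next() calls yield
 * (k="abort", klen=5, v="true", vlen=4) and (k="narenas", klen=7, v="4",
 * vlen=1), returning false each time; a string such as "abort" with no value
 * triggers the "Conf string ends with key" message and a true return.
 */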

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page, plus
			 * one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
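
/*
 * Illustrative usage (not part of the original source): the same option can
 * come from any of the three sources probed above, e.g.
 *
 *	MALLOC_CONF="narenas:2,lg_chunk:24" ./a.out
 *
 * or a je_malloc_conf string compiled into the program.  Because the sources
 * are processed in order (compiled-in, /etc symlink, environment variable),
 * later sources override earlier ones.
 */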

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
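		/*
		 * For example (illustrative), result == 4096 yields
		 * pagesize_mask == 0xfff and lg_pagesize == 12.
		 */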
	}
#endif

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
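		/*
		 * (Illustrative: for alignment == 8, 0b1000, the test below
		 * computes 0b0111 & 0b1000 == 0; for a non-power such as 12,
		 * 0b1100, it computes 0b1011 & 0b1100 == 0b1000 != 0.)
		 */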
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	return (ret);
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
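	/*
	 * (Illustrative: on a 64-bit system the mask below is
	 * 0xffffffff00000000, so the division-based check runs only when num
	 * or size has bits set in its upper half.)
	 */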
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE_SIZE, size, 1);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
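/*
 * Illustrative expansion (not part of the original source): when
 * jemalloc_defs.h contains "#define je_malloc malloc", is_malloc(je_malloc)
 * expands via is_malloc_(malloc) to malloc_is_malloc, i.e. 1; otherwise it
 * expands to malloc_is_je_malloc, which is undefined and therefore evaluates
 * to 0 in the #if below.
 */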

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else
		ret = (ptr != NULL) ? isalloc(ptr) : 0;

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
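
/*
 * Illustrative mallctl usage (not part of the original source):
 *
 *	unsigned n;
 *	size_t len = sizeof(n);
 *	if (je_mallctl("arenas.narenas", &n, &len, NULL, 0) == 0)
 *		... n now holds the number of arenas ...
 */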
1228
1229JEMALLOC_ATTR(visibility("default"))
1230int
Jason Evans0a5489e2012-03-01 17:19:20 -08001231je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08001232{
1233
Jason Evans95833312010-01-27 13:45:21 -08001234 if (malloc_init())
1235 return (EAGAIN);
1236
Jason Evans3c234352010-01-27 13:10:55 -08001237 return (ctl_nametomib(name, mibp, miblenp));
1238}
1239
1240JEMALLOC_ATTR(visibility("default"))
1241int
Jason Evans0a5489e2012-03-01 17:19:20 -08001242je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1243 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08001244{
1245
Jason Evans95833312010-01-27 13:45:21 -08001246 if (malloc_init())
1247 return (EAGAIN);
1248
Jason Evans3c234352010-01-27 13:10:55 -08001249 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1250}
1251
Jason Evans7e77eaf2012-03-02 17:47:37 -08001252/*
1253 * End non-standard functions.
1254 */
1255/******************************************************************************/
1256/*
1257 * Begin experimental functions.
1258 */
1259#ifdef JEMALLOC_EXPERIMENTAL
1260
Jason Evans8e3c3c62010-09-17 15:46:18 -07001261JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001262iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001263{
1264
Jason Evans38d92102011-03-23 00:37:29 -07001265 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1266 NULL)));
1267
Jason Evans8e3c3c62010-09-17 15:46:18 -07001268 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001269 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001270 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001271 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001272 else
Jason Evans38d92102011-03-23 00:37:29 -07001273 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001274}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
        void *p;
        size_t usize;
        /*
         * Decode lg(alignment) from the low flag bits.  Masking with
         * (SIZE_T_MAX-1) clears bit 0, so a lg value of 0 yields alignment 0
         * (no alignment requirement) rather than alignment 1.
         */
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));
        bool zero = flags & ALLOCM_ZERO;
        prof_thr_cnt_t *cnt;

        assert(ptr != NULL);
        assert(size != 0);

        if (malloc_init())
                goto OOM;

        usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
        if (usize == 0)
                goto OOM;

        if (config_prof && opt_prof) {
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                        goto OOM;
                if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
                    SMALL_MAXCLASS) {
                        size_t usize_promoted = (alignment == 0) ?
                            s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                            alignment, NULL);
                        assert(usize_promoted != 0);
                        p = iallocm(usize_promoted, alignment, zero);
                        if (p == NULL)
                                goto OOM;
                        arena_prof_promoted(p, usize);
                } else {
                        p = iallocm(usize, alignment, zero);
                        if (p == NULL)
                                goto OOM;
                }
                prof_malloc(p, usize, cnt);
        } else {
                p = iallocm(usize, alignment, zero);
                if (p == NULL)
                        goto OOM;
        }
        if (rsize != NULL)
                *rsize = usize;

        *ptr = p;
        if (config_stats) {
                assert(usize == isalloc(p));
                thread_allocated_tsd_get()->allocated += usize;
        }
        return (ALLOCM_SUCCESS);
OOM:
        if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in allocm(): "
                    "out of memory\n");
                abort();
        }
        *ptr = NULL;
        return (ALLOCM_ERR_OOM);
}
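
/*
 * Non-compiled usage sketch for allocm().  ALLOCM_ALIGN() is assumed to be
 * the flag-construction macro from the public jemalloc header; sizes and
 * names are illustrative only.
 */
#if 0
static void
allocm_example(void)
{
        void *p;
        size_t rsize;

        /* Request 4096 zeroed bytes aligned to a 64-byte boundary. */
        if (je_allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
            ALLOCM_SUCCESS)
                return;
        /* rsize holds the real usable size, >= 4096. */
        je_dallocm(p, 0);
}
#endif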

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
        void *p, *q;
        size_t usize;
        size_t old_size;
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));
        bool zero = flags & ALLOCM_ZERO;
        bool no_move = flags & ALLOCM_NO_MOVE;
        prof_thr_cnt_t *cnt;

        assert(ptr != NULL);
        assert(*ptr != NULL);
        assert(size != 0);
        assert(SIZE_T_MAX - size >= extra);
        assert(malloc_initialized || IS_INITIALIZER);

        p = *ptr;
        if (config_prof && opt_prof) {
                /*
                 * usize isn't knowable before iralloc() returns when extra is
                 * non-zero.  Therefore, compute its maximum possible value and
                 * use that in PROF_ALLOC_PREP() to decide whether to capture a
                 * backtrace.  prof_realloc() will use the actual usize to
                 * decide whether to sample.
                 */
                size_t max_usize = (alignment == 0) ? s2u(size+extra) :
                    sa2u(size+extra, alignment, NULL);
                prof_ctx_t *old_ctx = prof_ctx_get(p);
                old_size = isalloc(p);
                PROF_ALLOC_PREP(1, max_usize, cnt);
                if (cnt == NULL)
                        goto OOM;
                /*
                 * Use minimum usize to determine whether promotion may happen.
                 */
                if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
                    && ((alignment == 0) ? s2u(size) : sa2u(size,
                    alignment, NULL)) <= SMALL_MAXCLASS) {
                        q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                            size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                            alignment, zero, no_move);
                        if (q == NULL)
                                goto ERR;
                        if (max_usize < PAGE_SIZE) {
                                usize = max_usize;
                                arena_prof_promoted(q, usize);
                        } else
                                usize = isalloc(q);
                } else {
                        q = iralloc(p, size, extra, alignment, zero, no_move);
                        if (q == NULL)
                                goto ERR;
                        usize = isalloc(q);
                }
                prof_realloc(q, usize, cnt, old_size, old_ctx);
                if (rsize != NULL)
                        *rsize = usize;
        } else {
                if (config_stats)
                        old_size = isalloc(p);
                q = iralloc(p, size, extra, alignment, zero, no_move);
                if (q == NULL)
                        goto ERR;
                if (config_stats)
                        usize = isalloc(q);
                if (rsize != NULL) {
                        if (config_stats == false)
                                usize = isalloc(q);
                        *rsize = usize;
                }
        }

        *ptr = q;
        if (config_stats) {
                thread_allocated_t *ta;
                ta = thread_allocated_tsd_get();
                ta->allocated += usize;
                ta->deallocated += old_size;
        }
        return (ALLOCM_SUCCESS);
ERR:
        if (no_move)
                return (ALLOCM_ERR_NOT_MOVED);
OOM:
        if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in rallocm(): "
                    "out of memory\n");
                abort();
        }
        return (ALLOCM_ERR_OOM);
}
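
/*
 * Non-compiled sketch: growing an allocation in place with rallocm().  With
 * ALLOCM_NO_MOVE set, a resize that would require relocation fails with
 * ALLOCM_ERR_NOT_MOVED (the ERR path above) and *ptr is left untouched.  The
 * helper name is hypothetical.
 */
#if 0
static int
rallocm_grow_in_place(void **ptr, size_t target_size)
{
        size_t rsize;

        /* extra == 0 makes this an all-or-nothing resize request. */
        return (je_rallocm(ptr, &rsize, target_size, 0, ALLOCM_NO_MOVE));
}
#endif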

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
        size_t sz;

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_ivsalloc)
                sz = ivsalloc(ptr);
        else {
                assert(ptr != NULL);
                sz = isalloc(ptr);
        }
        assert(rsize != NULL);
        *rsize = sz;

        return (ALLOCM_SUCCESS);
}
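
/*
 * Non-compiled sketch: sallocm() reports the usable size of an existing
 * allocation, which may exceed the size originally requested because of
 * size-class rounding.
 */
#if 0
static size_t
sallocm_example(const void *p)
{
        size_t rsize;

        if (je_sallocm(p, &rsize, 0) != ALLOCM_SUCCESS)
                return (0);
        return (rsize);
}
#endif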

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
        size_t usize;

        assert(ptr != NULL);
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_stats)
                usize = isalloc(ptr);
        if (config_prof && opt_prof) {
                if (config_stats == false)
                        usize = isalloc(ptr);
                prof_free(ptr, usize);
        }
        if (config_stats)
                thread_allocated_tsd_get()->deallocated += usize;
        idalloc(ptr);

        return (ALLOCM_SUCCESS);
}
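
/*
 * Non-compiled sketch: dallocm() is the deallocation half of the experimental
 * API.  As the body above shows, the flags argument is currently unused on
 * this path, so 0 is the conventional value to pass.
 */
#if 0
static void
dallocm_example(void *p)
{

        je_dallocm(p, 0);
}
#endif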

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
        size_t usize;
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));

        assert(size != 0);

        if (malloc_init())
                return (ALLOCM_ERR_OOM);

        usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
        if (usize == 0)
                return (ALLOCM_ERR_OOM);

        if (rsize != NULL)
                *rsize = usize;
        return (ALLOCM_SUCCESS);
}
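
/*
 * Non-compiled sketch: nallocm() computes the usable size that allocm() would
 * return for a given request without performing an allocation, which lets a
 * caller size buffers to jemalloc's size classes up front.
 */
#if 0
static size_t
nallocm_example(size_t size)
{
        size_t rsize;

        if (je_nallocm(&rsize, size, 0) != ALLOCM_SUCCESS)
                return (0);
        return (rsize); /* Smallest usable size >= size. */
}
#endif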

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
        unsigned i;

        /* Acquire all mutexes in a safe order. */
        malloc_mutex_prefork(&arenas_lock);
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_prefork(arenas[i]);
        }
        base_prefork();
        huge_prefork();
        chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
        unsigned i;

        /* Release all mutexes, now that fork() has completed. */
        chunk_dss_postfork_parent();
        huge_postfork_parent();
        base_postfork_parent();
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_parent(arenas[i]);
        }
        malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
        unsigned i;

        /* Release all mutexes, now that fork() has completed. */
        chunk_dss_postfork_child();
        huge_postfork_child();
        base_postfork_child();
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_child(arenas[i]);
        }
        malloc_mutex_postfork_child(&arenas_lock);
}
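
/*
 * Non-compiled sketch of how these handlers are meant to be wired up when a
 * threading library (rather than a JEMALLOC_MUTEX_INIT_CB-style callback)
 * drives fork protection: a single pthread_atfork() registration, assumed to
 * live in initialization code elsewhere.
 */
#if 0
static void
register_fork_handlers(void)
{

        pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
            jemalloc_postfork_child);
}
#endif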

/******************************************************************************/