#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

#ifdef DYNAMIC_PAGE_SHIFT
size_t	pagesize;
size_t	pagesize_mask;
size_t	lg_pagesize;
#endif

unsigned	ncpus;

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
#else
static bool	malloc_initializer = false;
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
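/*
 * Illustrative walk-through: with narenas == 4, slots {0, 1, 2} initialized
 * with nthreads {2, 0, 1}, and slot 3 uninitialized, the loop above yields
 * choose == 1 and first_null == 3.  Since arenas[1]->nthreads == 0, the idle
 * arena is reused; arenas_extend() is called to populate a NULL slot only
 * once every extant arena already has at least one thread assigned to it.
 */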

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
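/*
 * Note: when the stats_print option is enabled, malloc_init_hard() registers
 * this handler via atexit(), so one final statistics dump happens at process
 * exit.
 */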

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
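/*
 * For illustration: given the conf string "abort:true,narenas:4", the first
 * call returns k = "abort" (klen 5) and v = "true" (vlen 4), advancing
 * *opts_p past the comma; the second call returns k = "narenas", v = "4",
 * and stops at the terminating '\0'.  A key is a run of [A-Za-z0-9_]
 * characters, ':' separates key from value, and ',' separates pairs.
 */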

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
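/*
 * Usage example (illustrative; which option names are honored depends on
 * configure-time settings such as fill, tcache, and prof support):
 *
 *	MALLOC_CONF="junk:true,narenas:4" ./a.out
 *
 * The same "key:value,..." syntax works for all three sources probed above;
 * because the loop applies them in order, the MALLOC_CONF environment
 * variable overrides the /etc/malloc.conf symlink name, which in turn
 * overrides the compiled-in je_malloc_conf string.
 */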

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard() == false)
		register_zone();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	return (ret);
}
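/*
 * Note the differing min_alignment arguments above: POSIX requires
 * posix_memalign() to reject alignments that are not a power-of-2 multiple
 * of sizeof(void *), whereas aligned_alloc() passes 1, so imemalign() merely
 * checks for a nonzero power of 2.  For example, alignment == 2 is rejected
 * with EINVAL by je_posix_memalign() but accepted by je_aligned_alloc()
 * (assuming sizeof(void *) > 2).
 */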

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}
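	/*
	 * Worked example of the check above, assuming a 64-bit size_t: the
	 * mask is SIZE_T_MAX << 32, so the division is only performed when
	 * num or size has a bit set in its upper 32 bits.  If both operands
	 * are < 2^32, the product is < 2^64 and cannot have overflowed, so
	 * the common case skips the divide entirely.
	 */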

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE_SIZE, size, 1);
	return (ret);
}
#endif
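/*
 * Note: the memalign() and valloc() overrides above discard imemalign()'s
 * return code; the JEMALLOC_CC_SILENCE_INIT(NULL) initializer ensures that
 * NULL comes back on failure.  Unlike je_aligned_alloc(), they do not
 * translate the error into errno.
 */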
1163
Mike Hommey5c89c502012-03-26 17:46:57 +02001164/*
1165 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1166 * #define je_malloc malloc
1167 */
1168#define malloc_is_malloc 1
1169#define is_malloc_(a) malloc_is_ ## a
1170#define is_malloc(a) is_malloc_(a)
1171
1172#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
Jason Evans4bb09832012-02-29 10:37:27 -08001173/*
1174 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1175 * to inconsistently reference libc's malloc(3)-compatible functions
1176 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1177 *
1178 * These definitions interpose hooks in glibc.  The functions are actually
1179 * passed an extra argument for the caller return address, which will be
1180 * ignored.
1181 */
1182JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001183void (* const __free_hook)(void *ptr) = je_free;
Jason Evans4bb09832012-02-29 10:37:27 -08001184
1185JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001186void *(* const __malloc_hook)(size_t size) = je_malloc;
Jason Evans4bb09832012-02-29 10:37:27 -08001187
1188JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001189void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;
Jason Evans4bb09832012-02-29 10:37:27 -08001190
1191JEMALLOC_ATTR(visibility("default"))
Jason Evans0a5489e2012-03-01 17:19:20 -08001192void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
Jason Evans4bb09832012-02-29 10:37:27 -08001193#endif
1194
Jason Evans6a0d2912010-09-20 16:44:23 -07001195/*
1196 * End non-standard override functions.
1197 */
1198/******************************************************************************/
1199/*
Jason Evans289053c2009-06-22 12:08:42 -07001200 * Begin non-standard functions.
1201 */
1202
Jason Evanse476f8a2010-01-16 09:53:50 -08001203JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001204size_t
Jason Evans0a5489e2012-03-01 17:19:20 -08001205je_malloc_usable_size(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001206{
Jason Evans569432c2009-12-29 00:09:15 -08001207 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001208
Jason Evans41b6afb2012-02-02 22:04:57 -08001209 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001210
Jason Evans7372b152012-02-10 20:22:09 -08001211 if (config_ivsalloc)
1212 ret = ivsalloc(ptr);
Jason Evans2465bdf2012-03-26 13:13:55 -07001213 else
1214 ret = (ptr != NULL) ? isalloc(ptr) : 0;
Jason Evans289053c2009-06-22 12:08:42 -07001215
Jason Evans569432c2009-12-29 00:09:15 -08001216 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001217}
1218
Jason Evans4201af02010-01-24 02:53:40 -08001219JEMALLOC_ATTR(visibility("default"))
1220void
Jason Evans0a5489e2012-03-01 17:19:20 -08001221je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1222 const char *opts)
Jason Evans4201af02010-01-24 02:53:40 -08001223{
1224
Jason Evans698805c2010-03-03 17:45:38 -08001225 stats_print(write_cb, cbopaque, opts);
Jason Evans4201af02010-01-24 02:53:40 -08001226}
1227
Jason Evans3c234352010-01-27 13:10:55 -08001228JEMALLOC_ATTR(visibility("default"))
1229int
Jason Evans0a5489e2012-03-01 17:19:20 -08001230je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
Jason Evans3c234352010-01-27 13:10:55 -08001231 size_t newlen)
1232{
1233
Jason Evans95833312010-01-27 13:45:21 -08001234 if (malloc_init())
1235 return (EAGAIN);
1236
Jason Evans3c234352010-01-27 13:10:55 -08001237 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1238}
1239
1240JEMALLOC_ATTR(visibility("default"))
1241int
Jason Evans0a5489e2012-03-01 17:19:20 -08001242je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
Jason Evans3c234352010-01-27 13:10:55 -08001243{
1244
Jason Evans95833312010-01-27 13:45:21 -08001245 if (malloc_init())
1246 return (EAGAIN);
1247
Jason Evans3c234352010-01-27 13:10:55 -08001248 return (ctl_nametomib(name, mibp, miblenp));
1249}
1250
1251JEMALLOC_ATTR(visibility("default"))
1252int
Jason Evans0a5489e2012-03-01 17:19:20 -08001253je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1254 void *newp, size_t newlen)
Jason Evans3c234352010-01-27 13:10:55 -08001255{
1256
Jason Evans95833312010-01-27 13:45:21 -08001257 if (malloc_init())
1258 return (EAGAIN);
1259
Jason Evans3c234352010-01-27 13:10:55 -08001260 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1261}
1262
Jason Evans7e77eaf2012-03-02 17:47:37 -08001263/*
1264 * End non-standard functions.
1265 */
1266/******************************************************************************/
1267/*
1268 * Begin experimental functions.
1269 */
1270#ifdef JEMALLOC_EXPERIMENTAL
1271
Jason Evans8e3c3c62010-09-17 15:46:18 -07001272JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001273iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001274{
1275
Jason Evans38d92102011-03-23 00:37:29 -07001276 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1277 NULL)));
1278
Jason Evans8e3c3c62010-09-17 15:46:18 -07001279 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001280 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001281 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001282 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001283 else
Jason Evans38d92102011-03-23 00:37:29 -07001284 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001285}
1286
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
        void *p;
        size_t usize;
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));
        bool zero = flags & ALLOCM_ZERO;
        prof_thr_cnt_t *cnt;

        assert(ptr != NULL);
        assert(size != 0);

        if (malloc_init())
                goto OOM;

        usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
        if (usize == 0)
                goto OOM;

        if (config_prof && opt_prof) {
                PROF_ALLOC_PREP(1, usize, cnt);
                if (cnt == NULL)
                        goto OOM;
                if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
                    SMALL_MAXCLASS) {
                        size_t usize_promoted = (alignment == 0) ?
                            s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
                            alignment, NULL);
                        assert(usize_promoted != 0);
                        p = iallocm(usize_promoted, alignment, zero);
                        if (p == NULL)
                                goto OOM;
                        arena_prof_promoted(p, usize);
                } else {
                        p = iallocm(usize, alignment, zero);
                        if (p == NULL)
                                goto OOM;
                }
                prof_malloc(p, usize, cnt);
        } else {
                p = iallocm(usize, alignment, zero);
                if (p == NULL)
                        goto OOM;
        }
        if (rsize != NULL)
                *rsize = usize;

        *ptr = p;
        if (config_stats) {
                assert(usize == isalloc(p));
                thread_allocated_tsd_get()->allocated += usize;
        }
        return (ALLOCM_SUCCESS);
OOM:
        if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in allocm(): "
                    "out of memory\n");
                abort();
        }
        *ptr = NULL;
        return (ALLOCM_ERR_OOM);
}
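
/*
 * Illustrative sketch (assumptions: the installed <jemalloc/jemalloc.h>
 * exposes the experimental allocm() API and ALLOCM_* flag macros, as
 * je_allocm() above suggests): request a 64-byte-aligned, zeroed allocation
 * and retrieve its usable size, which may exceed the requested size.
 */
#if 0
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void *
alloc_cacheline_aligned(size_t size, size_t *usable)
{
        void *p;

        /* ALLOCM_LG_ALIGN(6) encodes lg(64) in the low flag bits. */
        if (allocm(&p, usable, size, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO)
            != ALLOCM_SUCCESS)
                return (NULL);
        return (p);
}
#endif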

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
        void *p, *q;
        size_t usize;
        size_t old_size;
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));
        bool zero = flags & ALLOCM_ZERO;
        bool no_move = flags & ALLOCM_NO_MOVE;
        prof_thr_cnt_t *cnt;

        assert(ptr != NULL);
        assert(*ptr != NULL);
        assert(size != 0);
        assert(SIZE_T_MAX - size >= extra);
        assert(malloc_initialized || IS_INITIALIZER);

        p = *ptr;
        if (config_prof && opt_prof) {
                /*
                 * usize isn't knowable before iralloc() returns when extra is
                 * non-zero.  Therefore, compute its maximum possible value and
                 * use that in PROF_ALLOC_PREP() to decide whether to capture a
                 * backtrace.  prof_realloc() will use the actual usize to
                 * decide whether to sample.
                 */
                size_t max_usize = (alignment == 0) ? s2u(size+extra) :
                    sa2u(size+extra, alignment, NULL);
                prof_ctx_t *old_ctx = prof_ctx_get(p);
                old_size = isalloc(p);
                PROF_ALLOC_PREP(1, max_usize, cnt);
                if (cnt == NULL)
                        goto OOM;
                /*
                 * Use minimum usize to determine whether promotion may happen.
                 */
                if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
                    && ((alignment == 0) ? s2u(size) : sa2u(size,
                    alignment, NULL)) <= SMALL_MAXCLASS) {
                        q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
                            size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
                            alignment, zero, no_move);
                        if (q == NULL)
                                goto ERR;
                        if (max_usize < PAGE_SIZE) {
                                usize = max_usize;
                                arena_prof_promoted(q, usize);
                        } else
                                usize = isalloc(q);
                } else {
                        q = iralloc(p, size, extra, alignment, zero, no_move);
                        if (q == NULL)
                                goto ERR;
                        usize = isalloc(q);
                }
                prof_realloc(q, usize, cnt, old_size, old_ctx);
                if (rsize != NULL)
                        *rsize = usize;
        } else {
                if (config_stats)
                        old_size = isalloc(p);
                q = iralloc(p, size, extra, alignment, zero, no_move);
                if (q == NULL)
                        goto ERR;
                if (config_stats)
                        usize = isalloc(q);
                if (rsize != NULL) {
                        if (config_stats == false)
                                usize = isalloc(q);
                        *rsize = usize;
                }
        }

        *ptr = q;
        if (config_stats) {
                thread_allocated_t *ta;
                ta = thread_allocated_tsd_get();
                ta->allocated += usize;
                ta->deallocated += old_size;
        }
        return (ALLOCM_SUCCESS);
ERR:
        if (no_move)
                return (ALLOCM_ERR_NOT_MOVED);
OOM:
        if (config_xmalloc && opt_xmalloc) {
                malloc_write("<jemalloc>: Error in rallocm(): "
                    "out of memory\n");
                abort();
        }
        return (ALLOCM_ERR_OOM);
}
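
/*
 * Illustrative sketch (same header assumptions as above): first attempt an
 * in-place resize with ALLOCM_NO_MOVE, which fails with ALLOCM_ERR_NOT_MOVED
 * rather than relocating the allocation; only then permit a moving
 * reallocation.  grow_buffer() is a hypothetical helper, not library API.
 */
#if 0
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static int
grow_buffer(void **ptr, size_t *usable, size_t size, size_t extra)
{

        if (rallocm(ptr, usable, size, extra, ALLOCM_NO_MOVE) ==
            ALLOCM_SUCCESS)
                return (0);
        /* In-place growth failed; allow the allocation to move. */
        if (rallocm(ptr, usable, size, extra, 0) == ALLOCM_SUCCESS)
                return (0);
        return (-1);
}
#endif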

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
        size_t sz;

        assert(malloc_initialized || IS_INITIALIZER);

        if (config_ivsalloc)
                sz = ivsalloc(ptr);
        else {
                assert(ptr != NULL);
                sz = isalloc(ptr);
        }
        assert(rsize != NULL);
        *rsize = sz;

        return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
        size_t usize;

        assert(ptr != NULL);
        assert(malloc_initialized || IS_INITIALIZER);

        if (config_stats)
                usize = isalloc(ptr);
        if (config_prof && opt_prof) {
                if (config_stats == false)
                        usize = isalloc(ptr);
                prof_free(ptr, usize);
        }
        if (config_stats)
                thread_allocated_tsd_get()->deallocated += usize;
        idalloc(ptr);

        return (ALLOCM_SUCCESS);
}
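
/*
 * Illustrative sketch (same header assumptions as above): sallocm() reports
 * the same usable size that allocm() returned through rsize, and dallocm()
 * releases the allocation.
 */
#if 0
#include <assert.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
query_then_free(void)
{
        void *p;
        size_t usize, queried;

        if (allocm(&p, &usize, 100, 0) != ALLOCM_SUCCESS)
                return;
        sallocm(p, &queried, 0);
        assert(queried == usize);
        dallocm(p, 0);
}
#endif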

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
        size_t usize;
        size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
            & (SIZE_T_MAX-1));

        assert(size != 0);

        if (malloc_init())
                return (ALLOCM_ERR_OOM);

        usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
        if (usize == 0)
                return (ALLOCM_ERR_OOM);

        if (rsize != NULL)
                *rsize = usize;
        return (ALLOCM_SUCCESS);
}
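
/*
 * Illustrative sketch (same header assumptions as above): nallocm() computes
 * the usable size a request would round up to, without allocating, so a
 * caller can size metadata or choose a size class in advance.
 */
#if 0
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static size_t
would_round_to(size_t size)
{
        size_t usize;

        if (nallocm(&usize, size, 0) != ALLOCM_SUCCESS)
                return (0);
        return (usize);
}
#endif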

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries to protect malloc
 * during fork(); see the registration sketch after jemalloc_postfork_child()
 * below.
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
        unsigned i;

        /* Acquire all mutexes in a safe order. */
        malloc_mutex_prefork(&arenas_lock);
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_prefork(arenas[i]);
        }
        base_prefork();
        huge_prefork();
        chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
        unsigned i;

        /* Release all mutexes, now that fork() has completed. */
        chunk_dss_postfork_parent();
        huge_postfork_parent();
        base_postfork_parent();
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_parent(arenas[i]);
        }
        malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
        unsigned i;

        /* Release all mutexes, now that fork() has completed. */
        chunk_dss_postfork_child();
        huge_postfork_child();
        base_postfork_child();
        for (i = 0; i < narenas; i++) {
                if (arenas[i] != NULL)
                        arena_postfork_child(arenas[i]);
        }
        malloc_mutex_postfork_child(&arenas_lock);
}
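
/*
 * Illustrative sketch: on configurations without JEMALLOC_MUTEX_INIT_CB, the
 * allocator registers the handlers above during initialization, roughly as
 * follows; a threading library using the _malloc_* variants would make an
 * equivalent arrangement.  (The actual registration lives in the
 * initialization code, not here.)
 */
#if 0
#include <pthread.h>

static void
install_fork_handlers(void)
{

        /*
         * Run the prefork handler in the parent before fork(), and the
         * matching postfork handlers in parent and child afterward.
         */
        pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
            jemalloc_postfork_child);
}
#endif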

/******************************************************************************/