#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

pthread_key_t arenas_tsd;
#ifndef NO_TLS
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
#endif
pthread_key_t thread_allocated_tsd;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

unsigned ncpus;

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
#ifdef NO_TLS
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
static int imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
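
/*
 * Illustration of the policy above (hypothetical state): with narenas == 4
 * and arenas == {a0 (3 threads), a1 (1 thread), NULL, NULL}, the scan leaves
 * choose == 1 and first_null == 2.  Because a1 still has threads assigned and
 * an uninitialized slot exists, arenas_extend(2) creates a fresh arena for
 * the calling thread; an existing arena is reused only once every slot is
 * initialized, or when the least loaded arena is completely idle.
 */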

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif

/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
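
/*
 * For illustration: given the conf string "lg_chunk:22,narenas:4", the first
 * call to malloc_conf_next() yields k = "lg_chunk" (klen = 8) and v = "22"
 * (vlen = 2), consuming the trailing comma; the second call yields
 * k = "narenas" (klen = 7) and v = "4" (vlen = 1) and leaves *opts_p pointing
 * at the terminating '\0', so the caller's parse loop exits.
 */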

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = ul;				\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
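
/*
 * Usage note: the three sources above are consulted in order (compiled-in
 * je_malloc_conf, the /etc/malloc.conf symlink, then the MALLOC_CONF
 * environment variable), so a later source overrides an earlier one for the
 * same key.  For example, assuming a build without JEMALLOC_PREFIX:
 *
 *	MALLOC_CONF="junk:true,narenas:2" ./app
 *
 * enables junk filling and sets opt_narenas to 2, regardless of any
 * compiled-in or symlink-provided settings for those keys.
 */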

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (size_t)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif

	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef NO_TLS
	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && pthread_key_create(&thread_allocated_tsd,
	    thread_allocated_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);
	arenas[0]->nthreads++;

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}
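
/*
 * Note on the prof_promote path above: a sampled allocation
 * ((uintptr_t)cnt != (uintptr_t)1U) that would otherwise land in a small size
 * class is instead served from the smallest large class (SMALL_MAXCLASS+1),
 * and arena_prof_promoted() records the requested usize.  Small runs keep no
 * per-object profiling metadata, so promotion gives the profiler a place to
 * associate its context with the object.
 */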

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    bool enforce_min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (enforce_min_alignment && alignment < sizeof(void *))) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, true);
}
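
/*
 * Example caller usage (a sketch): alignment must be a power of 2 no smaller
 * than sizeof(void *), or EINVAL is returned and *memptr is left untouched.
 *
 *	void *p;
 *	if (posix_memalign(&p, 64, 1024) == 0) {
 *		... p is 64-byte aligned ...
 *		free(p);
 *	}
 */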

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}
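
/*
 * Worked example of the overflow guard in je_calloc() on a 64-bit system:
 * SIZE_T_MAX << (sizeof(size_t) << 2) is 0xffffffff00000000, i.e. the high
 * half of a size_t.  For num = 0x100000001 and size = 0x100000000, the mask
 * test fires (num uses bit 32), and num_size / size == 1 != num exposes the
 * wrapped product, so NULL is returned.  When both operands fit in the low
 * 32 bits, the product cannot wrap and the division is skipped entirely.
 */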

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, alignment, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	imemalign(&ret, PAGE_SIZE, size, false);
	return (ret);
}
#endif

#if (!defined(JEMALLOC_PREFIX) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		ret = isalloc(ptr);
	}

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
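
/*
 * Example mallctl usage (a sketch): read the total number of bytes currently
 * allocated by the application.
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		printf("allocated: %zu\n", allocated);
 *
 * The mib variants amortize the cost of name lookup for repeated queries:
 * translate the name once with mallctlnametomib(), then call mallctlbymib()
 * with the cached mib on each subsequent query.
 */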
1315
Jason Evans7e77eaf2012-03-02 17:47:37 -08001316/*
1317 * End non-standard functions.
1318 */
1319/******************************************************************************/
1320/*
1321 * Begin experimental functions.
1322 */
1323#ifdef JEMALLOC_EXPERIMENTAL
1324
Jason Evans8e3c3c62010-09-17 15:46:18 -07001325JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001326iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001327{
1328
Jason Evans38d92102011-03-23 00:37:29 -07001329 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1330 NULL)));
1331
Jason Evans8e3c3c62010-09-17 15:46:18 -07001332 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001333 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001334 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001335 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001336 else
Jason Evans38d92102011-03-23 00:37:29 -07001337 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001338}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
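
/*
 * Illustrative sketch (hypothetical caller): request 4 KiB, zeroed and
 * 4 KiB-aligned, and learn the real usable size via *rsize.
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		abort();
 *	assert(rsize >= 4096);
 */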

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
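
/*
 * Illustrative sketch (hypothetical caller): try to grow an object in
 * place.  ALLOCM_NO_MOVE makes rallocm() fail with ALLOCM_ERR_NOT_MOVED
 * rather than relocate the object, so the caller can fall back to an
 * allocate-copy-free path of its own.
 *
 *	if (rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
 *	    ALLOCM_ERR_NOT_MOVED) {
 *		... allocate a new object, copy, free the old one ...
 *	}
 */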

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		ALLOCATED_ADD(0, usize);
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
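
/*
 * Illustrative sketch (hypothetical caller): query the usable size of an
 * allocation, then release it.  Both calls ignore their flags arguments.
 *
 *	size_t sz;
 *	sallocm(p, &sz, 0);
 *	dallocm(p, 0);
 */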

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
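
/*
 * Illustrative sketch (hypothetical caller): nallocm() computes the usable
 * size that allocm() would report for the same arguments, without
 * allocating anything.
 *
 *	size_t usize;
 *	if (nallocm(&usize, 4096, ALLOCM_LG_ALIGN(12)) == ALLOCM_SUCCESS)
 *		assert(usize >= 4096);
 */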

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries to protect malloc
 * state across fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

	if (config_dss)
		malloc_mutex_lock(&dss_mtx);
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

	if (config_dss)
		malloc_mutex_unlock(&dss_mtx);

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
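
/*
 * Illustrative sketch (assumed integration, not part of this file): these
 * hooks are typically registered via pthread_atfork() so that no malloc
 * mutex is held at the moment fork() snapshots the address space:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork,
 *	    jemalloc_postfork);
 */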

/******************************************************************************/