blob: 08e5f31c71759da820dd19230f27b4eb92bb8aaa [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evans289053c2009-06-22 12:08:42 -07003
Jason Evans289053c2009-06-22 12:08:42 -07004/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -08005/* Data. */
Jason Evans289053c2009-06-22 12:08:42 -07006
Jason Evans3c234352010-01-27 13:10:55 -08007malloc_mutex_t arenas_lock;
Jason Evanse476f8a2010-01-16 09:53:50 -08008arena_t **arenas;
9unsigned narenas;
Jason Evanse476f8a2010-01-16 09:53:50 -080010
Jason Evans597632b2011-03-18 13:41:33 -070011pthread_key_t arenas_tsd;
Jason Evanse476f8a2010-01-16 09:53:50 -080012#ifndef NO_TLS
Jason Evans2dbecf12010-09-05 10:35:13 -070013__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
Jason Evanse476f8a2010-01-16 09:53:50 -080014#endif
Jason Evans289053c2009-06-22 12:08:42 -070015
Jason Evans7372b152012-02-10 20:22:09 -080016#ifndef NO_TLS
Jason Evans93443682010-10-20 17:39:18 -070017__thread thread_allocated_t thread_allocated_tls;
Jason Evans93443682010-10-20 17:39:18 -070018#endif
Jason Evans7372b152012-02-10 20:22:09 -080019pthread_key_t thread_allocated_tsd;
Jason Evans93443682010-10-20 17:39:18 -070020
Jason Evans289053c2009-06-22 12:08:42 -070021/* Set to true once the allocator has been initialized. */
Jason Evans93443682010-10-20 17:39:18 -070022static bool malloc_initialized = false;
Jason Evans289053c2009-06-22 12:08:42 -070023
Jason Evansb7924f52009-06-23 19:01:18 -070024/* Used to let the initializing thread recursively allocate. */
Jason Evans93443682010-10-20 17:39:18 -070025static pthread_t malloc_initializer = (unsigned long)0;
Jason Evansb7924f52009-06-23 19:01:18 -070026
Jason Evans289053c2009-06-22 12:08:42 -070027/* Used to avoid initialization races. */
Jason Evans7372b152012-02-10 20:22:09 -080028static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
Jason Evans289053c2009-06-22 12:08:42 -070029
Jason Evansb7924f52009-06-23 19:01:18 -070030#ifdef DYNAMIC_PAGE_SHIFT
Jason Evanse476f8a2010-01-16 09:53:50 -080031size_t pagesize;
32size_t pagesize_mask;
33size_t lg_pagesize;
Jason Evansb7924f52009-06-23 19:01:18 -070034#endif
35
Jason Evanse476f8a2010-01-16 09:53:50 -080036unsigned ncpus;
Jason Evans289053c2009-06-22 12:08:42 -070037
Jason Evanse476f8a2010-01-16 09:53:50 -080038/* Runtime configuration options. */
Jason Evanse7339702010-10-23 18:37:06 -070039const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
Jason Evansb7924f52009-06-23 19:01:18 -070040#ifdef JEMALLOC_DEBUG
Jason Evanse476f8a2010-01-16 09:53:50 -080041bool opt_abort = true;
Jason Evansb7924f52009-06-23 19:01:18 -070042# ifdef JEMALLOC_FILL
Jason Evanse476f8a2010-01-16 09:53:50 -080043bool opt_junk = true;
Jason Evans7372b152012-02-10 20:22:09 -080044# else
45bool opt_junk = false;
Jason Evansb7924f52009-06-23 19:01:18 -070046# endif
Jason Evans289053c2009-06-22 12:08:42 -070047#else
Jason Evanse476f8a2010-01-16 09:53:50 -080048bool opt_abort = false;
Jason Evanse476f8a2010-01-16 09:53:50 -080049bool opt_junk = false;
Jason Evans289053c2009-06-22 12:08:42 -070050#endif
Jason Evanse476f8a2010-01-16 09:53:50 -080051bool opt_sysv = false;
Jason Evanse476f8a2010-01-16 09:53:50 -080052bool opt_xmalloc = false;
Jason Evanse476f8a2010-01-16 09:53:50 -080053bool opt_zero = false;
Jason Evanse7339702010-10-23 18:37:06 -070054size_t opt_narenas = 0;
Jason Evans289053c2009-06-22 12:08:42 -070055
Jason Evans289053c2009-06-22 12:08:42 -070056/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080057/* Function prototypes for non-inline static functions. */
Jason Evans289053c2009-06-22 12:08:42 -070058
Jason Evans698805c2010-03-03 17:45:38 -080059static void wrtmessage(void *cbopaque, const char *s);
Jason Evans03c22372010-01-03 12:10:42 -080060static void stats_print_atexit(void);
Jason Evansc9658dd2009-06-22 14:44:08 -070061static unsigned malloc_ncpus(void);
Jason Evans597632b2011-03-18 13:41:33 -070062static void arenas_cleanup(void *arg);
Jason Evans7372b152012-02-10 20:22:09 -080063#ifdef NO_TLS
Jason Evans93443682010-10-20 17:39:18 -070064static void thread_allocated_cleanup(void *arg);
65#endif
Jason Evanse7339702010-10-23 18:37:06 -070066static bool malloc_conf_next(char const **opts_p, char const **k_p,
67 size_t *klen_p, char const **v_p, size_t *vlen_p);
68static void malloc_conf_error(const char *msg, const char *k, size_t klen,
69 const char *v, size_t vlen);
70static void malloc_conf_init(void);
Jason Evans289053c2009-06-22 12:08:42 -070071static bool malloc_init_hard(void);
Jason Evansa5070042011-08-12 13:48:27 -070072static int imemalign(void **memptr, size_t alignment, size_t size);
Jason Evans289053c2009-06-22 12:08:42 -070073
Jason Evans289053c2009-06-22 12:08:42 -070074/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080075/* malloc_message() setup. */
Jason Evans289053c2009-06-22 12:08:42 -070076
JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
	/*
	 * Default malloc_message() callback: write s to stderr.  cbopaque is
	 * unused.  The write(2) result is captured only to silence
	 * warn_unused_result; failure is deliberately ignored, since there is
	 * no reasonable way to report an error from the error reporter.
	 */
	UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}
83
Jason Evans698805c2010-03-03 17:45:38 -080084void (*JEMALLOC_P(malloc_message))(void *, const char *s)
85 JEMALLOC_ATTR(visibility("default")) = wrtmessage;
Jason Evansc9658dd2009-06-22 14:44:08 -070086
87/******************************************************************************/
88/*
Jason Evanse476f8a2010-01-16 09:53:50 -080089 * Begin miscellaneous support functions.
Jason Evansb7924f52009-06-23 19:01:18 -070090 */
91
Jason Evanse476f8a2010-01-16 09:53:50 -080092/* Create a new arena and insert it into the arenas array at index ind. */
93arena_t *
94arenas_extend(unsigned ind)
Jason Evans289053c2009-06-22 12:08:42 -070095{
96 arena_t *ret;
97
Jason Evansb1726102012-02-28 16:50:47 -080098 ret = (arena_t *)base_alloc(sizeof(arena_t));
Jason Evanse476f8a2010-01-16 09:53:50 -080099 if (ret != NULL && arena_new(ret, ind) == false) {
100 arenas[ind] = ret;
101 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -0700102 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800103 /* Only reached if there is an OOM error. */
Jason Evans289053c2009-06-22 12:08:42 -0700104
Jason Evanse476f8a2010-01-16 09:53:50 -0800105 /*
106 * OOM here is quite inconvenient to propagate, since dealing with it
107 * would require a check for failure in the fast path. Instead, punt
108 * by using arenas[0]. In practice, this is an extremely unlikely
109 * failure.
110 */
Jason Evans698805c2010-03-03 17:45:38 -0800111 malloc_write("<jemalloc>: Error initializing arena\n");
Jason Evanse476f8a2010-01-16 09:53:50 -0800112 if (opt_abort)
113 abort();
Jason Evans289053c2009-06-22 12:08:42 -0700114
Jason Evanse476f8a2010-01-16 09:53:50 -0800115 return (arenas[0]);
Jason Evans289053c2009-06-22 12:08:42 -0700116}
117
/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 *
 * Returns the arena assigned to the calling thread, bumping its nthreads
 * count under arenas_lock, and records the choice in thread-specific data
 * via ARENA_SET().
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		/*
		 * NOTE(review): arenas[choose] appears to never be NULL here
		 * (choose only ever indexes non-NULL slots, and arenas[0] is
		 * asserted non-NULL above), so this condition seems to reduce
		 * to first_null == narenas — confirm before simplifying.
		 */
		if (arenas[choose] == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		/* Single arena: everyone shares arenas[0]. */
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	ARENA_SET(ret);

	return (ret);
}
Jason Evans289053c2009-06-22 12:08:42 -0700180
Jason Evansa09f55c2010-09-20 16:05:41 -0700181/*
182 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
183 * provide a wrapper.
184 */
185int
186buferror(int errnum, char *buf, size_t buflen)
187{
188#ifdef _GNU_SOURCE
189 char *b = strerror_r(errno, buf, buflen);
190 if (b != buf) {
191 strncpy(buf, b, buflen);
192 buf[buflen-1] = '\0';
193 }
194 return (0);
195#else
196 return (strerror_r(errno, buf, buflen));
197#endif
198}
199
Jason Evans03c22372010-01-03 12:10:42 -0800200static void
201stats_print_atexit(void)
202{
203
Jason Evans7372b152012-02-10 20:22:09 -0800204 if (config_tcache && config_stats) {
205 unsigned i;
Jason Evans03c22372010-01-03 12:10:42 -0800206
Jason Evans7372b152012-02-10 20:22:09 -0800207 /*
208 * Merge stats from extant threads. This is racy, since
209 * individual threads do not lock when recording tcache stats
210 * events. As a consequence, the final stats may be slightly
211 * out of date by the time they are reported, if other threads
212 * continue to allocate.
213 */
214 for (i = 0; i < narenas; i++) {
215 arena_t *arena = arenas[i];
216 if (arena != NULL) {
217 tcache_t *tcache;
Jason Evans03c22372010-01-03 12:10:42 -0800218
Jason Evans7372b152012-02-10 20:22:09 -0800219 /*
220 * tcache_stats_merge() locks bins, so if any
221 * code is introduced that acquires both arena
222 * and bin locks in the opposite order,
223 * deadlocks may result.
224 */
225 malloc_mutex_lock(&arena->lock);
226 ql_foreach(tcache, &arena->tcache_ql, link) {
227 tcache_stats_merge(tcache, arena);
228 }
229 malloc_mutex_unlock(&arena->lock);
Jason Evans03c22372010-01-03 12:10:42 -0800230 }
Jason Evans03c22372010-01-03 12:10:42 -0800231 }
232 }
Jason Evansed1bf452010-01-19 12:11:25 -0800233 JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700234}
235
Jason Evans9dcad2d2011-02-13 18:11:54 -0800236thread_allocated_t *
237thread_allocated_get_hard(void)
238{
239 thread_allocated_t *thread_allocated = (thread_allocated_t *)
240 imalloc(sizeof(thread_allocated_t));
241 if (thread_allocated == NULL) {
242 static thread_allocated_t static_thread_allocated = {0, 0};
243 malloc_write("<jemalloc>: Error allocating TSD;"
244 " mallctl(\"thread.{de,}allocated[p]\", ...)"
245 " will be inaccurate\n");
246 if (opt_abort)
247 abort();
248 return (&static_thread_allocated);
249 }
250 pthread_setspecific(thread_allocated_tsd, thread_allocated);
251 thread_allocated->allocated = 0;
252 thread_allocated->deallocated = 0;
253 return (thread_allocated);
254}
Jason Evans9dcad2d2011-02-13 18:11:54 -0800255
Jason Evans289053c2009-06-22 12:08:42 -0700256/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800257 * End miscellaneous support functions.
Jason Evans289053c2009-06-22 12:08:42 -0700258 */
259/******************************************************************************/
260/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800261 * Begin initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700262 */
263
/*
 * Query the number of online CPUs.  Falls back to 1 if sysconf() fails.
 *
 * Bug fix: the error fallback was dead code — ret was unconditionally
 * overwritten with (unsigned)result afterward, so a sysconf() failure
 * produced UINT_MAX instead of the intended 1.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
Jason Evansb7924f52009-06-23 19:01:18 -0700279
/*
 * pthread TSD destructor for arenas_tsd: when a thread exits, decrement the
 * thread count of the arena it was assigned to, so future arena selection in
 * choose_arena_hard() sees accurate load.
 */
static void
arenas_cleanup(void *arg)
{
	arena_t *arena = (arena_t *)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}
289
#ifdef NO_TLS
/*
 * pthread TSD destructor for thread_allocated_tsd: free the per-thread
 * counter block allocated by thread_allocated_get_hard().
 */
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	/*
	 * NOTE(review): POSIX only invokes destructors for non-NULL values,
	 * so this guard is presumably just defensive — confirm.
	 */
	if (allocated != NULL)
		idalloc(allocated);
}
#endif
300
Jason Evans289053c2009-06-22 12:08:42 -0700301/*
302 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
303 * implementation has to take pains to avoid infinite recursion during
304 * initialization.
305 */
306static inline bool
307malloc_init(void)
308{
309
310 if (malloc_initialized == false)
311 return (malloc_init_hard());
312
313 return (false);
314}
315
/*
 * Extract the next "key:value" pair from the conf string at *opts_p.  On
 * success, sets *k_p/*klen_p and *v_p/*vlen_p to the (unterminated) key and
 * value spans, advances *opts_p past the pair, and returns false.  Returns
 * true at end of input or on a malformed string.
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	const char *cur = *opts_p;

	*k_p = cur;

	/* Scan the key: one or more of [A-Za-z0-9_], terminated by ':'. */
	for (;;) {
		char c = *cur;

		if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
		    (c >= '0' && c <= '9') || c == '_') {
			cur++;
			continue;
		}
		if (c == ':') {
			cur++;
			*klen_p = (uintptr_t)cur - 1 - (uintptr_t)*k_p;
			*v_p = cur;
			break;
		}
		if (c == '\0') {
			if (cur != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		}
		malloc_write("<jemalloc>: Malformed conf "
		    "string\n");
		return (true);
	}

	/* Scan the value: everything up to ',' or end of string. */
	for (;;) {
		char c = *cur;

		if (c == ',') {
			cur++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*cur == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)cur - 1 - (uintptr_t)*v_p;
			break;
		}
		if (c == '\0') {
			*vlen_p = (uintptr_t)cur - (uintptr_t)*v_p;
			break;
		}
		cur++;
	}

	*opts_p = cur;
	return (false);
}
395
396static void
397malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
398 size_t vlen)
399{
400 char buf[PATH_MAX + 1];
401
402 malloc_write("<jemalloc>: ");
403 malloc_write(msg);
404 malloc_write(": ");
405 memcpy(buf, k, klen);
406 memcpy(&buf[klen], ":", 1);
407 memcpy(&buf[klen+1], v, vlen);
408 buf[klen+1+vlen] = '\0';
409 malloc_write(buf);
410 malloc_write("\n");
411}
412
/*
 * Parse runtime configuration and set the opt_* globals.  Three sources are
 * processed in increasing order of precedence (later sources override
 * earlier ones): the compiled-in malloc_conf string, the name of the
 * /etc/malloc.conf symlink, and the MALLOC_CONF environment variable.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		/*
		 * Each CONF_HANDLE_* macro matches one option name; on a
		 * match it validates/stores the value and continues to the
		 * next pair, so falling through all of them means the pair
		 * was unrecognized.
		 */
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(junk)
				CONF_HANDLE_BOOL(zero)
			}
			if (config_sysv) {
				CONF_HANDLE_BOOL(sysv)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(tcache)
				CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
				    (sizeof(size_t) << 3) - 1)
				CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(prof)
				CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(prof_active)
				CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_accum)
				CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(prof_gdump)
				CONF_HANDLE_BOOL(prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
612
613static bool
614malloc_init_hard(void)
615{
Jason Evansb7924f52009-06-23 19:01:18 -0700616 arena_t *init_arenas[1];
Jason Evans289053c2009-06-22 12:08:42 -0700617
618 malloc_mutex_lock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700619 if (malloc_initialized || malloc_initializer == pthread_self()) {
Jason Evans289053c2009-06-22 12:08:42 -0700620 /*
621 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -0800622 * acquired init_lock, or this thread is the initializing
623 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -0700624 */
625 malloc_mutex_unlock(&init_lock);
626 return (false);
627 }
Jason Evansb7924f52009-06-23 19:01:18 -0700628 if (malloc_initializer != (unsigned long)0) {
629 /* Busy-wait until the initializing thread completes. */
630 do {
631 malloc_mutex_unlock(&init_lock);
632 CPU_SPINWAIT;
633 malloc_mutex_lock(&init_lock);
634 } while (malloc_initialized == false);
Jason Evans2541e1b2010-07-22 11:35:59 -0700635 malloc_mutex_unlock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700636 return (false);
637 }
Jason Evans289053c2009-06-22 12:08:42 -0700638
Jason Evansb7924f52009-06-23 19:01:18 -0700639#ifdef DYNAMIC_PAGE_SHIFT
Jason Evansc9658dd2009-06-22 14:44:08 -0700640 /* Get page size. */
641 {
642 long result;
643
644 result = sysconf(_SC_PAGESIZE);
645 assert(result != -1);
Jason Evans30fbef82011-11-05 21:06:55 -0700646 pagesize = (size_t)result;
Jason Evansb7924f52009-06-23 19:01:18 -0700647
648 /*
649 * We assume that pagesize is a power of 2 when calculating
Jason Evans94ad2b52009-12-29 00:09:15 -0800650 * pagesize_mask and lg_pagesize.
Jason Evansb7924f52009-06-23 19:01:18 -0700651 */
652 assert(((result - 1) & result) == 0);
653 pagesize_mask = result - 1;
Jason Evans94ad2b52009-12-29 00:09:15 -0800654 lg_pagesize = ffs((int)result) - 1;
Jason Evans289053c2009-06-22 12:08:42 -0700655 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700656#endif
Jason Evans289053c2009-06-22 12:08:42 -0700657
Jason Evans7372b152012-02-10 20:22:09 -0800658 if (config_prof)
659 prof_boot0();
Jason Evans289053c2009-06-22 12:08:42 -0700660
Jason Evanse7339702010-10-23 18:37:06 -0700661 malloc_conf_init();
Jason Evans289053c2009-06-22 12:08:42 -0700662
Jason Evansa0bf2422010-01-29 14:30:41 -0800663 /* Register fork handlers. */
664 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
665 jemalloc_postfork) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800666 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800667 if (opt_abort)
668 abort();
669 }
670
Jason Evans3c234352010-01-27 13:10:55 -0800671 if (ctl_boot()) {
672 malloc_mutex_unlock(&init_lock);
673 return (true);
674 }
675
Jason Evans03c22372010-01-03 12:10:42 -0800676 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -0700677 /* Print statistics at exit. */
Jason Evansa0bf2422010-01-29 14:30:41 -0800678 if (atexit(stats_print_atexit) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800679 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800680 if (opt_abort)
681 abort();
682 }
Jason Evans289053c2009-06-22 12:08:42 -0700683 }
684
Jason Evansa0bf2422010-01-29 14:30:41 -0800685 if (chunk_boot()) {
686 malloc_mutex_unlock(&init_lock);
687 return (true);
688 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700689
Jason Evans3c234352010-01-27 13:10:55 -0800690 if (base_boot()) {
691 malloc_mutex_unlock(&init_lock);
692 return (true);
693 }
694
Jason Evans7372b152012-02-10 20:22:09 -0800695 if (config_prof)
696 prof_boot1();
Jason Evans3383af62010-02-11 08:59:06 -0800697
Jason Evansb1726102012-02-28 16:50:47 -0800698 arena_boot();
Jason Evans289053c2009-06-22 12:08:42 -0700699
Jason Evans7372b152012-02-10 20:22:09 -0800700 if (config_tcache && tcache_boot()) {
Jason Evans84c8eef2011-03-16 10:30:13 -0700701 malloc_mutex_unlock(&init_lock);
702 return (true);
703 }
Jason Evans84cbbcb2009-12-29 00:09:15 -0800704
Jason Evanse476f8a2010-01-16 09:53:50 -0800705 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700706 malloc_mutex_unlock(&init_lock);
707 return (true);
708 }
Jason Evans289053c2009-06-22 12:08:42 -0700709
Jason Evans7372b152012-02-10 20:22:09 -0800710#ifdef NO_TLS
Jason Evans93443682010-10-20 17:39:18 -0700711 /* Initialize allocation counters before any allocations can occur. */
Jason Evans7372b152012-02-10 20:22:09 -0800712 if (config_stats && pthread_key_create(&thread_allocated_tsd,
713 thread_allocated_cleanup) != 0) {
Jason Evans93443682010-10-20 17:39:18 -0700714 malloc_mutex_unlock(&init_lock);
715 return (true);
716 }
717#endif
718
Jason Evans8e6f8b42011-11-03 18:40:03 -0700719 if (malloc_mutex_init(&arenas_lock))
720 return (true);
721
722 if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
723 malloc_mutex_unlock(&init_lock);
724 return (true);
725 }
726
Jason Evansb7924f52009-06-23 19:01:18 -0700727 /*
728 * Create enough scaffolding to allow recursive allocation in
729 * malloc_ncpus().
730 */
731 narenas = 1;
732 arenas = init_arenas;
733 memset(arenas, 0, sizeof(arena_t *) * narenas);
734
735 /*
736 * Initialize one arena here. The rest are lazily created in
737 * choose_arena_hard().
738 */
739 arenas_extend(0);
740 if (arenas[0] == NULL) {
741 malloc_mutex_unlock(&init_lock);
742 return (true);
743 }
744
Jason Evansb7924f52009-06-23 19:01:18 -0700745 /*
746 * Assign the initial arena to the initial thread, in order to avoid
747 * spurious creation of an extra arena if the application switches to
748 * threaded mode.
749 */
Jason Evans2dbecf12010-09-05 10:35:13 -0700750 ARENA_SET(arenas[0]);
Jason Evans597632b2011-03-18 13:41:33 -0700751 arenas[0]->nthreads++;
Jason Evansb7924f52009-06-23 19:01:18 -0700752
Jason Evans7372b152012-02-10 20:22:09 -0800753 if (config_prof && prof_boot2()) {
Jason Evans3383af62010-02-11 08:59:06 -0800754 malloc_mutex_unlock(&init_lock);
755 return (true);
756 }
Jason Evans3383af62010-02-11 08:59:06 -0800757
Jason Evansb7924f52009-06-23 19:01:18 -0700758 /* Get number of CPUs. */
759 malloc_initializer = pthread_self();
760 malloc_mutex_unlock(&init_lock);
761 ncpus = malloc_ncpus();
762 malloc_mutex_lock(&init_lock);
763
Jason Evanse7339702010-10-23 18:37:06 -0700764 if (opt_narenas == 0) {
Jason Evans289053c2009-06-22 12:08:42 -0700765 /*
Jason Evans5463a522009-12-29 00:09:15 -0800766 * For SMP systems, create more than one arena per CPU by
767 * default.
Jason Evans289053c2009-06-22 12:08:42 -0700768 */
Jason Evanse7339702010-10-23 18:37:06 -0700769 if (ncpus > 1)
770 opt_narenas = ncpus << 2;
771 else
772 opt_narenas = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700773 }
Jason Evanse7339702010-10-23 18:37:06 -0700774 narenas = opt_narenas;
775 /*
776 * Make sure that the arenas array can be allocated. In practice, this
777 * limit is enough to allow the allocator to function, but the ctl
778 * machinery will fail to allocate memory at far lower limits.
779 */
780 if (narenas > chunksize / sizeof(arena_t *)) {
781 char buf[UMAX2S_BUFSIZE];
Jason Evans289053c2009-06-22 12:08:42 -0700782
Jason Evanse7339702010-10-23 18:37:06 -0700783 narenas = chunksize / sizeof(arena_t *);
784 malloc_write("<jemalloc>: Reducing narenas to limit (");
785 malloc_write(u2s(narenas, 10, buf));
786 malloc_write(")\n");
Jason Evans289053c2009-06-22 12:08:42 -0700787 }
Jason Evans289053c2009-06-22 12:08:42 -0700788
Jason Evans289053c2009-06-22 12:08:42 -0700789 /* Allocate and initialize arenas. */
790 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
791 if (arenas == NULL) {
792 malloc_mutex_unlock(&init_lock);
793 return (true);
794 }
795 /*
796 * Zero the array. In practice, this should always be pre-zeroed,
797 * since it was just mmap()ed, but let's be sure.
798 */
799 memset(arenas, 0, sizeof(arena_t *) * narenas);
Jason Evansb7924f52009-06-23 19:01:18 -0700800 /* Copy the pointer to the one arena that was already initialized. */
801 arenas[0] = init_arenas[0];
Jason Evans289053c2009-06-22 12:08:42 -0700802
Jason Evans2dbecf12010-09-05 10:35:13 -0700803#ifdef JEMALLOC_ZONE
804 /* Register the custom zone. */
805 malloc_zone_register(create_zone());
806
807 /*
808 * Convert the default szone to an "overlay zone" that is capable of
809 * deallocating szone-allocated objects, but allocating new objects
810 * from jemalloc.
811 */
812 szone2ozone(malloc_default_zone());
813#endif
814
Jason Evans289053c2009-06-22 12:08:42 -0700815 malloc_initialized = true;
816 malloc_mutex_unlock(&init_lock);
817 return (false);
818}
819
#ifdef JEMALLOC_ZONE
/*
 * On Darwin, run allocator initialization eagerly from a constructor so the
 * custom malloc zone is registered before any application allocation; a
 * bootstrap failure here is unrecoverable, hence abort().
 */
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif
830
Jason Evans289053c2009-06-22 12:08:42 -0700831/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800832 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700833 */
834/******************************************************************************/
835/*
836 * Begin malloc(3)-compatible functions.
837 */
838
/*
 * malloc(3)-compatible allocation entry point.  Handles lazy allocator
 * initialization, size-0 semantics (standard vs. SysV), heap-profiling
 * sampling, and stats accounting before delegating to imalloc().
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
	size_t usize;
	/* Initialized only to silence spurious compiler warnings. */
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
		if (config_sysv == false || opt_sysv == false)
			size = 1;	/* Standard behavior: treat 0 as 1. */
		else {
			/* SysV behavior: malloc(0) returns NULL. */
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
			ret = NULL;
			goto RETURN;
		}
	}

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		/*
		 * Sampled small allocations are promoted to the smallest
		 * non-small size class so that they can be found by the
		 * profiler; arena_prof_promoted() records the requested usize.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}

RETURN:
	/* Profiling/stats bookkeeping only applies to successful allocation. */
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}
910
/*
 * Core implementation backing posix_memalign() (and the memalign()/valloc()
 * overrides).  On success, stores the allocation through *memptr and returns
 * 0; on failure, returns an errno value (EINVAL or ENOMEM) per
 * posix_memalign(3).  *memptr is written only on success (or on the SysV
 * size-0 path, which stores NULL).
 */
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	size_t usize;
	void *result;
	/* Initialized only to silence spurious compiler warnings. */
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
			if (config_sysv == false || opt_sysv == false)
				size = 1;
			else {
				/* SysV behavior: size 0 yields NULL, rc 0. */
				if (config_xmalloc && opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		/* sa2u() returns 0 on size/alignment overflow. */
		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				/*
				 * result == NULL, so ret is overwritten with
				 * ENOMEM by the failure handling below.
				 */
				result = NULL;
				ret = EINVAL;
			} else {
				/*
				 * Promote sampled small allocations so the
				 * profiler can find them; record actual usize.
				 */
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}
1018
Jason Evansa5070042011-08-12 13:48:27 -07001019JEMALLOC_ATTR(nonnull(1))
1020JEMALLOC_ATTR(visibility("default"))
1021int
1022JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
1023{
1024
1025 return imemalign(memptr, alignment, size);
1026}
1027
/*
 * calloc(3)-compatible entry point: allocate zeroed memory for num objects
 * of the given size, with overflow checking on the num*size product.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	/* Initialized only to silence spurious compiler warnings. */
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		/* Standard behavior rounds a zero request up to one byte. */
		if ((config_sysv == false || opt_sysv == false)
		    && ((num == 0) || (size == 0)))
			num_size = 1;
		else {
			/*
			 * SysV zero-size request; note that the shared
			 * ret == NULL handling at RETURN reports this as OOM.
			 */
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		/* Promote sampled small allocations (see malloc()). */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
	return (ret);
}
1107
/*
 * realloc(3)-compatible entry point.  Four main paths:
 *   - size == 0 with SysV semantics: free ptr (if any) and return NULL;
 *   - ptr != NULL: resize in place or move via iralloc();
 *   - ptr == NULL: plain allocation via imalloc();
 * with heap-profiling sampling and stats accounting layered on each path.
 */
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	/* cnt/old_ctx initialized only to silence compiler warnings. */
	prof_thr_cnt_t *cnt
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;
	prof_ctx_t *old_ctx
#ifdef JEMALLOC_CC_SILENCE
	    = NULL
#endif
	    ;

	if (size == 0) {
		if (config_sysv == false || opt_sysv == false)
			size = 1;	/* Standard: treat 0 as 1. */
		else {
			/* SysV: realloc(ptr, 0) frees ptr and returns NULL. */
			if (ptr != NULL) {
				if (config_prof || config_stats)
					old_size = isalloc(ptr);
				if (config_prof && opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
				idalloc(ptr);
			} else if (config_prof && opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
			/*
			 * NOTE(review): usize is never assigned on this path,
			 * yet prof_realloc() at RETURN receives it;
			 * presumably prof_realloc() ignores usize when
			 * ret == NULL — confirm against prof.c.
			 */
			ret = NULL;
			goto RETURN;
		}
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			/* Promote sampled small allocations (see malloc()). */
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			/* Failure leaves the original allocation intact. */
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
	return (ret);
}
1241
/*
 * free(3)-compatible deallocation entry point.  free(NULL) is a no-op.
 */
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

		/*
		 * Query the usable size only when profiling or stats need it;
		 * prof_free() must run before the memory is released below.
		 */
		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			ALLOCATED_ADD(0, usize);
		idalloc(ptr);
	}
}
1264
1265/*
1266 * End malloc(3)-compatible functions.
1267 */
1268/******************************************************************************/
1269/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001270 * Begin non-standard override functions.
1271 *
1272 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
1273 * entire point is to avoid accidental mixed allocator usage.
1274 */
1275#ifndef JEMALLOC_PREFIX
1276
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * memalign(3) override, implemented on top of imemalign().  Returns NULL on
 * failure (the errno-style return of imemalign() is intentionally dropped).
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	/*
	 * Initialize unconditionally: imemalign() stores through its output
	 * pointer only on success, so without this a failed allocation would
	 * return an indeterminate pointer (undefined behavior).  This also
	 * subsumes the old JEMALLOC_CC_SILENCE-only initialization.
	 */
	void *ret = NULL;

	imemalign(&ret, alignment, size);
	return (ret);
}
#endif
1292
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * valloc(3) override: page-aligned allocation via imemalign().  Returns NULL
 * on failure.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	/*
	 * Initialize unconditionally: imemalign() stores through its output
	 * pointer only on success, so without this a failed allocation would
	 * return an indeterminate pointer (undefined behavior).  This also
	 * subsumes the old JEMALLOC_CC_SILENCE-only initialization.
	 */
	void *ret = NULL;

	imemalign(&ret, PAGE_SIZE, size);
	return (ret);
}
#endif
1308
1309#endif /* JEMALLOC_PREFIX */
1310/*
1311 * End non-standard override functions.
1312 */
1313/******************************************************************************/
1314/*
Jason Evans289053c2009-06-22 12:08:42 -07001315 * Begin non-standard functions.
1316 */
1317
Jason Evanse476f8a2010-01-16 09:53:50 -08001318JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001319size_t
Jason Evanse476f8a2010-01-16 09:53:50 -08001320JEMALLOC_P(malloc_usable_size)(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001321{
Jason Evans569432c2009-12-29 00:09:15 -08001322 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001323
Jason Evans8e3c3c62010-09-17 15:46:18 -07001324 assert(malloc_initialized || malloc_initializer == pthread_self());
1325
Jason Evans7372b152012-02-10 20:22:09 -08001326 if (config_ivsalloc)
1327 ret = ivsalloc(ptr);
1328 else {
1329 assert(ptr != NULL);
1330 ret = isalloc(ptr);
1331 }
Jason Evans289053c2009-06-22 12:08:42 -07001332
Jason Evans569432c2009-12-29 00:09:15 -08001333 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001334}
1335
/*
 * Public stats dump: thin wrapper around the internal stats printer.
 * write_cb/cbopaque select an output sink; opts filters what is printed.
 */
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
1344
Jason Evans3c234352010-01-27 13:10:55 -08001345JEMALLOC_ATTR(visibility("default"))
1346int
1347JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
1348 size_t newlen)
1349{
1350
Jason Evans95833312010-01-27 13:45:21 -08001351 if (malloc_init())
1352 return (EAGAIN);
1353
Jason Evans3c234352010-01-27 13:10:55 -08001354 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1355}
1356
1357JEMALLOC_ATTR(visibility("default"))
1358int
1359JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
1360{
1361
Jason Evans95833312010-01-27 13:45:21 -08001362 if (malloc_init())
1363 return (EAGAIN);
1364
Jason Evans3c234352010-01-27 13:10:55 -08001365 return (ctl_nametomib(name, mibp, miblenp));
1366}
1367
1368JEMALLOC_ATTR(visibility("default"))
1369int
1370JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
1371 size_t *oldlenp, void *newp, size_t newlen)
1372{
1373
Jason Evans95833312010-01-27 13:45:21 -08001374 if (malloc_init())
1375 return (EAGAIN);
1376
Jason Evans3c234352010-01-27 13:10:55 -08001377 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1378}
1379
Jason Evans8e3c3c62010-09-17 15:46:18 -07001380JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001381iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001382{
1383
Jason Evans38d92102011-03-23 00:37:29 -07001384 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1385 NULL)));
1386
Jason Evans8e3c3c62010-09-17 15:46:18 -07001387 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001388 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001389 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001390 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001391 else
Jason Evans38d92102011-03-23 00:37:29 -07001392 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001393}
1394
/*
 * Experimental API: allocate at least size bytes, honoring the alignment and
 * zeroing requested via flags.  On success, stores the allocation through
 * *ptr, optionally reports the usable size via *rsize, and returns
 * ALLOCM_SUCCESS; on failure, sets *ptr to NULL and returns ALLOCM_ERR_OOM.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	/*
	 * lg_align == 0 maps to alignment 0 (none): (1 << 0) & (SIZE_T_MAX-1)
	 * masks the low bit off, yielding 0.
	 */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	/* A zero result from s2u()/sa2u() indicates size overflow. */
	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/* Promote sampled small allocations (see malloc()). */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		ALLOCATED_ADD(usize, 0);
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
1460
/*
 * Experimental API: resize the allocation at *ptr to at least size bytes
 * (ideally size+extra), honoring alignment/zero/no-move flags.  On success,
 * stores the (possibly moved) pointer through *ptr and returns
 * ALLOCM_SUCCESS.  Returns ALLOCM_ERR_NOT_MOVED if ALLOCM_NO_MOVE prevented
 * the resize, or ALLOCM_ERR_OOM on allocation failure; *ptr is untouched on
 * failure.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	/* lg_align == 0 maps to alignment 0 (none); see allocm(). */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE_SIZE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			/* Without stats, usize hasn't been computed yet. */
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats)
		ALLOCATED_ADD(usize, old_size);
	return (ALLOCM_SUCCESS);
ERR:
	/* A no-move failure is reported distinctly; otherwise fall into OOM. */
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
1553
Jason Evans6a0d2912010-09-20 16:44:23 -07001554JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001555JEMALLOC_ATTR(visibility("default"))
1556int
1557JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
1558{
1559 size_t sz;
1560
1561 assert(malloc_initialized || malloc_initializer == pthread_self());
1562
Jason Evans7372b152012-02-10 20:22:09 -08001563 if (config_ivsalloc)
1564 sz = ivsalloc(ptr);
1565 else {
1566 assert(ptr != NULL);
1567 sz = isalloc(ptr);
1568 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001569 assert(rsize != NULL);
1570 *rsize = sz;
1571
1572 return (ALLOCM_SUCCESS);
1573}
1574
Jason Evans6a0d2912010-09-20 16:44:23 -07001575JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001576JEMALLOC_ATTR(visibility("default"))
1577int
1578JEMALLOC_P(dallocm)(void *ptr, int flags)
1579{
Jason Evanse4f78462010-10-22 10:45:59 -07001580 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001581
1582 assert(ptr != NULL);
1583 assert(malloc_initialized || malloc_initializer == pthread_self());
1584
Jason Evans7372b152012-02-10 20:22:09 -08001585 if (config_stats)
Jason Evanse4f78462010-10-22 10:45:59 -07001586 usize = isalloc(ptr);
Jason Evans7372b152012-02-10 20:22:09 -08001587 if (config_prof && opt_prof) {
1588 if (config_stats == false)
1589 usize = isalloc(ptr);
Jason Evanse4f78462010-10-22 10:45:59 -07001590 prof_free(ptr, usize);
1591 }
Jason Evans7372b152012-02-10 20:22:09 -08001592 if (config_stats)
1593 ALLOCATED_ADD(0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001594 idalloc(ptr);
1595
1596 return (ALLOCM_SUCCESS);
1597}
1598
Jason Evans289053c2009-06-22 12:08:42 -07001599/*
1600 * End non-standard functions.
1601 */
1602/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001603
Jason Evans289053c2009-06-22 12:08:42 -07001604/*
1605 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07001606 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001607 */
1608
Jason Evans2dbecf12010-09-05 10:35:13 -07001609void
Jason Evans804c9ec2009-06-22 17:44:33 -07001610jemalloc_prefork(void)
Jason Evans289053c2009-06-22 12:08:42 -07001611{
Jason Evansfbbb6242010-01-24 17:56:48 -08001612 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001613
1614 /* Acquire all mutexes in a safe order. */
1615
Jason Evansfbbb6242010-01-24 17:56:48 -08001616 malloc_mutex_lock(&arenas_lock);
1617 for (i = 0; i < narenas; i++) {
1618 if (arenas[i] != NULL)
1619 malloc_mutex_lock(&arenas[i]->lock);
1620 }
Jason Evans289053c2009-06-22 12:08:42 -07001621
1622 malloc_mutex_lock(&base_mtx);
1623
1624 malloc_mutex_lock(&huge_mtx);
1625
Jason Evans7372b152012-02-10 20:22:09 -08001626 if (config_dss)
1627 malloc_mutex_lock(&dss_mtx);
Jason Evans289053c2009-06-22 12:08:42 -07001628}
1629
Jason Evans2dbecf12010-09-05 10:35:13 -07001630void
Jason Evans804c9ec2009-06-22 17:44:33 -07001631jemalloc_postfork(void)
Jason Evans289053c2009-06-22 12:08:42 -07001632{
1633 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001634
1635 /* Release all mutexes, now that fork() has completed. */
1636
Jason Evans7372b152012-02-10 20:22:09 -08001637 if (config_dss)
1638 malloc_mutex_unlock(&dss_mtx);
Jason Evans289053c2009-06-22 12:08:42 -07001639
1640 malloc_mutex_unlock(&huge_mtx);
1641
1642 malloc_mutex_unlock(&base_mtx);
1643
Jason Evans289053c2009-06-22 12:08:42 -07001644 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001645 if (arenas[i] != NULL)
1646 malloc_mutex_unlock(&arenas[i]->lock);
Jason Evans289053c2009-06-22 12:08:42 -07001647 }
Jason Evansfbbb6242010-01-24 17:56:48 -08001648 malloc_mutex_unlock(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001649}
Jason Evans2dbecf12010-09-05 10:35:13 -07001650
1651/******************************************************************************/