blob: 4d10e90a0700e54e548cf9c6b3b2ac1d699209d6 [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evans289053c2009-06-22 12:08:42 -07003
Jason Evans289053c2009-06-22 12:08:42 -07004/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -08005/* Data. */
Jason Evans289053c2009-06-22 12:08:42 -07006
/* Protects the arenas array, narenas, and each arena's nthreads count. */
malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* TSD key whose destructor (arenas_cleanup) unbinds a thread from its arena. */
pthread_key_t arenas_tsd;
#ifndef NO_TLS
/* Per-thread arena binding kept in real TLS when the platform supports it. */
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif

#ifdef JEMALLOC_STATS
# ifndef NO_TLS
/* Per-thread allocation counters, kept in TLS when available... */
__thread thread_allocated_t thread_allocated_tls;
# else
/* ...otherwise kept in pthread thread-specific data. */
pthread_key_t thread_allocated_tsd;
# endif
#endif

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

/*
 * Used to let the initializing thread recursively allocate.
 * NOTE(review): pthread_t is an opaque type; initializing and comparing it
 * with an integer constant is non-portable — confirm on target platforms.
 */
static pthread_t malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock =
#ifdef JEMALLOC_OSSPIN
    0
#else
    MALLOC_MUTEX_INITIALIZER
#endif
    ;

#ifdef DYNAMIC_PAGE_SHIFT
/* Page size probed at runtime (see malloc_init_hard()) when not fixed. */
size_t pagesize;
size_t pagesize_mask;
size_t lg_pagesize;
#endif

/* Number of CPUs, detected during initialization via malloc_ncpus(). */
unsigned ncpus;

/* Runtime configuration options. */
const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# endif
#else
bool opt_abort = false;
# ifdef JEMALLOC_FILL
bool opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool opt_zero = false;
#endif
/* Requested number of arenas; 0 means "derive from ncpus" (init_hard). */
size_t opt_narenas = 0;
Jason Evans289053c2009-06-22 12:08:42 -070070
Jason Evans289053c2009-06-22 12:08:42 -070071/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080072/* Function prototypes for non-inline static functions. */
Jason Evans289053c2009-06-22 12:08:42 -070073
Jason Evans698805c2010-03-03 17:45:38 -080074static void wrtmessage(void *cbopaque, const char *s);
Jason Evans03c22372010-01-03 12:10:42 -080075static void stats_print_atexit(void);
Jason Evansc9658dd2009-06-22 14:44:08 -070076static unsigned malloc_ncpus(void);
Jason Evans597632b2011-03-18 13:41:33 -070077static void arenas_cleanup(void *arg);
Jason Evans93443682010-10-20 17:39:18 -070078#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
79static void thread_allocated_cleanup(void *arg);
80#endif
Jason Evanse7339702010-10-23 18:37:06 -070081static bool malloc_conf_next(char const **opts_p, char const **k_p,
82 size_t *klen_p, char const **v_p, size_t *vlen_p);
83static void malloc_conf_error(const char *msg, const char *k, size_t klen,
84 const char *v, size_t vlen);
85static void malloc_conf_init(void);
Jason Evans289053c2009-06-22 12:08:42 -070086static bool malloc_init_hard(void);
87
Jason Evans289053c2009-06-22 12:08:42 -070088/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080089/* malloc_message() setup. */
Jason Evans289053c2009-06-22 12:08:42 -070090
#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
	/*
	 * Default backend for malloc_message(): write s to stderr.  cbopaque
	 * is unused; it exists only to match the callback signature.
	 */
#ifdef JEMALLOC_CC_SILENCE
	/* Capture write()'s result solely to silence warn_unused_result. */
	int result =
#endif
	    write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
	if (result < 0)
		result = errno;
#endif
}

/* Exported malloc_message function pointer, initialized to the default. */
void (*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
Jason Evansc9658dd2009-06-22 14:44:08 -0700111
112/******************************************************************************/
113/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800114 * Begin miscellaneous support functions.
Jason Evansb7924f52009-06-23 19:01:18 -0700115 */
116
Jason Evanse476f8a2010-01-16 09:53:50 -0800117/* Create a new arena and insert it into the arenas array at index ind. */
118arena_t *
119arenas_extend(unsigned ind)
Jason Evans289053c2009-06-22 12:08:42 -0700120{
121 arena_t *ret;
122
Jason Evanse476f8a2010-01-16 09:53:50 -0800123 /* Allocate enough space for trailing bins. */
Jason Evansc2fc8c82010-10-01 18:02:43 -0700124 ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
125 + (sizeof(arena_bin_t) * nbins));
Jason Evanse476f8a2010-01-16 09:53:50 -0800126 if (ret != NULL && arena_new(ret, ind) == false) {
127 arenas[ind] = ret;
128 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -0700129 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800130 /* Only reached if there is an OOM error. */
Jason Evans289053c2009-06-22 12:08:42 -0700131
Jason Evanse476f8a2010-01-16 09:53:50 -0800132 /*
133 * OOM here is quite inconvenient to propagate, since dealing with it
134 * would require a check for failure in the fast path. Instead, punt
135 * by using arenas[0]. In practice, this is an extremely unlikely
136 * failure.
137 */
Jason Evans698805c2010-03-03 17:45:38 -0800138 malloc_write("<jemalloc>: Error initializing arena\n");
Jason Evanse476f8a2010-01-16 09:53:50 -0800139 if (opt_abort)
140 abort();
Jason Evans289053c2009-06-22 12:08:42 -0700141
Jason Evanse476f8a2010-01-16 09:53:50 -0800142 return (arenas[0]);
Jason Evans289053c2009-06-22 12:08:42 -0700143}
144
Jason Evans289053c2009-06-22 12:08:42 -0700145/*
146 * Choose an arena based on a per-thread value (slow-path code only, called
147 * only by choose_arena()).
148 */
Jason Evanse476f8a2010-01-16 09:53:50 -0800149arena_t *
Jason Evans289053c2009-06-22 12:08:42 -0700150choose_arena_hard(void)
151{
152 arena_t *ret;
153
Jason Evans289053c2009-06-22 12:08:42 -0700154 if (narenas > 1) {
Jason Evans597632b2011-03-18 13:41:33 -0700155 unsigned i, choose, first_null;
156
157 choose = 0;
158 first_null = narenas;
Jason Evans3ee7a5c2009-12-29 00:09:15 -0800159 malloc_mutex_lock(&arenas_lock);
Jason Evans0657f122011-03-18 17:56:14 -0700160 assert(arenas[0] != NULL);
Jason Evans597632b2011-03-18 13:41:33 -0700161 for (i = 1; i < narenas; i++) {
162 if (arenas[i] != NULL) {
163 /*
164 * Choose the first arena that has the lowest
165 * number of threads assigned to it.
166 */
167 if (arenas[i]->nthreads <
168 arenas[choose]->nthreads)
169 choose = i;
170 } else if (first_null == narenas) {
171 /*
172 * Record the index of the first uninitialized
173 * arena, in case all extant arenas are in use.
174 *
175 * NB: It is possible for there to be
176 * discontinuities in terms of initialized
177 * versus uninitialized arenas, due to the
178 * "thread.arena" mallctl.
179 */
180 first_null = i;
181 }
182 }
183
184 if (arenas[choose] == 0 || first_null == narenas) {
185 /*
186 * Use an unloaded arena, or the least loaded arena if
187 * all arenas are already initialized.
188 */
189 ret = arenas[choose];
190 } else {
191 /* Initialize a new arena. */
192 ret = arenas_extend(first_null);
193 }
194 ret->nthreads++;
Jason Evans3ee7a5c2009-12-29 00:09:15 -0800195 malloc_mutex_unlock(&arenas_lock);
Jason Evans597632b2011-03-18 13:41:33 -0700196 } else {
Jason Evans289053c2009-06-22 12:08:42 -0700197 ret = arenas[0];
Jason Evans597632b2011-03-18 13:41:33 -0700198 malloc_mutex_lock(&arenas_lock);
199 ret->nthreads++;
200 malloc_mutex_unlock(&arenas_lock);
201 }
Jason Evans289053c2009-06-22 12:08:42 -0700202
Jason Evans2dbecf12010-09-05 10:35:13 -0700203 ARENA_SET(ret);
Jason Evans289053c2009-06-22 12:08:42 -0700204
205 return (ret);
206}
Jason Evans289053c2009-06-22 12:08:42 -0700207
Jason Evansa09f55c2010-09-20 16:05:41 -0700208/*
209 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
210 * provide a wrapper.
211 */
212int
213buferror(int errnum, char *buf, size_t buflen)
214{
215#ifdef _GNU_SOURCE
216 char *b = strerror_r(errno, buf, buflen);
217 if (b != buf) {
218 strncpy(buf, b, buflen);
219 buf[buflen-1] = '\0';
220 }
221 return (0);
222#else
223 return (strerror_r(errno, buf, buflen));
224#endif
225}
226
/*
 * atexit() hook (installed by malloc_init_hard() when opt_stats_print is
 * set) that dumps allocator statistics at program exit.
 */
static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			/*
			 * tcache_stats_merge() locks bins, so if any code is
			 * introduced that acquires both arena and bin locks in
			 * the opposite order, deadlocks may result.
			 */
			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
260
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
/*
 * Allocate this thread's allocation-counter block and register it in TSD.
 * On OOM, fall back to a process-wide static block (shared by all threads
 * that hit this path, so its counts will be inaccurate) rather than failing.
 */
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	/* Register for cleanup at thread exit, then zero the counters. */
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
#endif
282
Jason Evans289053c2009-06-22 12:08:42 -0700283/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800284 * End miscellaneous support functions.
Jason Evans289053c2009-06-22 12:08:42 -0700285 */
286/******************************************************************************/
287/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800288 * Begin initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700289 */
290
/*
 * Detect the number of online CPUs; returns 1 if detection fails.
 *
 * Fix: the error branch previously set ret = 1 but then fell through to
 * ret = (unsigned)result, turning a sysconf() failure (-1) into UINT_MAX.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
Jason Evansb7924f52009-06-23 19:01:18 -0700306
Jason Evans597632b2011-03-18 13:41:33 -0700307static void
308arenas_cleanup(void *arg)
309{
310 arena_t *arena = (arena_t *)arg;
311
312 malloc_mutex_lock(&arenas_lock);
313 arena->nthreads--;
314 malloc_mutex_unlock(&arenas_lock);
315}
316
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
/*
 * Destructor for thread_allocated_tsd: release the exiting thread's counter
 * block allocated by thread_allocated_get_hard().
 */
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	/* NOTE(review): guard presumably because idalloc() rejects NULL — confirm. */
	if (allocated != NULL)
		idalloc(allocated);
}
#endif
327
Jason Evans289053c2009-06-22 12:08:42 -0700328/*
329 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
330 * implementation has to take pains to avoid infinite recursion during
331 * initialization.
332 */
333static inline bool
334malloc_init(void)
335{
336
337 if (malloc_initialized == false)
338 return (malloc_init_hard());
339
340 return (false);
341}
342
/*
 * Extract the next "key:value" pair from the conf string at *opts_p.  On
 * success, returns false and sets *k_p/*klen_p and *v_p/*vlen_p to point
 * into the conf string (not NUL-terminated), advancing *opts_p past the
 * pair and any trailing comma.  Returns true at end of input or on a
 * malformed string.
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	/* Scan the key: [A-Za-z0-9_]* terminated by ':'. */
	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			/* opts is now one past ':'; key excludes it. */
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	/* Scan the value: everything up to ',' or end of string. */
	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
422
423static void
424malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
425 size_t vlen)
426{
427 char buf[PATH_MAX + 1];
428
429 malloc_write("<jemalloc>: ");
430 malloc_write(msg);
431 malloc_write(": ");
432 memcpy(buf, k, klen);
433 memcpy(&buf[klen], ":", 1);
434 memcpy(&buf[klen+1], v, vlen);
435 buf[klen+1+vlen] = '\0';
436 malloc_write(buf);
437 malloc_write("\n");
438}
439
/*
 * Initialize runtime options from three sources, applied in order so that
 * later sources override earlier ones: (0) the compiled-in malloc_conf
 * string, (1) the name of the /etc/malloc.conf symbolic link, and (2) the
 * MALLOC_CONF environment variable.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		/*
		 * Each CONF_HANDLE_* macro below matches one option name
		 * (k/klen), parses and validates the value (v/vlen), and
		 * continues the while loop on a match, so falling through
		 * all handlers means the pair was unrecognized.
		 */
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
			CONF_HANDLE_BOOL(junk)
			CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
			CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
			CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
			CONF_HANDLE_BOOL(tcache)
			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
			    (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
			CONF_HANDLE_BOOL(prof)
			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
			CONF_HANDLE_BOOL(prof_active)
			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_accum)
			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_gdump)
			CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
			CONF_HANDLE_BOOL(overcommit)
#endif
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}

		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}
657
658static bool
659malloc_init_hard(void)
660{
Jason Evansb7924f52009-06-23 19:01:18 -0700661 arena_t *init_arenas[1];
Jason Evans289053c2009-06-22 12:08:42 -0700662
663 malloc_mutex_lock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700664 if (malloc_initialized || malloc_initializer == pthread_self()) {
Jason Evans289053c2009-06-22 12:08:42 -0700665 /*
666 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -0800667 * acquired init_lock, or this thread is the initializing
668 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -0700669 */
670 malloc_mutex_unlock(&init_lock);
671 return (false);
672 }
Jason Evansb7924f52009-06-23 19:01:18 -0700673 if (malloc_initializer != (unsigned long)0) {
674 /* Busy-wait until the initializing thread completes. */
675 do {
676 malloc_mutex_unlock(&init_lock);
677 CPU_SPINWAIT;
678 malloc_mutex_lock(&init_lock);
679 } while (malloc_initialized == false);
Jason Evans2541e1b2010-07-22 11:35:59 -0700680 malloc_mutex_unlock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700681 return (false);
682 }
Jason Evans289053c2009-06-22 12:08:42 -0700683
Jason Evansb7924f52009-06-23 19:01:18 -0700684#ifdef DYNAMIC_PAGE_SHIFT
Jason Evansc9658dd2009-06-22 14:44:08 -0700685 /* Get page size. */
686 {
687 long result;
688
689 result = sysconf(_SC_PAGESIZE);
690 assert(result != -1);
Jason Evansb7924f52009-06-23 19:01:18 -0700691 pagesize = (unsigned)result;
692
693 /*
694 * We assume that pagesize is a power of 2 when calculating
Jason Evans94ad2b52009-12-29 00:09:15 -0800695 * pagesize_mask and lg_pagesize.
Jason Evansb7924f52009-06-23 19:01:18 -0700696 */
697 assert(((result - 1) & result) == 0);
698 pagesize_mask = result - 1;
Jason Evans94ad2b52009-12-29 00:09:15 -0800699 lg_pagesize = ffs((int)result) - 1;
Jason Evans289053c2009-06-22 12:08:42 -0700700 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700701#endif
Jason Evans289053c2009-06-22 12:08:42 -0700702
Jason Evans49d02932010-10-23 23:43:37 -0700703#ifdef JEMALLOC_PROF
Jason Evanse7339702010-10-23 18:37:06 -0700704 prof_boot0();
Jason Evans49d02932010-10-23 23:43:37 -0700705#endif
Jason Evans289053c2009-06-22 12:08:42 -0700706
Jason Evanse7339702010-10-23 18:37:06 -0700707 malloc_conf_init();
Jason Evans289053c2009-06-22 12:08:42 -0700708
Jason Evansa0bf2422010-01-29 14:30:41 -0800709 /* Register fork handlers. */
710 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
711 jemalloc_postfork) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800712 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800713 if (opt_abort)
714 abort();
715 }
716
Jason Evans3c234352010-01-27 13:10:55 -0800717 if (ctl_boot()) {
718 malloc_mutex_unlock(&init_lock);
719 return (true);
720 }
721
Jason Evans03c22372010-01-03 12:10:42 -0800722 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -0700723 /* Print statistics at exit. */
Jason Evansa0bf2422010-01-29 14:30:41 -0800724 if (atexit(stats_print_atexit) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800725 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800726 if (opt_abort)
727 abort();
728 }
Jason Evans289053c2009-06-22 12:08:42 -0700729 }
730
Jason Evansa0bf2422010-01-29 14:30:41 -0800731 if (chunk_boot()) {
732 malloc_mutex_unlock(&init_lock);
733 return (true);
734 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700735
Jason Evans3c234352010-01-27 13:10:55 -0800736 if (base_boot()) {
737 malloc_mutex_unlock(&init_lock);
738 return (true);
739 }
740
Jason Evans3383af62010-02-11 08:59:06 -0800741#ifdef JEMALLOC_PROF
Jason Evanse7339702010-10-23 18:37:06 -0700742 prof_boot1();
Jason Evans3383af62010-02-11 08:59:06 -0800743#endif
744
Jason Evansa0bf2422010-01-29 14:30:41 -0800745 if (arena_boot()) {
Jason Evans289053c2009-06-22 12:08:42 -0700746 malloc_mutex_unlock(&init_lock);
747 return (true);
748 }
749
Jason Evans84cbbcb2009-12-29 00:09:15 -0800750#ifdef JEMALLOC_TCACHE
Jason Evans84c8eef2011-03-16 10:30:13 -0700751 if (tcache_boot()) {
752 malloc_mutex_unlock(&init_lock);
753 return (true);
754 }
Jason Evans84cbbcb2009-12-29 00:09:15 -0800755#endif
756
Jason Evanse476f8a2010-01-16 09:53:50 -0800757 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700758 malloc_mutex_unlock(&init_lock);
759 return (true);
760 }
Jason Evans289053c2009-06-22 12:08:42 -0700761
Jason Evans93443682010-10-20 17:39:18 -0700762#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
763 /* Initialize allocation counters before any allocations can occur. */
764 if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
765 != 0) {
766 malloc_mutex_unlock(&init_lock);
767 return (true);
768 }
769#endif
770
Jason Evansb7924f52009-06-23 19:01:18 -0700771 /*
772 * Create enough scaffolding to allow recursive allocation in
773 * malloc_ncpus().
774 */
775 narenas = 1;
776 arenas = init_arenas;
777 memset(arenas, 0, sizeof(arena_t *) * narenas);
778
779 /*
780 * Initialize one arena here. The rest are lazily created in
781 * choose_arena_hard().
782 */
783 arenas_extend(0);
784 if (arenas[0] == NULL) {
785 malloc_mutex_unlock(&init_lock);
786 return (true);
787 }
788
Jason Evansb7924f52009-06-23 19:01:18 -0700789 /*
790 * Assign the initial arena to the initial thread, in order to avoid
791 * spurious creation of an extra arena if the application switches to
792 * threaded mode.
793 */
Jason Evans2dbecf12010-09-05 10:35:13 -0700794 ARENA_SET(arenas[0]);
Jason Evans597632b2011-03-18 13:41:33 -0700795 arenas[0]->nthreads++;
Jason Evansb7924f52009-06-23 19:01:18 -0700796
Jason Evans819d11b2011-03-15 14:25:56 -0700797 if (malloc_mutex_init(&arenas_lock))
798 return (true);
Jason Evansb7924f52009-06-23 19:01:18 -0700799
Jason Evansc9573982011-03-23 00:27:50 -0700800 if (pthread_key_create(&arenas_tsd, arenas_cleanup) != 0) {
801 malloc_mutex_unlock(&init_lock);
802 return (true);
803 }
804
Jason Evans3383af62010-02-11 08:59:06 -0800805#ifdef JEMALLOC_PROF
Jason Evanse7339702010-10-23 18:37:06 -0700806 if (prof_boot2()) {
Jason Evans3383af62010-02-11 08:59:06 -0800807 malloc_mutex_unlock(&init_lock);
808 return (true);
809 }
810#endif
811
Jason Evansb7924f52009-06-23 19:01:18 -0700812 /* Get number of CPUs. */
813 malloc_initializer = pthread_self();
814 malloc_mutex_unlock(&init_lock);
815 ncpus = malloc_ncpus();
816 malloc_mutex_lock(&init_lock);
817
Jason Evanse7339702010-10-23 18:37:06 -0700818 if (opt_narenas == 0) {
Jason Evans289053c2009-06-22 12:08:42 -0700819 /*
Jason Evans5463a522009-12-29 00:09:15 -0800820 * For SMP systems, create more than one arena per CPU by
821 * default.
Jason Evans289053c2009-06-22 12:08:42 -0700822 */
Jason Evanse7339702010-10-23 18:37:06 -0700823 if (ncpus > 1)
824 opt_narenas = ncpus << 2;
825 else
826 opt_narenas = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700827 }
Jason Evanse7339702010-10-23 18:37:06 -0700828 narenas = opt_narenas;
829 /*
830 * Make sure that the arenas array can be allocated. In practice, this
831 * limit is enough to allow the allocator to function, but the ctl
832 * machinery will fail to allocate memory at far lower limits.
833 */
834 if (narenas > chunksize / sizeof(arena_t *)) {
835 char buf[UMAX2S_BUFSIZE];
Jason Evans289053c2009-06-22 12:08:42 -0700836
Jason Evanse7339702010-10-23 18:37:06 -0700837 narenas = chunksize / sizeof(arena_t *);
838 malloc_write("<jemalloc>: Reducing narenas to limit (");
839 malloc_write(u2s(narenas, 10, buf));
840 malloc_write(")\n");
Jason Evans289053c2009-06-22 12:08:42 -0700841 }
Jason Evans289053c2009-06-22 12:08:42 -0700842
Jason Evans289053c2009-06-22 12:08:42 -0700843 /* Allocate and initialize arenas. */
844 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
845 if (arenas == NULL) {
846 malloc_mutex_unlock(&init_lock);
847 return (true);
848 }
849 /*
850 * Zero the array. In practice, this should always be pre-zeroed,
851 * since it was just mmap()ed, but let's be sure.
852 */
853 memset(arenas, 0, sizeof(arena_t *) * narenas);
Jason Evansb7924f52009-06-23 19:01:18 -0700854 /* Copy the pointer to the one arena that was already initialized. */
855 arenas[0] = init_arenas[0];
Jason Evans289053c2009-06-22 12:08:42 -0700856
Jason Evans2dbecf12010-09-05 10:35:13 -0700857#ifdef JEMALLOC_ZONE
858 /* Register the custom zone. */
859 malloc_zone_register(create_zone());
860
861 /*
862 * Convert the default szone to an "overlay zone" that is capable of
863 * deallocating szone-allocated objects, but allocating new objects
864 * from jemalloc.
865 */
866 szone2ozone(malloc_default_zone());
867#endif
868
Jason Evans289053c2009-06-22 12:08:42 -0700869 malloc_initialized = true;
870 malloc_mutex_unlock(&init_lock);
871 return (false);
872}
873
#ifdef JEMALLOC_ZONE
/*
 * On Darwin, run full initialization from a constructor (malloc_init_hard()
 * registers the malloc zone — see its JEMALLOC_ZONE section); abort if
 * initialization fails, since the allocator would be unusable.
 */
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif
884
Jason Evans289053c2009-06-22 12:08:42 -0700885/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800886 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700887 */
888/******************************************************************************/
889/*
890 * Begin malloc(3)-compatible functions.
891 */
892
Jason Evans9ad48232010-01-03 11:59:20 -0800893JEMALLOC_ATTR(malloc)
Jason Evanse476f8a2010-01-16 09:53:50 -0800894JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700895void *
Jason Evanse476f8a2010-01-16 09:53:50 -0800896JEMALLOC_P(malloc)(size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700897{
898 void *ret;
Jason Evans93443682010-10-20 17:39:18 -0700899#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
900 size_t usize
901# ifdef JEMALLOC_CC_SILENCE
902 = 0
903# endif
904 ;
905#endif
Jason Evans6109fe02010-02-10 10:37:56 -0800906#ifdef JEMALLOC_PROF
Jason Evans355b4382010-09-20 19:20:48 -0700907 prof_thr_cnt_t *cnt
908# ifdef JEMALLOC_CC_SILENCE
909 = NULL
910# endif
911 ;
Jason Evans6109fe02010-02-10 10:37:56 -0800912#endif
Jason Evans289053c2009-06-22 12:08:42 -0700913
914 if (malloc_init()) {
915 ret = NULL;
Jason Evansf2518142009-12-29 00:09:15 -0800916 goto OOM;
Jason Evans289053c2009-06-22 12:08:42 -0700917 }
918
919 if (size == 0) {
Jason Evansb7924f52009-06-23 19:01:18 -0700920#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -0700921 if (opt_sysv == false)
Jason Evansb7924f52009-06-23 19:01:18 -0700922#endif
Jason Evans289053c2009-06-22 12:08:42 -0700923 size = 1;
Jason Evansb7924f52009-06-23 19:01:18 -0700924#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -0700925 else {
Jason Evansf2518142009-12-29 00:09:15 -0800926# ifdef JEMALLOC_XMALLOC
927 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -0800928 malloc_write("<jemalloc>: Error in malloc(): "
929 "invalid size 0\n");
Jason Evansf2518142009-12-29 00:09:15 -0800930 abort();
931 }
932# endif
Jason Evans289053c2009-06-22 12:08:42 -0700933 ret = NULL;
934 goto RETURN;
935 }
Jason Evansb7924f52009-06-23 19:01:18 -0700936#endif
Jason Evans289053c2009-06-22 12:08:42 -0700937 }
938
Jason Evans6109fe02010-02-10 10:37:56 -0800939#ifdef JEMALLOC_PROF
Jason Evans0b270a92010-03-31 16:45:04 -0700940 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -0700941 usize = s2u(size);
942 if ((cnt = prof_alloc_prep(usize)) == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -0700943 ret = NULL;
944 goto OOM;
945 }
Jason Evans93443682010-10-20 17:39:18 -0700946 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evans0b270a92010-03-31 16:45:04 -0700947 small_maxclass) {
948 ret = imalloc(small_maxclass+1);
949 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700950 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -0700951 } else
952 ret = imalloc(size);
953 } else
Jason Evans6109fe02010-02-10 10:37:56 -0800954#endif
Jason Evans93443682010-10-20 17:39:18 -0700955 {
956#ifdef JEMALLOC_STATS
957 usize = s2u(size);
958#endif
Jason Evans0b270a92010-03-31 16:45:04 -0700959 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -0700960 }
Jason Evans289053c2009-06-22 12:08:42 -0700961
Jason Evansf2518142009-12-29 00:09:15 -0800962OOM:
Jason Evans289053c2009-06-22 12:08:42 -0700963 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -0700964#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -0700965 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -0800966 malloc_write("<jemalloc>: Error in malloc(): "
967 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -0700968 abort();
969 }
Jason Evansb7924f52009-06-23 19:01:18 -0700970#endif
Jason Evans289053c2009-06-22 12:08:42 -0700971 errno = ENOMEM;
972 }
973
Jason Evansf2518142009-12-29 00:09:15 -0800974#ifdef JEMALLOC_SYSV
975RETURN:
976#endif
Jason Evans6109fe02010-02-10 10:37:56 -0800977#ifdef JEMALLOC_PROF
978 if (opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700979 prof_malloc(ret, usize, cnt);
980#endif
981#ifdef JEMALLOC_STATS
982 if (ret != NULL) {
983 assert(usize == isalloc(ret));
984 ALLOCATED_ADD(usize, 0);
985 }
Jason Evans6109fe02010-02-10 10:37:56 -0800986#endif
Jason Evans289053c2009-06-22 12:08:42 -0700987 return (ret);
988}
989
Jason Evans9ad48232010-01-03 11:59:20 -0800990JEMALLOC_ATTR(nonnull(1))
Jason Evanse476f8a2010-01-16 09:53:50 -0800991JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700992int
Jason Evanse476f8a2010-01-16 09:53:50 -0800993JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700994{
995 int ret;
Jason Evans93443682010-10-20 17:39:18 -0700996 size_t usize
Jason Evans38d92102011-03-23 00:37:29 -0700997#ifdef JEMALLOC_CC_SILENCE
Jason Evans93443682010-10-20 17:39:18 -0700998 = 0
Jason Evans93443682010-10-20 17:39:18 -0700999#endif
Jason Evans38d92102011-03-23 00:37:29 -07001000 ;
1001 void *result;
Jason Evans6109fe02010-02-10 10:37:56 -08001002#ifdef JEMALLOC_PROF
Jason Evans355b4382010-09-20 19:20:48 -07001003 prof_thr_cnt_t *cnt
1004# ifdef JEMALLOC_CC_SILENCE
1005 = NULL
1006# endif
1007 ;
Jason Evans6109fe02010-02-10 10:37:56 -08001008#endif
Jason Evans289053c2009-06-22 12:08:42 -07001009
1010 if (malloc_init())
1011 result = NULL;
1012 else {
Jason Evansf2518142009-12-29 00:09:15 -08001013 if (size == 0) {
1014#ifdef JEMALLOC_SYSV
1015 if (opt_sysv == false)
1016#endif
1017 size = 1;
1018#ifdef JEMALLOC_SYSV
1019 else {
1020# ifdef JEMALLOC_XMALLOC
1021 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001022 malloc_write("<jemalloc>: Error in "
1023 "posix_memalign(): invalid size "
1024 "0\n");
Jason Evansf2518142009-12-29 00:09:15 -08001025 abort();
1026 }
1027# endif
1028 result = NULL;
1029 *memptr = NULL;
1030 ret = 0;
1031 goto RETURN;
1032 }
1033#endif
1034 }
1035
Jason Evans289053c2009-06-22 12:08:42 -07001036 /* Make sure that alignment is a large enough power of 2. */
1037 if (((alignment - 1) & alignment) != 0
1038 || alignment < sizeof(void *)) {
Jason Evansb7924f52009-06-23 19:01:18 -07001039#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001040 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001041 malloc_write("<jemalloc>: Error in "
1042 "posix_memalign(): invalid alignment\n");
Jason Evans289053c2009-06-22 12:08:42 -07001043 abort();
1044 }
Jason Evansb7924f52009-06-23 19:01:18 -07001045#endif
Jason Evans289053c2009-06-22 12:08:42 -07001046 result = NULL;
1047 ret = EINVAL;
1048 goto RETURN;
1049 }
1050
Jason Evans38d92102011-03-23 00:37:29 -07001051 usize = sa2u(size, alignment, NULL);
1052 if (usize == 0) {
1053 result = NULL;
1054 ret = ENOMEM;
1055 goto RETURN;
1056 }
1057
Jason Evans6109fe02010-02-10 10:37:56 -08001058#ifdef JEMALLOC_PROF
Jason Evans0b270a92010-03-31 16:45:04 -07001059 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001060 if ((cnt = prof_alloc_prep(usize)) == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -07001061 result = NULL;
1062 ret = EINVAL;
1063 } else {
1064 if (prof_promote && (uintptr_t)cnt !=
Jason Evans93443682010-10-20 17:39:18 -07001065 (uintptr_t)1U && usize <= small_maxclass) {
Jason Evans38d92102011-03-23 00:37:29 -07001066 assert(sa2u(small_maxclass+1,
1067 alignment, NULL) != 0);
1068 result = ipalloc(sa2u(small_maxclass+1,
1069 alignment, NULL), alignment, false);
Jason Evans0b270a92010-03-31 16:45:04 -07001070 if (result != NULL) {
1071 arena_prof_promoted(result,
Jason Evans93443682010-10-20 17:39:18 -07001072 usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001073 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001074 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001075 result = ipalloc(usize, alignment,
Jason Evans8e3c3c62010-09-17 15:46:18 -07001076 false);
1077 }
Jason Evans0b270a92010-03-31 16:45:04 -07001078 }
Jason Evans6109fe02010-02-10 10:37:56 -08001079 } else
1080#endif
Jason Evans38d92102011-03-23 00:37:29 -07001081 result = ipalloc(usize, alignment, false);
Jason Evans289053c2009-06-22 12:08:42 -07001082 }
1083
1084 if (result == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001085#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001086 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001087 malloc_write("<jemalloc>: Error in posix_memalign(): "
1088 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001089 abort();
1090 }
Jason Evansb7924f52009-06-23 19:01:18 -07001091#endif
Jason Evans289053c2009-06-22 12:08:42 -07001092 ret = ENOMEM;
1093 goto RETURN;
1094 }
1095
1096 *memptr = result;
1097 ret = 0;
1098
1099RETURN:
Jason Evans93443682010-10-20 17:39:18 -07001100#ifdef JEMALLOC_STATS
1101 if (result != NULL) {
1102 assert(usize == isalloc(result));
1103 ALLOCATED_ADD(usize, 0);
1104 }
1105#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001106#ifdef JEMALLOC_PROF
1107 if (opt_prof && result != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001108 prof_malloc(result, usize, cnt);
Jason Evans6109fe02010-02-10 10:37:56 -08001109#endif
Jason Evans289053c2009-06-22 12:08:42 -07001110 return (ret);
1111}
1112
Jason Evans9ad48232010-01-03 11:59:20 -08001113JEMALLOC_ATTR(malloc)
Jason Evanse476f8a2010-01-16 09:53:50 -08001114JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001115void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001116JEMALLOC_P(calloc)(size_t num, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001117{
1118 void *ret;
1119 size_t num_size;
Jason Evans93443682010-10-20 17:39:18 -07001120#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1121 size_t usize
1122# ifdef JEMALLOC_CC_SILENCE
1123 = 0
1124# endif
1125 ;
1126#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001127#ifdef JEMALLOC_PROF
Jason Evans355b4382010-09-20 19:20:48 -07001128 prof_thr_cnt_t *cnt
1129# ifdef JEMALLOC_CC_SILENCE
1130 = NULL
1131# endif
1132 ;
Jason Evans6109fe02010-02-10 10:37:56 -08001133#endif
Jason Evans289053c2009-06-22 12:08:42 -07001134
1135 if (malloc_init()) {
1136 num_size = 0;
1137 ret = NULL;
1138 goto RETURN;
1139 }
1140
1141 num_size = num * size;
1142 if (num_size == 0) {
Jason Evansb7924f52009-06-23 19:01:18 -07001143#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001144 if ((opt_sysv == false) && ((num == 0) || (size == 0)))
Jason Evansb7924f52009-06-23 19:01:18 -07001145#endif
Jason Evans289053c2009-06-22 12:08:42 -07001146 num_size = 1;
Jason Evansb7924f52009-06-23 19:01:18 -07001147#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001148 else {
1149 ret = NULL;
1150 goto RETURN;
1151 }
Jason Evansb7924f52009-06-23 19:01:18 -07001152#endif
Jason Evans289053c2009-06-22 12:08:42 -07001153 /*
1154 * Try to avoid division here. We know that it isn't possible to
1155 * overflow during multiplication if neither operand uses any of the
1156 * most significant half of the bits in a size_t.
1157 */
1158 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
1159 && (num_size / size != num)) {
1160 /* size_t overflow. */
1161 ret = NULL;
1162 goto RETURN;
1163 }
1164
Jason Evans6109fe02010-02-10 10:37:56 -08001165#ifdef JEMALLOC_PROF
Jason Evans0b270a92010-03-31 16:45:04 -07001166 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001167 usize = s2u(num_size);
1168 if ((cnt = prof_alloc_prep(usize)) == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -07001169 ret = NULL;
1170 goto RETURN;
1171 }
Jason Evans93443682010-10-20 17:39:18 -07001172 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
Jason Evans0b270a92010-03-31 16:45:04 -07001173 <= small_maxclass) {
1174 ret = icalloc(small_maxclass+1);
1175 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001176 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001177 } else
1178 ret = icalloc(num_size);
1179 } else
Jason Evans6109fe02010-02-10 10:37:56 -08001180#endif
Jason Evans93443682010-10-20 17:39:18 -07001181 {
1182#ifdef JEMALLOC_STATS
1183 usize = s2u(num_size);
1184#endif
Jason Evans0b270a92010-03-31 16:45:04 -07001185 ret = icalloc(num_size);
Jason Evans93443682010-10-20 17:39:18 -07001186 }
Jason Evans289053c2009-06-22 12:08:42 -07001187
1188RETURN:
1189 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001190#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001191 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001192 malloc_write("<jemalloc>: Error in calloc(): out of "
1193 "memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001194 abort();
1195 }
Jason Evansb7924f52009-06-23 19:01:18 -07001196#endif
Jason Evans289053c2009-06-22 12:08:42 -07001197 errno = ENOMEM;
1198 }
1199
Jason Evans6109fe02010-02-10 10:37:56 -08001200#ifdef JEMALLOC_PROF
1201 if (opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001202 prof_malloc(ret, usize, cnt);
1203#endif
1204#ifdef JEMALLOC_STATS
1205 if (ret != NULL) {
1206 assert(usize == isalloc(ret));
1207 ALLOCATED_ADD(usize, 0);
1208 }
Jason Evans6109fe02010-02-10 10:37:56 -08001209#endif
Jason Evans289053c2009-06-22 12:08:42 -07001210 return (ret);
1211}
1212
Jason Evanse476f8a2010-01-16 09:53:50 -08001213JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001214void *
Jason Evanse476f8a2010-01-16 09:53:50 -08001215JEMALLOC_P(realloc)(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -07001216{
1217 void *ret;
Jason Evans93443682010-10-20 17:39:18 -07001218#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1219 size_t usize
Jason Evans355b4382010-09-20 19:20:48 -07001220# ifdef JEMALLOC_CC_SILENCE
1221 = 0
1222# endif
1223 ;
Jason Evans93443682010-10-20 17:39:18 -07001224 size_t old_size = 0;
1225#endif
1226#ifdef JEMALLOC_PROF
Jason Evans355b4382010-09-20 19:20:48 -07001227 prof_thr_cnt_t *cnt
1228# ifdef JEMALLOC_CC_SILENCE
1229 = NULL
1230# endif
1231 ;
1232 prof_ctx_t *old_ctx
1233# ifdef JEMALLOC_CC_SILENCE
1234 = NULL
1235# endif
1236 ;
Jason Evans6109fe02010-02-10 10:37:56 -08001237#endif
1238
Jason Evans289053c2009-06-22 12:08:42 -07001239 if (size == 0) {
Jason Evansb7924f52009-06-23 19:01:18 -07001240#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001241 if (opt_sysv == false)
Jason Evansb7924f52009-06-23 19:01:18 -07001242#endif
Jason Evans289053c2009-06-22 12:08:42 -07001243 size = 1;
Jason Evansb7924f52009-06-23 19:01:18 -07001244#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001245 else {
Jason Evanse476f8a2010-01-16 09:53:50 -08001246 if (ptr != NULL) {
Jason Evans93443682010-10-20 17:39:18 -07001247#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1248 old_size = isalloc(ptr);
1249#endif
Jason Evansfe5faa22010-02-11 13:38:12 -08001250#ifdef JEMALLOC_PROF
1251 if (opt_prof) {
Jason Evans50651562010-04-13 16:13:54 -07001252 old_ctx = prof_ctx_get(ptr);
Jason Evans6109fe02010-02-10 10:37:56 -08001253 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001254 }
Jason Evanse476f8a2010-01-16 09:53:50 -08001255#endif
Jason Evans289053c2009-06-22 12:08:42 -07001256 idalloc(ptr);
Jason Evanse476f8a2010-01-16 09:53:50 -08001257 }
Jason Evansfe5faa22010-02-11 13:38:12 -08001258#ifdef JEMALLOC_PROF
1259 else if (opt_prof) {
Jason Evans50651562010-04-13 16:13:54 -07001260 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001261 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001262 }
1263#endif
Jason Evans289053c2009-06-22 12:08:42 -07001264 ret = NULL;
1265 goto RETURN;
1266 }
Jason Evansb7924f52009-06-23 19:01:18 -07001267#endif
Jason Evans289053c2009-06-22 12:08:42 -07001268 }
1269
1270 if (ptr != NULL) {
Jason Evansa25d0a82009-11-09 14:57:38 -08001271 assert(malloc_initialized || malloc_initializer ==
1272 pthread_self());
Jason Evans289053c2009-06-22 12:08:42 -07001273
Jason Evans93443682010-10-20 17:39:18 -07001274#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1275 old_size = isalloc(ptr);
1276#endif
Jason Evansfe5faa22010-02-11 13:38:12 -08001277#ifdef JEMALLOC_PROF
1278 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001279 usize = s2u(size);
Jason Evans50651562010-04-13 16:13:54 -07001280 old_ctx = prof_ctx_get(ptr);
Jason Evans93443682010-10-20 17:39:18 -07001281 if ((cnt = prof_alloc_prep(usize)) == NULL) {
Jason Evans6109fe02010-02-10 10:37:56 -08001282 ret = NULL;
1283 goto OOM;
1284 }
Jason Evans0b270a92010-03-31 16:45:04 -07001285 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
Jason Evans93443682010-10-20 17:39:18 -07001286 usize <= small_maxclass) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001287 ret = iralloc(ptr, small_maxclass+1, 0, 0,
1288 false, false);
Jason Evans0b270a92010-03-31 16:45:04 -07001289 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -07001290 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001291 } else
Jason Evans8e3c3c62010-09-17 15:46:18 -07001292 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans0b270a92010-03-31 16:45:04 -07001293 } else
Jason Evans569432c2009-12-29 00:09:15 -08001294#endif
Jason Evans93443682010-10-20 17:39:18 -07001295 {
1296#ifdef JEMALLOC_STATS
1297 usize = s2u(size);
1298#endif
Jason Evans8e3c3c62010-09-17 15:46:18 -07001299 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans93443682010-10-20 17:39:18 -07001300 }
Jason Evans289053c2009-06-22 12:08:42 -07001301
Jason Evans6109fe02010-02-10 10:37:56 -08001302#ifdef JEMALLOC_PROF
1303OOM:
1304#endif
Jason Evans289053c2009-06-22 12:08:42 -07001305 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001306#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001307 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001308 malloc_write("<jemalloc>: Error in realloc(): "
1309 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001310 abort();
1311 }
Jason Evansb7924f52009-06-23 19:01:18 -07001312#endif
Jason Evans289053c2009-06-22 12:08:42 -07001313 errno = ENOMEM;
1314 }
1315 } else {
Jason Evansfe5faa22010-02-11 13:38:12 -08001316#ifdef JEMALLOC_PROF
Jason Evans93443682010-10-20 17:39:18 -07001317 if (opt_prof)
Jason Evans50651562010-04-13 16:13:54 -07001318 old_ctx = NULL;
Jason Evans569432c2009-12-29 00:09:15 -08001319#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001320 if (malloc_init()) {
1321#ifdef JEMALLOC_PROF
1322 if (opt_prof)
1323 cnt = NULL;
1324#endif
1325 ret = NULL;
1326 } else {
1327#ifdef JEMALLOC_PROF
Jason Evans0b270a92010-03-31 16:45:04 -07001328 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001329 usize = s2u(size);
1330 if ((cnt = prof_alloc_prep(usize)) == NULL)
Jason Evans0b270a92010-03-31 16:45:04 -07001331 ret = NULL;
1332 else {
1333 if (prof_promote && (uintptr_t)cnt !=
Jason Evans93443682010-10-20 17:39:18 -07001334 (uintptr_t)1U && usize <=
Jason Evans0b270a92010-03-31 16:45:04 -07001335 small_maxclass) {
1336 ret = imalloc(small_maxclass+1);
1337 if (ret != NULL) {
1338 arena_prof_promoted(ret,
Jason Evans93443682010-10-20 17:39:18 -07001339 usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001340 }
1341 } else
1342 ret = imalloc(size);
1343 }
Jason Evans6109fe02010-02-10 10:37:56 -08001344 } else
1345#endif
Jason Evans93443682010-10-20 17:39:18 -07001346 {
1347#ifdef JEMALLOC_STATS
1348 usize = s2u(size);
1349#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001350 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -07001351 }
Jason Evans6109fe02010-02-10 10:37:56 -08001352 }
Jason Evans569432c2009-12-29 00:09:15 -08001353
Jason Evans289053c2009-06-22 12:08:42 -07001354 if (ret == NULL) {
Jason Evansb7924f52009-06-23 19:01:18 -07001355#ifdef JEMALLOC_XMALLOC
Jason Evans289053c2009-06-22 12:08:42 -07001356 if (opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001357 malloc_write("<jemalloc>: Error in realloc(): "
1358 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001359 abort();
1360 }
Jason Evansb7924f52009-06-23 19:01:18 -07001361#endif
Jason Evans289053c2009-06-22 12:08:42 -07001362 errno = ENOMEM;
1363 }
1364 }
1365
Jason Evansb8f0a652009-06-29 09:41:43 -07001366#ifdef JEMALLOC_SYSV
Jason Evans289053c2009-06-22 12:08:42 -07001367RETURN:
Jason Evansb8f0a652009-06-29 09:41:43 -07001368#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001369#ifdef JEMALLOC_PROF
1370 if (opt_prof)
Jason Evanse4f78462010-10-22 10:45:59 -07001371 prof_realloc(ret, usize, cnt, old_size, old_ctx);
Jason Evans93443682010-10-20 17:39:18 -07001372#endif
1373#ifdef JEMALLOC_STATS
1374 if (ret != NULL) {
1375 assert(usize == isalloc(ret));
1376 ALLOCATED_ADD(usize, old_size);
1377 }
Jason Evans6109fe02010-02-10 10:37:56 -08001378#endif
Jason Evans289053c2009-06-22 12:08:42 -07001379 return (ret);
1380}
1381
Jason Evanse476f8a2010-01-16 09:53:50 -08001382JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001383void
Jason Evanse476f8a2010-01-16 09:53:50 -08001384JEMALLOC_P(free)(void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001385{
1386
Jason Evans289053c2009-06-22 12:08:42 -07001387 if (ptr != NULL) {
Jason Evanse4f78462010-10-22 10:45:59 -07001388#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1389 size_t usize;
1390#endif
1391
Jason Evansa25d0a82009-11-09 14:57:38 -08001392 assert(malloc_initialized || malloc_initializer ==
1393 pthread_self());
Jason Evans289053c2009-06-22 12:08:42 -07001394
Jason Evanse4f78462010-10-22 10:45:59 -07001395#ifdef JEMALLOC_STATS
1396 usize = isalloc(ptr);
1397#endif
Jason Evans6109fe02010-02-10 10:37:56 -08001398#ifdef JEMALLOC_PROF
Jason Evanse4f78462010-10-22 10:45:59 -07001399 if (opt_prof) {
1400# ifndef JEMALLOC_STATS
1401 usize = isalloc(ptr);
1402# endif
1403 prof_free(ptr, usize);
1404 }
Jason Evans6109fe02010-02-10 10:37:56 -08001405#endif
Jason Evans93443682010-10-20 17:39:18 -07001406#ifdef JEMALLOC_STATS
Jason Evanse4f78462010-10-22 10:45:59 -07001407 ALLOCATED_ADD(0, usize);
Jason Evans93443682010-10-20 17:39:18 -07001408#endif
Jason Evans289053c2009-06-22 12:08:42 -07001409 idalloc(ptr);
1410 }
1411}
1412
1413/*
1414 * End malloc(3)-compatible functions.
1415 */
1416/******************************************************************************/
1417/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001418 * Begin non-standard override functions.
1419 *
1420 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
1421 * entire point is to avoid accidental mixed allocator usage.
1422 */
1423#ifndef JEMALLOC_PREFIX
1424
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * Legacy memalign(3) override, implemented in terms of posix_memalign().
 * NOTE(review): without JEMALLOC_CC_SILENCE the return code of
 * posix_memalign() is ignored, so ret may be returned uninitialized if
 * the call fails — confirm this matches the intended legacy semantics.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif
1443
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * Legacy valloc(3) override: page-aligned allocation via
 * posix_memalign() with PAGE_SIZE alignment.
 * NOTE(review): without JEMALLOC_CC_SILENCE the return code of
 * posix_memalign() is ignored, so ret may be returned uninitialized if
 * the call fails — confirm this matches the intended legacy semantics.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif
1462
1463#endif /* JEMALLOC_PREFIX */
1464/*
1465 * End non-standard override functions.
1466 */
1467/******************************************************************************/
1468/*
Jason Evans289053c2009-06-22 12:08:42 -07001469 * Begin non-standard functions.
1470 */
1471
Jason Evanse476f8a2010-01-16 09:53:50 -08001472JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001473size_t
Jason Evanse476f8a2010-01-16 09:53:50 -08001474JEMALLOC_P(malloc_usable_size)(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001475{
Jason Evans569432c2009-12-29 00:09:15 -08001476 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001477
Jason Evans8e3c3c62010-09-17 15:46:18 -07001478 assert(malloc_initialized || malloc_initializer == pthread_self());
1479
Jason Evans2dbecf12010-09-05 10:35:13 -07001480#ifdef JEMALLOC_IVSALLOC
1481 ret = ivsalloc(ptr);
1482#else
Jason Evans289053c2009-06-22 12:08:42 -07001483 assert(ptr != NULL);
Jason Evans569432c2009-12-29 00:09:15 -08001484 ret = isalloc(ptr);
Jason Evans2dbecf12010-09-05 10:35:13 -07001485#endif
Jason Evans289053c2009-06-22 12:08:42 -07001486
Jason Evans569432c2009-12-29 00:09:15 -08001487 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001488}
1489
Jason Evans4201af02010-01-24 02:53:40 -08001490JEMALLOC_ATTR(visibility("default"))
1491void
Jason Evans698805c2010-03-03 17:45:38 -08001492JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
1493 void *cbopaque, const char *opts)
Jason Evans4201af02010-01-24 02:53:40 -08001494{
1495
Jason Evans698805c2010-03-03 17:45:38 -08001496 stats_print(write_cb, cbopaque, opts);
Jason Evans4201af02010-01-24 02:53:40 -08001497}
1498
Jason Evans3c234352010-01-27 13:10:55 -08001499JEMALLOC_ATTR(visibility("default"))
1500int
1501JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
1502 size_t newlen)
1503{
1504
Jason Evans95833312010-01-27 13:45:21 -08001505 if (malloc_init())
1506 return (EAGAIN);
1507
Jason Evans3c234352010-01-27 13:10:55 -08001508 return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1509}
1510
1511JEMALLOC_ATTR(visibility("default"))
1512int
1513JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
1514{
1515
Jason Evans95833312010-01-27 13:45:21 -08001516 if (malloc_init())
1517 return (EAGAIN);
1518
Jason Evans3c234352010-01-27 13:10:55 -08001519 return (ctl_nametomib(name, mibp, miblenp));
1520}
1521
1522JEMALLOC_ATTR(visibility("default"))
1523int
1524JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
1525 size_t *oldlenp, void *newp, size_t newlen)
1526{
1527
Jason Evans95833312010-01-27 13:45:21 -08001528 if (malloc_init())
1529 return (EAGAIN);
1530
Jason Evans3c234352010-01-27 13:10:55 -08001531 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1532}
1533
Jason Evans8e3c3c62010-09-17 15:46:18 -07001534JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001535iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001536{
1537
Jason Evans38d92102011-03-23 00:37:29 -07001538 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1539 NULL)));
1540
Jason Evans8e3c3c62010-09-17 15:46:18 -07001541 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001542 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001543 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001544 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001545 else
Jason Evans38d92102011-03-23 00:37:29 -07001546 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001547}
1548
Jason Evans6a0d2912010-09-20 16:44:23 -07001549JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001550JEMALLOC_ATTR(visibility("default"))
1551int
1552JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
1553{
1554 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001555 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001556 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1557 & (SIZE_T_MAX-1));
1558 bool zero = flags & ALLOCM_ZERO;
1559#ifdef JEMALLOC_PROF
1560 prof_thr_cnt_t *cnt;
1561#endif
1562
1563 assert(ptr != NULL);
1564 assert(size != 0);
1565
1566 if (malloc_init())
1567 goto OOM;
1568
Jason Evans38d92102011-03-23 00:37:29 -07001569 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
1570 NULL);
1571 if (usize == 0)
1572 goto OOM;
1573
Jason Evans8e3c3c62010-09-17 15:46:18 -07001574#ifdef JEMALLOC_PROF
1575 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001576 if ((cnt = prof_alloc_prep(usize)) == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001577 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001578 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evans8e3c3c62010-09-17 15:46:18 -07001579 small_maxclass) {
Jason Evans38d92102011-03-23 00:37:29 -07001580 size_t usize_promoted = (alignment == 0) ?
1581 s2u(small_maxclass+1) : sa2u(small_maxclass+1,
1582 alignment, NULL);
1583 assert(usize_promoted != 0);
1584 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001585 if (p == NULL)
1586 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001587 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001588 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001589 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001590 if (p == NULL)
1591 goto OOM;
1592 }
Jason Evans93443682010-10-20 17:39:18 -07001593
1594 if (rsize != NULL)
1595 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001596 } else
1597#endif
1598 {
Jason Evans38d92102011-03-23 00:37:29 -07001599 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001600 if (p == NULL)
1601 goto OOM;
Jason Evans93443682010-10-20 17:39:18 -07001602#ifndef JEMALLOC_STATS
1603 if (rsize != NULL)
1604#endif
1605 {
Jason Evans93443682010-10-20 17:39:18 -07001606#ifdef JEMALLOC_STATS
1607 if (rsize != NULL)
1608#endif
1609 *rsize = usize;
1610 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001611 }
1612
1613 *ptr = p;
Jason Evans93443682010-10-20 17:39:18 -07001614#ifdef JEMALLOC_STATS
1615 assert(usize == isalloc(p));
1616 ALLOCATED_ADD(usize, 0);
1617#endif
Jason Evans8e3c3c62010-09-17 15:46:18 -07001618 return (ALLOCM_SUCCESS);
1619OOM:
1620#ifdef JEMALLOC_XMALLOC
1621 if (opt_xmalloc) {
1622 malloc_write("<jemalloc>: Error in allocm(): "
1623 "out of memory\n");
1624 abort();
1625 }
1626#endif
1627 *ptr = NULL;
1628 return (ALLOCM_ERR_OOM);
1629}
1630
Jason Evans6a0d2912010-09-20 16:44:23 -07001631JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001632JEMALLOC_ATTR(visibility("default"))
1633int
1634JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
1635 int flags)
1636{
1637 void *p, *q;
Jason Evans93443682010-10-20 17:39:18 -07001638 size_t usize;
1639#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
1640 size_t old_size;
1641#endif
Jason Evans8e3c3c62010-09-17 15:46:18 -07001642 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1643 & (SIZE_T_MAX-1));
1644 bool zero = flags & ALLOCM_ZERO;
1645 bool no_move = flags & ALLOCM_NO_MOVE;
1646#ifdef JEMALLOC_PROF
Jason Evans8e3c3c62010-09-17 15:46:18 -07001647 prof_thr_cnt_t *cnt;
1648 prof_ctx_t *old_ctx;
1649#endif
1650
1651 assert(ptr != NULL);
1652 assert(*ptr != NULL);
1653 assert(size != 0);
1654 assert(SIZE_T_MAX - size >= extra);
1655 assert(malloc_initialized || malloc_initializer == pthread_self());
1656
1657 p = *ptr;
1658#ifdef JEMALLOC_PROF
1659 if (opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001660 /*
1661 * usize isn't knowable before iralloc() returns when extra is
1662 * non-zero. Therefore, compute its maximum possible value and
1663 * use that in prof_alloc_prep() to decide whether to capture a
1664 * backtrace. prof_realloc() will use the actual usize to
1665 * decide whether to sample.
1666 */
1667 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1668 sa2u(size+extra, alignment, NULL);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001669 old_size = isalloc(p);
1670 old_ctx = prof_ctx_get(p);
Jason Evans93443682010-10-20 17:39:18 -07001671 if ((cnt = prof_alloc_prep(max_usize)) == NULL)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001672 goto OOM;
Jason Evans183ba502011-08-11 22:51:00 -07001673 /*
1674 * Use minimum usize to determine whether promotion may happen.
1675 */
1676 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1677 && ((alignment == 0) ? s2u(size) : sa2u(size,
1678 alignment, NULL)) <= small_maxclass) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001679 q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
1680 size+extra) ? 0 : size+extra - (small_maxclass+1),
1681 alignment, zero, no_move);
1682 if (q == NULL)
1683 goto ERR;
Jason Evans183ba502011-08-11 22:51:00 -07001684 if (max_usize < PAGE_SIZE) {
1685 usize = max_usize;
1686 arena_prof_promoted(q, usize);
Jason Evansb493ce22011-08-12 11:28:47 -07001687 } else
1688 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001689 } else {
1690 q = iralloc(p, size, extra, alignment, zero, no_move);
1691 if (q == NULL)
1692 goto ERR;
Jason Evans93443682010-10-20 17:39:18 -07001693 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001694 }
Jason Evanse4f78462010-10-22 10:45:59 -07001695 prof_realloc(q, usize, cnt, old_size, old_ctx);
Jason Evanseacb8962011-03-23 00:30:30 -07001696 if (rsize != NULL)
1697 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001698 } else
1699#endif
1700 {
Jason Evans93443682010-10-20 17:39:18 -07001701#ifdef JEMALLOC_STATS
1702 old_size = isalloc(p);
1703#endif
Jason Evans8e3c3c62010-09-17 15:46:18 -07001704 q = iralloc(p, size, extra, alignment, zero, no_move);
1705 if (q == NULL)
1706 goto ERR;
Jason Evans93443682010-10-20 17:39:18 -07001707#ifndef JEMALLOC_STATS
1708 if (rsize != NULL)
1709#endif
1710 {
1711 usize = isalloc(q);
1712#ifdef JEMALLOC_STATS
1713 if (rsize != NULL)
1714#endif
1715 *rsize = usize;
1716 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001717 }
1718
1719 *ptr = q;
Jason Evans93443682010-10-20 17:39:18 -07001720#ifdef JEMALLOC_STATS
1721 ALLOCATED_ADD(usize, old_size);
1722#endif
Jason Evans8e3c3c62010-09-17 15:46:18 -07001723 return (ALLOCM_SUCCESS);
1724ERR:
1725 if (no_move)
1726 return (ALLOCM_ERR_NOT_MOVED);
1727#ifdef JEMALLOC_PROF
1728OOM:
1729#endif
1730#ifdef JEMALLOC_XMALLOC
1731 if (opt_xmalloc) {
1732 malloc_write("<jemalloc>: Error in rallocm(): "
1733 "out of memory\n");
1734 abort();
1735 }
1736#endif
1737 return (ALLOCM_ERR_OOM);
1738}
1739
Jason Evans6a0d2912010-09-20 16:44:23 -07001740JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001741JEMALLOC_ATTR(visibility("default"))
1742int
1743JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
1744{
1745 size_t sz;
1746
1747 assert(malloc_initialized || malloc_initializer == pthread_self());
1748
1749#ifdef JEMALLOC_IVSALLOC
1750 sz = ivsalloc(ptr);
1751#else
1752 assert(ptr != NULL);
1753 sz = isalloc(ptr);
1754#endif
1755 assert(rsize != NULL);
1756 *rsize = sz;
1757
1758 return (ALLOCM_SUCCESS);
1759}
1760
/*
 * Deallocate the object that ptr refers to.  flags is currently unused.
 * Always returns ALLOCM_SUCCESS.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
	/* usize is only needed by the profiling and/or stats code below. */
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize;
#endif

	assert(ptr != NULL);
	/* The allocator must be up, except during recursive bootstrap. */
	assert(malloc_initialized || malloc_initializer == pthread_self());

	/* Query the usable size before freeing; invalid afterward. */
#ifdef JEMALLOC_STATS
	usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof) {
# ifndef JEMALLOC_STATS
		/* Not already computed by the stats code above. */
		usize = isalloc(ptr);
# endif
		prof_free(ptr, usize);
	}
#endif
	/* Record the deallocation in this thread's allocated counters. */
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(0, usize);
#endif
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
1791
Jason Evans289053c2009-06-22 12:08:42 -07001792/*
1793 * End non-standard functions.
1794 */
1795/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001796
Jason Evans289053c2009-06-22 12:08:42 -07001797/*
1798 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07001799 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001800 */
1801
/*
 * Acquire every allocator mutex prior to fork(), so that no lock is held
 * by a thread that will not exist in the child.  Intended to be registered
 * via pthread_atfork() (or called directly by a threading library).
 *
 * Lock order here must be the exact reverse of the unlock order in
 * jemalloc_postfork(): arenas_lock, each arena, base, huge, dss, swap.
 */
void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		/* Arena slots are created lazily; skip empty ones. */
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
#endif
}
1827
/*
 * Release every allocator mutex after fork() completes, in both parent and
 * child.  Counterpart to jemalloc_prefork(); unlocks in the reverse order
 * of acquisition: swap, dss, huge, base, each arena, arenas_lock.
 */
void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
	malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		/* Only arenas that exist were locked in jemalloc_prefork(). */
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
Jason Evans2dbecf12010-09-05 10:35:13 -07001853
1854/******************************************************************************/