#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char	*je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# else
bool	opt_junk = false;
# endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool		malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
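
/*
 * Illustrative note (added commentary, not in the original source): on
 * systems that provide utrace(2) (e.g. FreeBSD), enabling the "utrace"
 * option makes each entry point log a malloc_utrace_t record.  For example,
 * p = malloc(4096) is recorded as UTRACE(0, 4096, p) and free(p) as
 * UTRACE(p, 0, 0), so a ktrace(1)/kdump(1) session can reconstruct the
 * allocation history.
 */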

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error querying the CPU count; fall back to one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
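
/*
 * Illustrative sketch (added commentary, not in the original source): given
 * the conf string "junk:true,narenas:8", the first call to malloc_conf_next()
 * yields k = "junk" (klen = 4) and v = "true" (vlen = 4), the second yields
 * k = "narenas" (klen = 7) and v = "8" (vlen = 1), and the caller's loop then
 * stops at the terminating NUL.
 */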

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

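			/*
			 * Illustrative note (added commentary, not in the
			 * original source): each CONF_HANDLE_* use below
			 * expands to an if-block matching one key.  For
			 * example, given the pair "abort:true",
			 * CONF_HANDLE_BOOL(opt_abort, abort) matches the key
			 * "abort", sets opt_abort to true, and continues to
			 * the next key/value pair.
			 */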
			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, utrace)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    prof_prefix, "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}
725
726/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800727 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700728 */
729/******************************************************************************/
730/*
731 * Begin malloc(3)-compatible functions.
732 */
733
Jason Evans9ad48232010-01-03 11:59:20 -0800734JEMALLOC_ATTR(malloc)
Jason Evanse476f8a2010-01-16 09:53:50 -0800735JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700736void *
Jason Evans0a5489e2012-03-01 17:19:20 -0800737je_malloc(size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700738{
739 void *ret;
Jason Evans7372b152012-02-10 20:22:09 -0800740 size_t usize;
Jason Evans9225a192012-03-23 15:39:07 -0700741 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700742
743 if (malloc_init()) {
744 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -0700745 goto label_oom;
Jason Evans289053c2009-06-22 12:08:42 -0700746 }
747
Jason Evansc90ad712012-02-28 20:31:37 -0800748 if (size == 0)
749 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700750
Jason Evans7372b152012-02-10 20:22:09 -0800751 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -0700752 usize = s2u(size);
Jason Evansa5070042011-08-12 13:48:27 -0700753 PROF_ALLOC_PREP(1, usize, cnt);
754 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -0700755 ret = NULL;
Jason Evansa1ee7832012-04-10 15:07:44 -0700756 goto label_oom;
Jason Evans0b270a92010-03-31 16:45:04 -0700757 }
Jason Evans93443682010-10-20 17:39:18 -0700758 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -0800759 SMALL_MAXCLASS) {
760 ret = imalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -0700761 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700762 arena_prof_promoted(ret, usize);
Jason Evans0b270a92010-03-31 16:45:04 -0700763 } else
764 ret = imalloc(size);
Jason Evans7372b152012-02-10 20:22:09 -0800765 } else {
766 if (config_stats)
767 usize = s2u(size);
Jason Evans0b270a92010-03-31 16:45:04 -0700768 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -0700769 }
Jason Evans289053c2009-06-22 12:08:42 -0700770
Jason Evansa1ee7832012-04-10 15:07:44 -0700771label_oom:
Jason Evans289053c2009-06-22 12:08:42 -0700772 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -0800773 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -0800774 malloc_write("<jemalloc>: Error in malloc(): "
775 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -0700776 abort();
777 }
778 errno = ENOMEM;
779 }
Jason Evans7372b152012-02-10 20:22:09 -0800780 if (config_prof && opt_prof && ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700781 prof_malloc(ret, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -0800782 if (config_stats && ret != NULL) {
Jason Evans93443682010-10-20 17:39:18 -0700783 assert(usize == isalloc(ret));
Jason Evanscd9a1342012-03-21 18:33:03 -0700784 thread_allocated_tsd_get()->allocated += usize;
Jason Evans93443682010-10-20 17:39:18 -0700785 }
Jason Evansb1476112012-04-05 13:36:17 -0700786 UTRACE(0, size, ret);
Jason Evans289053c2009-06-22 12:08:42 -0700787 return (ret);
788}
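
/*
 * Note (added commentary, not in the original source): when profiling is
 * enabled, the prof_promote path above deliberately requests
 * SMALL_MAXCLASS+1 bytes for sampled small allocations, pushing them into a
 * size class whose metadata can carry a per-allocation profiling context;
 * arena_prof_promoted() then records the originally requested usize.  The
 * same pattern recurs in the other allocation entry points below.
 */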
789
Jason Evans9ad48232010-01-03 11:59:20 -0800790JEMALLOC_ATTR(nonnull(1))
Jason Evansa5070042011-08-12 13:48:27 -0700791#ifdef JEMALLOC_PROF
792/*
Jason Evans7372b152012-02-10 20:22:09 -0800793 * Avoid any uncertainty as to how many backtrace frames to ignore in
Jason Evansa5070042011-08-12 13:48:27 -0700794 * PROF_ALLOC_PREP().
795 */
796JEMALLOC_ATTR(noinline)
797#endif
798static int
Jason Evans59656312012-02-28 21:37:38 -0800799imemalign(void **memptr, size_t alignment, size_t size,
Jason Evans0a0bbf62012-03-13 12:55:21 -0700800 size_t min_alignment)
Jason Evans289053c2009-06-22 12:08:42 -0700801{
802 int ret;
Jason Evans7372b152012-02-10 20:22:09 -0800803 size_t usize;
Jason Evans38d92102011-03-23 00:37:29 -0700804 void *result;
Jason Evans9225a192012-03-23 15:39:07 -0700805 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans289053c2009-06-22 12:08:42 -0700806
Jason Evans0a0bbf62012-03-13 12:55:21 -0700807 assert(min_alignment != 0);
808
Jason Evans289053c2009-06-22 12:08:42 -0700809 if (malloc_init())
810 result = NULL;
811 else {
Jason Evansc90ad712012-02-28 20:31:37 -0800812 if (size == 0)
813 size = 1;
Jason Evansf2518142009-12-29 00:09:15 -0800814
Jason Evans289053c2009-06-22 12:08:42 -0700815 /* Make sure that alignment is a large enough power of 2. */
816 if (((alignment - 1) & alignment) != 0
Jason Evans0a0bbf62012-03-13 12:55:21 -0700817 || (alignment < min_alignment)) {
Jason Evans7372b152012-02-10 20:22:09 -0800818 if (config_xmalloc && opt_xmalloc) {
Jason Evans0a0bbf62012-03-13 12:55:21 -0700819 malloc_write("<jemalloc>: Error allocating "
820 "aligned memory: invalid alignment\n");
Jason Evans289053c2009-06-22 12:08:42 -0700821 abort();
822 }
823 result = NULL;
824 ret = EINVAL;
Jason Evansa1ee7832012-04-10 15:07:44 -0700825 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -0700826 }
827
Jason Evans38d92102011-03-23 00:37:29 -0700828 usize = sa2u(size, alignment, NULL);
829 if (usize == 0) {
830 result = NULL;
831 ret = ENOMEM;
Jason Evansa1ee7832012-04-10 15:07:44 -0700832 goto label_return;
Jason Evans38d92102011-03-23 00:37:29 -0700833 }
834
Jason Evans7372b152012-02-10 20:22:09 -0800835 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -0700836 PROF_ALLOC_PREP(2, usize, cnt);
837 if (cnt == NULL) {
Jason Evans0b270a92010-03-31 16:45:04 -0700838 result = NULL;
839 ret = EINVAL;
840 } else {
841 if (prof_promote && (uintptr_t)cnt !=
Jason Evansb1726102012-02-28 16:50:47 -0800842 (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
843 assert(sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -0700844 alignment, NULL) != 0);
Jason Evansb1726102012-02-28 16:50:47 -0800845 result = ipalloc(sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -0700846 alignment, NULL), alignment, false);
Jason Evans0b270a92010-03-31 16:45:04 -0700847 if (result != NULL) {
848 arena_prof_promoted(result,
Jason Evans93443682010-10-20 17:39:18 -0700849 usize);
Jason Evans0b270a92010-03-31 16:45:04 -0700850 }
Jason Evans8e3c3c62010-09-17 15:46:18 -0700851 } else {
Jason Evans38d92102011-03-23 00:37:29 -0700852 result = ipalloc(usize, alignment,
Jason Evans8e3c3c62010-09-17 15:46:18 -0700853 false);
854 }
Jason Evans0b270a92010-03-31 16:45:04 -0700855 }
Jason Evans6109fe02010-02-10 10:37:56 -0800856 } else
Jason Evans38d92102011-03-23 00:37:29 -0700857 result = ipalloc(usize, alignment, false);
Jason Evans289053c2009-06-22 12:08:42 -0700858 }
859
860 if (result == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -0800861 if (config_xmalloc && opt_xmalloc) {
Jason Evans0a0bbf62012-03-13 12:55:21 -0700862 malloc_write("<jemalloc>: Error allocating aligned "
863 "memory: out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -0700864 abort();
865 }
866 ret = ENOMEM;
Jason Evansa1ee7832012-04-10 15:07:44 -0700867 goto label_return;
Jason Evans289053c2009-06-22 12:08:42 -0700868 }
869
870 *memptr = result;
871 ret = 0;
872
Jason Evansa1ee7832012-04-10 15:07:44 -0700873label_return:
Jason Evans7372b152012-02-10 20:22:09 -0800874 if (config_stats && result != NULL) {
Jason Evans93443682010-10-20 17:39:18 -0700875 assert(usize == isalloc(result));
Jason Evanscd9a1342012-03-21 18:33:03 -0700876 thread_allocated_tsd_get()->allocated += usize;
Jason Evans93443682010-10-20 17:39:18 -0700877 }
Jason Evans7372b152012-02-10 20:22:09 -0800878 if (config_prof && opt_prof && result != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700879 prof_malloc(result, usize, cnt);
Jason Evansb1476112012-04-05 13:36:17 -0700880 UTRACE(0, size, result);
Jason Evans289053c2009-06-22 12:08:42 -0700881 return (ret);
882}
883
Jason Evansa5070042011-08-12 13:48:27 -0700884JEMALLOC_ATTR(nonnull(1))
885JEMALLOC_ATTR(visibility("default"))
886int
Jason Evans0a5489e2012-03-01 17:19:20 -0800887je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -0700888{
889
Jason Evans0a0bbf62012-03-13 12:55:21 -0700890 return imemalign(memptr, alignment, size, sizeof(void *));
891}
892
893JEMALLOC_ATTR(malloc)
894JEMALLOC_ATTR(visibility("default"))
895void *
896je_aligned_alloc(size_t alignment, size_t size)
897{
898 void *ret;
899 int err;
900
901 if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
902 ret = NULL;
903 errno = err;
904 }
905 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -0700906}
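
/*
 * Note (added commentary, not in the original source): the two wrappers
 * above differ only in min_alignment.  POSIX requires posix_memalign()'s
 * alignment to be a power-of-two multiple of sizeof(void *), whereas
 * aligned_alloc() only requires a power of two supported by the
 * implementation, hence the minimum of 1.
 */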

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}
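	/*
	 * Worked example (added commentary, not in the original source): on a
	 * 64-bit system, sizeof(size_t) << 2 is 32, so the mask covers the
	 * high 32 bits of a size_t.  If num and size both fit in 32 bits,
	 * their product cannot overflow 64 bits and the division is skipped;
	 * the relatively expensive num_size / size check runs only when at
	 * least one operand has a high bit set.
	 */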

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

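/*
 * Illustrative expansion (added commentary, not in the original source): if
 * jemalloc_defs.h contains "#define je_malloc malloc", then
 * is_malloc(je_malloc) expands via is_malloc_(malloc) to malloc_is_malloc,
 * which is defined as 1 above; otherwise it expands to the undefined token
 * malloc_is_je_malloc, which evaluates to 0 in the #if below.
 */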
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else
		ret = (ptr != NULL) ? isalloc(ptr) : 0;

	return (ret);
}

JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL
1271
Jason Evans8e3c3c62010-09-17 15:46:18 -07001272JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001273iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001274{
1275
Jason Evans38d92102011-03-23 00:37:29 -07001276 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1277 NULL)));
1278
Jason Evans8e3c3c62010-09-17 15:46:18 -07001279 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001280 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001281 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001282 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001283 else
Jason Evans38d92102011-03-23 00:37:29 -07001284 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001285}
1286
Jason Evans6a0d2912010-09-20 16:44:23 -07001287JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001288JEMALLOC_ATTR(visibility("default"))
1289int
Jason Evans0a5489e2012-03-01 17:19:20 -08001290je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001291{
1292 void *p;
Jason Evans93443682010-10-20 17:39:18 -07001293 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001294 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1295 & (SIZE_T_MAX-1));
1296 bool zero = flags & ALLOCM_ZERO;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001297 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001298
1299 assert(ptr != NULL);
1300 assert(size != 0);
1301
1302 if (malloc_init())
Jason Evansa1ee7832012-04-10 15:07:44 -07001303 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001304
Jason Evans749c2a02011-08-12 18:37:54 -07001305 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
Jason Evans38d92102011-03-23 00:37:29 -07001306 if (usize == 0)
Jason Evansa1ee7832012-04-10 15:07:44 -07001307 goto label_oom;
Jason Evans38d92102011-03-23 00:37:29 -07001308
Jason Evans7372b152012-02-10 20:22:09 -08001309 if (config_prof && opt_prof) {
Jason Evansa5070042011-08-12 13:48:27 -07001310 PROF_ALLOC_PREP(1, usize, cnt);
1311 if (cnt == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001312 goto label_oom;
Jason Evans93443682010-10-20 17:39:18 -07001313 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001314 SMALL_MAXCLASS) {
Jason Evans38d92102011-03-23 00:37:29 -07001315 size_t usize_promoted = (alignment == 0) ?
Jason Evansb1726102012-02-28 16:50:47 -08001316 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
Jason Evans38d92102011-03-23 00:37:29 -07001317 alignment, NULL);
1318 assert(usize_promoted != 0);
1319 p = iallocm(usize_promoted, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001320 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001321 goto label_oom;
Jason Evans93443682010-10-20 17:39:18 -07001322 arena_prof_promoted(p, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001323 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001324 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001325 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001326 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001327 }
Jason Evans749c2a02011-08-12 18:37:54 -07001328 prof_malloc(p, usize, cnt);
Jason Evans7372b152012-02-10 20:22:09 -08001329 } else {
Jason Evans38d92102011-03-23 00:37:29 -07001330 p = iallocm(usize, alignment, zero);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001331 if (p == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001332 goto label_oom;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001333 }
Jason Evans7372b152012-02-10 20:22:09 -08001334 if (rsize != NULL)
1335 *rsize = usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001336
1337 *ptr = p;
Jason Evans7372b152012-02-10 20:22:09 -08001338 if (config_stats) {
1339 assert(usize == isalloc(p));
Jason Evanscd9a1342012-03-21 18:33:03 -07001340 thread_allocated_tsd_get()->allocated += usize;
Jason Evans7372b152012-02-10 20:22:09 -08001341 }
Jason Evansb1476112012-04-05 13:36:17 -07001342 UTRACE(0, size, p);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001343 return (ALLOCM_SUCCESS);
Jason Evansa1ee7832012-04-10 15:07:44 -07001344label_oom:
Jason Evans7372b152012-02-10 20:22:09 -08001345 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001346 malloc_write("<jemalloc>: Error in allocm(): "
1347 "out of memory\n");
1348 abort();
1349 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001350 *ptr = NULL;
Jason Evansb1476112012-04-05 13:36:17 -07001351 UTRACE(0, size, 0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001352 return (ALLOCM_ERR_OOM);
1353}
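
/*
 * Illustrative sketch (not compiled into this file): request at least
 * 4000 zeroed bytes aligned to a 4 KiB boundary, receiving the actual
 * usable size through rsize.  ALLOCM_LG_ALIGN() and ALLOCM_ZERO come
 * from the public experimental header; the locals are hypothetical.
 */
#if 0
	void *p;
	size_t rsize;

	if (je_allocm(&p, &rsize, 4000, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		abort();
	/* Here p is 4096-byte-aligned, zero-filled, and rsize >= 4000. */
#endif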
1354
Jason Evans6a0d2912010-09-20 16:44:23 -07001355JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001356JEMALLOC_ATTR(visibility("default"))
1357int
Jason Evans0a5489e2012-03-01 17:19:20 -08001358je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001359{
1360 void *p, *q;
Jason Evans93443682010-10-20 17:39:18 -07001361 size_t usize;
Jason Evans93443682010-10-20 17:39:18 -07001362 size_t old_size;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001363 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1364 & (SIZE_T_MAX-1));
1365 bool zero = flags & ALLOCM_ZERO;
1366 bool no_move = flags & ALLOCM_NO_MOVE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001367 prof_thr_cnt_t *cnt;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001368
1369 assert(ptr != NULL);
1370 assert(*ptr != NULL);
1371 assert(size != 0);
1372 assert(SIZE_T_MAX - size >= extra);
Jason Evans41b6afb2012-02-02 22:04:57 -08001373 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001374
1375 p = *ptr;
Jason Evans7372b152012-02-10 20:22:09 -08001376 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001377 /*
1378 * usize isn't knowable before iralloc() returns when extra is
1379 * non-zero. Therefore, compute its maximum possible value and
Jason Evansa5070042011-08-12 13:48:27 -07001380 * use that in PROF_ALLOC_PREP() to decide whether to capture a
Jason Evans93443682010-10-20 17:39:18 -07001381 * backtrace. prof_realloc() will use the actual usize to
1382 * decide whether to sample.
1383 */
1384 size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1385 sa2u(size+extra, alignment, NULL);
Jason Evans46405e62011-08-30 23:37:29 -07001386 prof_ctx_t *old_ctx = prof_ctx_get(p);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001387 old_size = isalloc(p);
Jason Evansa5070042011-08-12 13:48:27 -07001388 PROF_ALLOC_PREP(1, max_usize, cnt);
1389 if (cnt == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001390 goto label_oom;
Jason Evans183ba502011-08-11 22:51:00 -07001391 /*
1392 * Use minimum usize to determine whether promotion may happen.
1393 */
1394 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1395 && ((alignment == 0) ? s2u(size) : sa2u(size,
Jason Evansb1726102012-02-28 16:50:47 -08001396 alignment, NULL)) <= SMALL_MAXCLASS) {
1397 q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1398 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
Jason Evans8e3c3c62010-09-17 15:46:18 -07001399 alignment, zero, no_move);
1400 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001401 goto label_err;
Jason Evansae4c7b42012-04-02 07:04:34 -07001402 if (max_usize < PAGE) {
Jason Evans183ba502011-08-11 22:51:00 -07001403 usize = max_usize;
1404 arena_prof_promoted(q, usize);
Jason Evansb493ce22011-08-12 11:28:47 -07001405 } else
1406 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001407 } else {
1408 q = iralloc(p, size, extra, alignment, zero, no_move);
1409 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001410 goto label_err;
Jason Evans93443682010-10-20 17:39:18 -07001411 usize = isalloc(q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001412 }
Jason Evanse4f78462010-10-22 10:45:59 -07001413 prof_realloc(q, usize, cnt, old_size, old_ctx);
Jason Evanseacb8962011-03-23 00:30:30 -07001414 if (rsize != NULL)
1415 *rsize = usize;
Jason Evans7372b152012-02-10 20:22:09 -08001416 } else {
1417 if (config_stats)
1418 old_size = isalloc(p);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001419 q = iralloc(p, size, extra, alignment, zero, no_move);
1420 if (q == NULL)
Jason Evansa1ee7832012-04-10 15:07:44 -07001421 goto label_err;
Jason Evans7372b152012-02-10 20:22:09 -08001422 if (config_stats)
Jason Evans93443682010-10-20 17:39:18 -07001423 usize = isalloc(q);
Jason Evans7372b152012-02-10 20:22:09 -08001424 if (rsize != NULL) {
1425 if (config_stats == false)
1426 usize = isalloc(q);
1427 *rsize = usize;
Jason Evans93443682010-10-20 17:39:18 -07001428 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001429 }
1430
1431 *ptr = q;
Jason Evanscd9a1342012-03-21 18:33:03 -07001432 if (config_stats) {
1433 thread_allocated_t *ta;
1434 ta = thread_allocated_tsd_get();
1435 ta->allocated += usize;
1436 ta->deallocated += old_size;
1437 }
Jason Evansb1476112012-04-05 13:36:17 -07001438 UTRACE(p, size, q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001439 return (ALLOCM_SUCCESS);
Jason Evansa1ee7832012-04-10 15:07:44 -07001440label_err:
Jason Evansb1476112012-04-05 13:36:17 -07001441 if (no_move) {
1442 UTRACE(p, size, q);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001443 return (ALLOCM_ERR_NOT_MOVED);
Jason Evansb1476112012-04-05 13:36:17 -07001444 }
Jason Evansa1ee7832012-04-10 15:07:44 -07001445label_oom:
Jason Evans7372b152012-02-10 20:22:09 -08001446 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001447 malloc_write("<jemalloc>: Error in rallocm(): "
1448 "out of memory\n");
1449 abort();
1450 }
Jason Evansb1476112012-04-05 13:36:17 -07001451 UTRACE(p, size, 0);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001452 return (ALLOCM_ERR_OOM);
1453}
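
/*
 * Illustrative sketch: try to grow an allocation in place first, and
 * only fall back to a moving reallocation if that fails.  With
 * ALLOCM_NO_MOVE, failure to resize in place returns
 * ALLOCM_ERR_NOT_MOVED and leaves *ptr untouched.  p and newsize are
 * hypothetical.
 */
#if 0
	size_t rsize, newsize = 8192;

	if (je_rallocm(&p, &rsize, newsize, 0, ALLOCM_NO_MOVE) ==
	    ALLOCM_ERR_NOT_MOVED) {
		if (je_rallocm(&p, &rsize, newsize, 0, 0) != ALLOCM_SUCCESS)
			abort();
	}
#endif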
1454
Jason Evans6a0d2912010-09-20 16:44:23 -07001455JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001456JEMALLOC_ATTR(visibility("default"))
1457int
Jason Evans0a5489e2012-03-01 17:19:20 -08001458je_sallocm(const void *ptr, size_t *rsize, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001459{
1460 size_t sz;
1461
Jason Evans41b6afb2012-02-02 22:04:57 -08001462 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001463
Jason Evans7372b152012-02-10 20:22:09 -08001464 if (config_ivsalloc)
1465 sz = ivsalloc(ptr);
1466 else {
1467 assert(ptr != NULL);
1468 sz = isalloc(ptr);
1469 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001470 assert(rsize != NULL);
1471 *rsize = sz;
1472
1473 return (ALLOCM_SUCCESS);
1474}
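
/*
 * Illustrative sketch: query the usable size of a live allocation; note
 * that *rsize reports the usable size, which may exceed the original
 * request.  p is hypothetical.
 */
#if 0
	size_t usable;

	je_sallocm(p, &usable, 0);
#endif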
1475
Jason Evans6a0d2912010-09-20 16:44:23 -07001476JEMALLOC_ATTR(nonnull(1))
Jason Evans8e3c3c62010-09-17 15:46:18 -07001477JEMALLOC_ATTR(visibility("default"))
1478int
Jason Evans0a5489e2012-03-01 17:19:20 -08001479je_dallocm(void *ptr, int flags)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001480{
Jason Evanse4f78462010-10-22 10:45:59 -07001481 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001482
1483 assert(ptr != NULL);
Jason Evans41b6afb2012-02-02 22:04:57 -08001484 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001485
Jason Evansb1476112012-04-05 13:36:17 -07001486 UTRACE(ptr, 0, 0);
Jason Evans7372b152012-02-10 20:22:09 -08001487 if (config_stats)
Jason Evanse4f78462010-10-22 10:45:59 -07001488 usize = isalloc(ptr);
Jason Evans7372b152012-02-10 20:22:09 -08001489 if (config_prof && opt_prof) {
1490 if (config_stats == false)
1491 usize = isalloc(ptr);
Jason Evanse4f78462010-10-22 10:45:59 -07001492 prof_free(ptr, usize);
1493 }
Jason Evans7372b152012-02-10 20:22:09 -08001494 if (config_stats)
Jason Evanscd9a1342012-03-21 18:33:03 -07001495 thread_allocated_tsd_get()->deallocated += usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07001496 idalloc(ptr);
1497
1498 return (ALLOCM_SUCCESS);
1499}
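
/*
 * Illustrative sketch: a pointer obtained from allocm()/rallocm() is
 * released with dallocm().  p is hypothetical.
 */
#if 0
	je_dallocm(p, 0);
#endif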
1500
Jason Evans7e15dab2012-02-29 12:56:37 -08001501JEMALLOC_ATTR(visibility("default"))
1502int
Jason Evans0a5489e2012-03-01 17:19:20 -08001503je_nallocm(size_t *rsize, size_t size, int flags)
Jason Evans7e15dab2012-02-29 12:56:37 -08001504{
1505 size_t usize;
1506 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1507 & (SIZE_T_MAX-1));
1508
1509 assert(size != 0);
1510
1511 if (malloc_init())
1512 return (ALLOCM_ERR_OOM);
1513
1514 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
1515 if (usize == 0)
1516 return (ALLOCM_ERR_OOM);
1517
1518 if (rsize != NULL)
1519 *rsize = usize;
1520 return (ALLOCM_SUCCESS);
1521}
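
/*
 * Illustrative sketch: compute the usable size a request would round up
 * to, without allocating anything, e.g. for capacity planning.
 */
#if 0
	size_t usize;

	if (je_nallocm(&usize, 5000, 0) == ALLOCM_SUCCESS) {
		/* A 5000-byte request would yield usize usable bytes. */
	}
#endif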
1522
Jason Evans7e77eaf2012-03-02 17:47:37 -08001523#endif
Jason Evans289053c2009-06-22 12:08:42 -07001524/*
Jason Evans7e77eaf2012-03-02 17:47:37 -08001525 * End experimental functions.
Jason Evans289053c2009-06-22 12:08:42 -07001526 */
1527/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001528/*
1529 * The following functions are used by threading libraries to protect
Jason Evans28177d42010-09-20 11:24:24 -07001530 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001531 */
1532
Jason Evans41b6afb2012-02-02 22:04:57 -08001533#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001534void
Jason Evans804c9ec2009-06-22 17:44:33 -07001535jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001536#else
1537void
1538_malloc_prefork(void)
1539#endif
Jason Evans289053c2009-06-22 12:08:42 -07001540{
Jason Evansfbbb6242010-01-24 17:56:48 -08001541 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001542
1543 /* Acquire all mutexes in a safe order. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001544 malloc_mutex_prefork(&arenas_lock);
Jason Evansfbbb6242010-01-24 17:56:48 -08001545 for (i = 0; i < narenas; i++) {
1546 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001547 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08001548 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001549 base_prefork();
1550 huge_prefork();
1551 chunk_dss_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07001552}
1553
Jason Evans41b6afb2012-02-02 22:04:57 -08001554#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001555void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001556jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001557#else
1558void
1559_malloc_postfork(void)
1560#endif
Jason Evans289053c2009-06-22 12:08:42 -07001561{
1562 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001563
1564 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001565 chunk_dss_postfork_parent();
1566 huge_postfork_parent();
1567 base_postfork_parent();
Jason Evans289053c2009-06-22 12:08:42 -07001568 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001569 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001570 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07001571 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001572 malloc_mutex_postfork_parent(&arenas_lock);
1573}
1574
1575void
1576jemalloc_postfork_child(void)
1577{
1578 unsigned i;
1579
1580 /* Release all mutexes, now that fork() has completed. */
1581 chunk_dss_postfork_child();
1582 huge_postfork_child();
1583 base_postfork_child();
1584 for (i = 0; i < narenas; i++) {
1585 if (arenas[i] != NULL)
1586 arena_postfork_child(arenas[i]);
1587 }
1588 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001589}
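
/*
 * Illustrative sketch: on a platform where neither libc nor the
 * threading library invokes these hooks itself, an embedder could
 * register them directly (this registration is not part of this file):
 */
#if 0
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
#endif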
Jason Evans2dbecf12010-09-05 10:35:13 -07001590
1591/******************************************************************************/
Jason Evans01b3fe52012-04-03 09:28:00 -07001592/*
1593 * The following functions are used for TLS allocation/deallocation in static
1594 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
1595 * is that they avoid accessing TLS variables.
1596 */
1597
1598static void *
1599a0alloc(size_t size, bool zero)
1600{
1601
1602 if (malloc_init())
1603 return (NULL);
1604
1605 if (size == 0)
1606 size = 1;
1607
1608 if (size <= arena_maxclass)
1609 return (arena_malloc(arenas[0], size, zero, false));
1610 else
1611 return (huge_malloc(size, zero));
1612}
1613
1614void *
1615a0malloc(size_t size)
1616{
1617
1618 return (a0alloc(size, false));
1619}
1620
1621void *
1622a0calloc(size_t num, size_t size)
1623{
1624
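	/* Bootstrap-only path; num * size is not checked for overflow. */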
1625 return (a0alloc(num * size, true));
1626}
1627
1628void
1629a0free(void *ptr)
1630{
1631 arena_chunk_t *chunk;
1632
1633 if (ptr == NULL)
1634 return;
1635
1636 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1637 if (chunk != ptr)
1638 arena_dalloc(chunk->arena, chunk, ptr, false);
1639 else
1640 huge_dalloc(ptr, true);
1641}
1642
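/*
 * Illustrative sketch: a static binary's TLS bootstrap might pair these
 * as a minimal calloc()/free(); tls_segment_size is hypothetical.
 */
#if 0
	void *tls = a0calloc(1, tls_segment_size);	/* zeroed block */
	/* ... use as TLS storage ... */
	a0free(tls);
#endif
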
1643/******************************************************************************/