#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)
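/*
 * Note: malloc_tsd_data() instantiates the thread-specific-data boilerplate
 * for each named slot; the arenas_tsd_*() and thread_allocated_tsd_*()
 * functions used below (e.g. arenas_tsd_set(), thread_allocated_tsd_get())
 * come from these two instantiations.
 */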
/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
# else
bool opt_junk = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
#endif
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

unsigned ncpus;

malloc_mutex_t arenas_lock;
arena_t **arenas;
unsigned narenas;

/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
static pthread_t malloc_initializer = (unsigned long)0;
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
#else
static bool malloc_initializer = false;
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
#endif

/* Used to avoid initialization races. */
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}
/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
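/*
 * Cleanup hook for a thread's arenas TSD slot: when the slot is torn down at
 * thread exit, drop the exiting thread's contribution to its arena's thread
 * count, which choose_arena_hard() uses for load balancing.
 */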
void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
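/*
 * Parse one key/value pair from an option string of the form
 * "key1:value1,key2:value2,...", where keys are drawn from [A-Za-z0-9_].
 * On success, *k_p/*klen_p and *v_p/*vlen_p delimit the key and value, and
 * false is returned; true is returned once the string is exhausted or found
 * to be malformed.  For example, "abort:true,narenas:4" yields (abort, true)
 * on the first call and (narenas, 4) on the second.
 */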
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}
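/*
 * Assemble the runtime configuration from up to three sources, applied in
 * order so that later sources override earlier ones: 1) the compiled-in
 * je_malloc_conf string, 2) the name of the /etc/malloc.conf symbolic link,
 * and 3) the MALLOC_CONF environment variable (the latter two names gain a
 * prefix when JEMALLOC_PREFIX is defined).  For example, running a program
 * with MALLOC_CONF=narenas:2,lg_chunk:24 overrides those two settings.
 */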
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				continue; \
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				uintmax_t um; \
				char *end; \
\
				errno = 0; \
				um = malloc_strtoumax(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (um < min || um > max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = um; \
				continue; \
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				long l; \
				char *end; \
\
				errno = 0; \
				l = strtol(v, &end, 0); \
				if (errno != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define	CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(#n)-1 == klen && strncmp(#n, k, \
			    klen) == 0) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
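/*
 * The once-only slow path behind malloc_init().  Bootstrap ordering matters
 * here: a statically allocated init_arenas[1] array and a single arena stand
 * in for the real arenas array so that the allocator can satisfy its own
 * recursive allocations (e.g. from within malloc_ncpus()) before the final
 * arenas array is allocated via base_alloc().
 */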
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (chunk_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}
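/*
 * imemalign() is the common implementation behind posix_memalign(),
 * aligned_alloc(), and the memalign()/valloc() overrides; min_alignment is
 * each entry point's smallest legal alignment.  Errors are reported via the
 * return value (0, EINVAL, or ENOMEM) rather than via errno.
 */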
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}
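/*
 * The two standard entry points below differ only in the minimum alignment
 * they accept: posix_memalign() requires at least sizeof(void *), whereas
 * aligned_alloc() accepts any power of two (min_alignment of 1).
 */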
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{

	return imemalign(memptr, alignment, size, sizeof(void *));
}

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		errno = err;
	}
	return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}
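/*
 * A concrete instance of the calloc() overflow heuristic above, for 32-bit
 * size_t: the mask is SIZE_T_MAX << 16 == 0xffff0000.  If num and size both
 * fit in 16 bits, num * size fits in 32 bits and cannot overflow, so the
 * division is skipped; only when either operand touches the high half is
 * num_size / size != num evaluated to detect actual overflow.
 */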
JEMALLOC_ATTR(visibility("default"))
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize;
	size_t old_size = 0;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof || config_stats)
				old_size = isalloc(ptr);
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			idalloc(ptr);
			ret = NULL;
			goto RETURN;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof || config_stats)
			old_size = isalloc(ptr);
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats)
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

OOM:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats)
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			errno = ENOMEM;
		}
	}

RETURN:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	imemalign(&ret, PAGE, size, 1);
	return (ret);
}
#endif
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
JEMALLOC_ATTR(visibility("default"))
size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr);
	else
		ret = (ptr != NULL) ? isalloc(ptr) : 0;

	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
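/*
 * Example (a sketch, not part of the library): reading a statistic through
 * the mallctl namespace.  The "stats.allocated" name is only meaningful when
 * jemalloc was built with statistics support.
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		malloc_printf("<app>: allocated: %zu\n", allocated);
 */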
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
	    NULL)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}
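/*
 * Example usage (a sketch, assuming the ALLOCM_LG_ALIGN() and ALLOCM_ZERO
 * flag macros from the public experimental header): allocate 4096 zeroed
 * bytes aligned to 64 bytes, and learn the usable size:
 *
 *	void *p;
 *	size_t rsize;
 *	if (je_allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		abort();
 */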
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
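/*
 * rallocm() counterpart to allocm(): resizes *ptr to at least size bytes,
 * opportunistically growing as far as size+extra when that is cheap; with
 * ALLOCM_NO_MOVE set, iralloc() is only permitted to resize in place.
 */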
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
Jason Evans8e3c3c62010-09-17 15:46:18 -07001396 }
1397
1398 *ptr = q;
Jason Evanscd9a1342012-03-21 18:33:03 -07001399 if (config_stats) {
1400 thread_allocated_t *ta;
1401 ta = thread_allocated_tsd_get();
1402 ta->allocated += usize;
1403 ta->deallocated += old_size;
1404 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001405 return (ALLOCM_SUCCESS);
1406ERR:
1407 if (no_move)
1408 return (ALLOCM_ERR_NOT_MOVED);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001409OOM:
Jason Evans7372b152012-02-10 20:22:09 -08001410 if (config_xmalloc && opt_xmalloc) {
Jason Evans8e3c3c62010-09-17 15:46:18 -07001411 malloc_write("<jemalloc>: Error in rallocm(): "
1412 "out of memory\n");
1413 abort();
1414 }
Jason Evans8e3c3c62010-09-17 15:46:18 -07001415 return (ALLOCM_ERR_OOM);
1416}
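
/*
 * Illustrative sketch, not part of the original source: in-place growth
 * with rallocm() (the example_* helper is hypothetical).  ALLOCM_NO_MOVE
 * asks the allocator to resize without relocating; ALLOCM_ERR_NOT_MOVED
 * reports that the object could not be grown in place and is untouched.
 * Excluded from the build via #if 0.
 */
#if 0
static void
example_rallocm(void *p, size_t cur)
{
	size_t usize;

	/* Try to double the allocation without moving it. */
	if (rallocm(&p, &usize, cur * 2, 0, ALLOCM_NO_MOVE) ==
	    ALLOCM_ERR_NOT_MOVED) {
		/* p is still valid at its old size; fall back to copying. */
	}
}
#endif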

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
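
/*
 * Illustrative sketch, not part of the original source: pairing
 * sallocm() and dallocm() (the example_* helper is hypothetical).
 * sallocm() recovers the usable size of a live allocation, so a caller
 * that discarded allocm()'s *rsize can still write the full extent.
 * Excluded from the build via #if 0.
 */
#if 0
static void
example_sallocm_dallocm(void *p)
{
	size_t usize;

	sallocm(p, &usize, 0);	/* usize = usable bytes backing p. */
	memset(p, 0, usize);	/* The entire usable extent is writable. */
	dallocm(p, 0);
}
#endif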

JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
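
/*
 * Illustrative sketch, not part of the original source: nallocm()
 * performs allocm()'s size computation without allocating, so a caller
 * can pad a request to fill its size class exactly before committing
 * memory (the example_* helper is hypothetical).  Excluded from the
 * build via #if 0.
 */
#if 0
static size_t
example_nallocm(size_t requested)
{
	size_t usize;

	if (nallocm(&usize, requested, 0) != ALLOCM_SUCCESS)
		return (0);
	/* usize >= requested; allocating usize bytes wastes no space. */
	return (usize);
}
#endif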

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries to protect
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
void
_malloc_prefork(void)
#endif
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
void
_malloc_postfork(void)
#endif
{
	unsigned i;

	/* Release all mutexes, in reverse order, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	/* Release all mutexes, in reverse order, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}
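
/*
 * Illustrative sketch, not part of the original source: how the handlers
 * above are wired up on platforms without JEMALLOC_MUTEX_INIT_CB, where
 * the allocator registers them itself during initialization so that all
 * allocator mutexes are held across fork() (the example_* helper is
 * hypothetical).  Excluded from the build via #if 0.
 */
#if 0
static void
example_atfork_registration(void)
{

	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
}
#endif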

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	/* Guard against size_t overflow in num * size. */
	if (num != 0 && size > SIZE_T_MAX / num)
		return (NULL);
	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}
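
/*
 * Illustrative sketch, not part of the original source: a consumer such
 * as a libc TLS bootstrap path might use the arena-0 wrappers above,
 * since they are safe to call while a thread's TLS block is still being
 * constructed (the example_* helper is hypothetical).  Excluded from the
 * build via #if 0.
 */
#if 0
static void *
example_tls_block(size_t tls_size)
{
	void *tls = a0calloc(1, tls_size);	/* Zeroed, TLS-free path. */

	if (tls == NULL)
		return (NULL);
	/* ... use the block; release it later with a0free(tls). ... */
	return (tls);
}
#endif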

/******************************************************************************/