blob: c70192205e94804b6b05315e91bf1a92c4dd7bcc [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evans289053c2009-06-22 12:08:42 -07003
Jason Evans289053c2009-06-22 12:08:42 -07004/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -08005/* Data. */
Jason Evans289053c2009-06-22 12:08:42 -07006
Jason Evanscd9a1342012-03-21 18:33:03 -07007malloc_tsd_data(, arenas, arena_t *, NULL)
8malloc_tsd_data(, thread_allocated, thread_allocated_t,
9 THREAD_ALLOCATED_INITIALIZER)
Jason Evans289053c2009-06-22 12:08:42 -070010
Jason Evanse476f8a2010-01-16 09:53:50 -080011/* Runtime configuration options. */
Jason Evans0a5489e2012-03-01 17:19:20 -080012const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
Jason Evansb7924f52009-06-23 19:01:18 -070013#ifdef JEMALLOC_DEBUG
Jason Evanse476f8a2010-01-16 09:53:50 -080014bool opt_abort = true;
Jason Evansb7924f52009-06-23 19:01:18 -070015# ifdef JEMALLOC_FILL
Jason Evanse476f8a2010-01-16 09:53:50 -080016bool opt_junk = true;
Jason Evans7372b152012-02-10 20:22:09 -080017# else
18bool opt_junk = false;
Jason Evansb7924f52009-06-23 19:01:18 -070019# endif
Jason Evans289053c2009-06-22 12:08:42 -070020#else
Jason Evanse476f8a2010-01-16 09:53:50 -080021bool opt_abort = false;
Jason Evanse476f8a2010-01-16 09:53:50 -080022bool opt_junk = false;
Jason Evans289053c2009-06-22 12:08:42 -070023#endif
Jason Evanse476f8a2010-01-16 09:53:50 -080024bool opt_xmalloc = false;
Jason Evanse476f8a2010-01-16 09:53:50 -080025bool opt_zero = false;
Jason Evanse7339702010-10-23 18:37:06 -070026size_t opt_narenas = 0;
Jason Evans289053c2009-06-22 12:08:42 -070027
Jason Evanscd9a1342012-03-21 18:33:03 -070028unsigned ncpus;
29
30malloc_mutex_t arenas_lock;
31arena_t **arenas;
32unsigned narenas;
33
34/* Set to true once the allocator has been initialized. */
Jason Evans4eeb52f2012-04-02 01:46:25 -070035static bool malloc_initialized = false;
Jason Evanscd9a1342012-03-21 18:33:03 -070036
Jason Evans41b6afb2012-02-02 22:04:57 -080037#ifdef JEMALLOC_THREADED_INIT
Jason Evanscd9a1342012-03-21 18:33:03 -070038/* Used to let the initializing thread recursively allocate. */
39static pthread_t malloc_initializer = (unsigned long)0;
Jason Evans41b6afb2012-02-02 22:04:57 -080040# define INITIALIZER pthread_self()
41# define IS_INITIALIZER (malloc_initializer == pthread_self())
42#else
43static bool malloc_initializer = false;
44# define INITIALIZER true
45# define IS_INITIALIZER malloc_initializer
46#endif
Jason Evanscd9a1342012-03-21 18:33:03 -070047
48/* Used to avoid initialization races. */
49static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
50
Jason Evans289053c2009-06-22 12:08:42 -070051/******************************************************************************/
Jason Evanse476f8a2010-01-16 09:53:50 -080052/* Function prototypes for non-inline static functions. */
Jason Evans289053c2009-06-22 12:08:42 -070053
Jason Evans03c22372010-01-03 12:10:42 -080054static void stats_print_atexit(void);
Jason Evansc9658dd2009-06-22 14:44:08 -070055static unsigned malloc_ncpus(void);
Jason Evanse7339702010-10-23 18:37:06 -070056static bool malloc_conf_next(char const **opts_p, char const **k_p,
57 size_t *klen_p, char const **v_p, size_t *vlen_p);
58static void malloc_conf_error(const char *msg, const char *k, size_t klen,
59 const char *v, size_t vlen);
60static void malloc_conf_init(void);
Jason Evans289053c2009-06-22 12:08:42 -070061static bool malloc_init_hard(void);
Jason Evans59656312012-02-28 21:37:38 -080062static int imemalign(void **memptr, size_t alignment, size_t size,
Jason Evans0a0bbf62012-03-13 12:55:21 -070063 size_t min_alignment);
Jason Evans289053c2009-06-22 12:08:42 -070064
Jason Evans289053c2009-06-22 12:08:42 -070065/******************************************************************************/
Jason Evansc9658dd2009-06-22 14:44:08 -070066/*
Jason Evanse476f8a2010-01-16 09:53:50 -080067 * Begin miscellaneous support functions.
Jason Evansb7924f52009-06-23 19:01:18 -070068 */
69
Jason Evanse476f8a2010-01-16 09:53:50 -080070/* Create a new arena and insert it into the arenas array at index ind. */
71arena_t *
72arenas_extend(unsigned ind)
Jason Evans289053c2009-06-22 12:08:42 -070073{
74 arena_t *ret;
75
Jason Evansb1726102012-02-28 16:50:47 -080076 ret = (arena_t *)base_alloc(sizeof(arena_t));
Jason Evanse476f8a2010-01-16 09:53:50 -080077 if (ret != NULL && arena_new(ret, ind) == false) {
78 arenas[ind] = ret;
79 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -070080 }
Jason Evanse476f8a2010-01-16 09:53:50 -080081 /* Only reached if there is an OOM error. */
Jason Evans289053c2009-06-22 12:08:42 -070082
Jason Evanse476f8a2010-01-16 09:53:50 -080083 /*
84 * OOM here is quite inconvenient to propagate, since dealing with it
85 * would require a check for failure in the fast path. Instead, punt
86 * by using arenas[0]. In practice, this is an extremely unlikely
87 * failure.
88 */
Jason Evans698805c2010-03-03 17:45:38 -080089 malloc_write("<jemalloc>: Error initializing arena\n");
Jason Evanse476f8a2010-01-16 09:53:50 -080090 if (opt_abort)
91 abort();
Jason Evans289053c2009-06-22 12:08:42 -070092
Jason Evanse476f8a2010-01-16 09:53:50 -080093 return (arenas[0]);
Jason Evans289053c2009-06-22 12:08:42 -070094}
95
Jason Evans4c2faa82012-03-13 11:09:23 -070096/* Slow path, called only by choose_arena(). */
/*
 * Bind the calling thread to an arena and record the choice in TSD.
 * Picks the least-loaded initialized arena, or lazily initializes an
 * unused slot when every initialized arena already has threads assigned.
 * Always increments the chosen arena's nthreads under arenas_lock.
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;	/* Sentinel: "no empty slot seen". */
		malloc_mutex_lock(&arenas_lock);
		/* arenas[0] is created during malloc_init_hard(). */
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		/* Single-arena configuration: everyone shares arenas[0]. */
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	/* Cache the binding so the fast path skips this function next time. */
	arenas_tsd_set(&ret);

	return (ret);
}
Jason Evans289053c2009-06-22 12:08:42 -0700155
/*
 * atexit(3) handler (registered in malloc_init_hard() when opt_stats_print
 * is set): folds per-thread tcache statistics into their owning arenas and
 * then prints allocator-wide statistics.
 */
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
191
Jason Evans289053c2009-06-22 12:08:42 -0700192/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800193 * End miscellaneous support functions.
Jason Evans289053c2009-06-22 12:08:42 -0700194 */
195/******************************************************************************/
196/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800197 * Begin initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700198 */
199
/*
 * Query the number of online CPUs via sysconf(3).  Returns at least 1;
 * falls back to 1 if the query fails.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else {
		/*
		 * BUG FIX: the previous code unconditionally executed
		 * ret = (unsigned)result after the error branch, clobbering
		 * the fallback value with (unsigned)-1 when sysconf failed.
		 * The assignment now only happens on success.
		 */
		ret = (unsigned)result;
	}

	return (ret);
}
Jason Evansb7924f52009-06-23 19:01:18 -0700215
Jason Evanscd9a1342012-03-21 18:33:03 -0700216void
Jason Evans597632b2011-03-18 13:41:33 -0700217arenas_cleanup(void *arg)
218{
Jason Evanscd9a1342012-03-21 18:33:03 -0700219 arena_t *arena = *(arena_t **)arg;
Jason Evans597632b2011-03-18 13:41:33 -0700220
221 malloc_mutex_lock(&arenas_lock);
222 arena->nthreads--;
223 malloc_mutex_unlock(&arenas_lock);
224}
225
Jason Evans289053c2009-06-22 12:08:42 -0700226static inline bool
227malloc_init(void)
228{
229
230 if (malloc_initialized == false)
231 return (malloc_init_hard());
232
233 return (false);
234}
235
/*
 * Extract one key:value pair from the conf string at *opts_p.  On success,
 * returns false with *k_p/*klen_p and *v_p/*vlen_p pointing into the input
 * (substrings are NOT NUL-terminated) and *opts_p advanced past the pair and
 * any trailing comma.  Returns true at end of input or on malformed input.
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	/* Scan the key: a run of [A-Za-z0-9_] terminated by ':'. */
	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			/* -1 excludes the ':' just consumed from the key. */
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	/* Scan the value: everything up to the next ',' or end of string. */
	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			/* -1 excludes the ',' just consumed from the value. */
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
311
312static void
313malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
314 size_t vlen)
315{
Jason Evanse7339702010-10-23 18:37:06 -0700316
Jason Evansd81e4bd2012-03-06 14:57:45 -0800317 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
318 (int)vlen, v);
Jason Evanse7339702010-10-23 18:37:06 -0700319}
320
/*
 * Parse runtime configuration from three sources, in increasing priority
 * order: (0) the compiled-in je_malloc_conf string, (1) the name of the
 * /etc/malloc.conf symbolic link, (2) the MALLOC_CONF environment variable.
 * Later sources override earlier ones because each pass re-parses and
 * re-assigns the opt_* globals.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		/*
		 * Each CONF_HANDLE_* macro matches one option name via the
		 * stringized n argument, validates/converts the value, stores
		 * it into o, and `continue`s to the next key:value pair.
		 */
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				errno = 0;				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, abort)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, stats_print)
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, junk)
				CONF_HANDLE_BOOL(opt_zero, zero)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, tcache)
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    lg_tcache_max, -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, prof)
				CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix,
				    "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, prof_active)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    lg_prof_sample, 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, prof_accum)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    lg_prof_interval, -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump)
				CONF_HANDLE_BOOL(opt_prof_leak, prof_leak)
			}
			/* No handler consumed the pair: unknown option. */
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
517
518static bool
519malloc_init_hard(void)
520{
Jason Evansb7924f52009-06-23 19:01:18 -0700521 arena_t *init_arenas[1];
Jason Evans289053c2009-06-22 12:08:42 -0700522
523 malloc_mutex_lock(&init_lock);
Jason Evans41b6afb2012-02-02 22:04:57 -0800524 if (malloc_initialized || IS_INITIALIZER) {
Jason Evans289053c2009-06-22 12:08:42 -0700525 /*
526 * Another thread initialized the allocator before this one
Jason Evansa25d0a82009-11-09 14:57:38 -0800527 * acquired init_lock, or this thread is the initializing
528 * thread, and it is recursively allocating.
Jason Evans289053c2009-06-22 12:08:42 -0700529 */
530 malloc_mutex_unlock(&init_lock);
531 return (false);
532 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800533#ifdef JEMALLOC_THREADED_INIT
534 if (IS_INITIALIZER == false) {
Jason Evansb7924f52009-06-23 19:01:18 -0700535 /* Busy-wait until the initializing thread completes. */
536 do {
537 malloc_mutex_unlock(&init_lock);
538 CPU_SPINWAIT;
539 malloc_mutex_lock(&init_lock);
540 } while (malloc_initialized == false);
Jason Evans2541e1b2010-07-22 11:35:59 -0700541 malloc_mutex_unlock(&init_lock);
Jason Evansb7924f52009-06-23 19:01:18 -0700542 return (false);
543 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800544#endif
545 malloc_initializer = INITIALIZER;
Jason Evans289053c2009-06-22 12:08:42 -0700546
Jason Evanscd9a1342012-03-21 18:33:03 -0700547 malloc_tsd_boot();
Jason Evans7372b152012-02-10 20:22:09 -0800548 if (config_prof)
549 prof_boot0();
Jason Evans289053c2009-06-22 12:08:42 -0700550
Jason Evanse7339702010-10-23 18:37:06 -0700551 malloc_conf_init();
Jason Evans289053c2009-06-22 12:08:42 -0700552
Mike Hommeye77fa592012-03-28 09:53:16 +0200553#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE))
Jason Evansa0bf2422010-01-29 14:30:41 -0800554 /* Register fork handlers. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -0700555 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
556 jemalloc_postfork_child) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800557 malloc_write("<jemalloc>: Error in pthread_atfork()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800558 if (opt_abort)
559 abort();
560 }
Jason Evans41b6afb2012-02-02 22:04:57 -0800561#endif
Jason Evans3c234352010-01-27 13:10:55 -0800562
Jason Evans03c22372010-01-03 12:10:42 -0800563 if (opt_stats_print) {
Jason Evans289053c2009-06-22 12:08:42 -0700564 /* Print statistics at exit. */
Jason Evansa0bf2422010-01-29 14:30:41 -0800565 if (atexit(stats_print_atexit) != 0) {
Jason Evans698805c2010-03-03 17:45:38 -0800566 malloc_write("<jemalloc>: Error in atexit()\n");
Jason Evansa0bf2422010-01-29 14:30:41 -0800567 if (opt_abort)
568 abort();
569 }
Jason Evans289053c2009-06-22 12:08:42 -0700570 }
571
Jason Evanscd9a1342012-03-21 18:33:03 -0700572 if (chunk_boot0()) {
Jason Evansa0bf2422010-01-29 14:30:41 -0800573 malloc_mutex_unlock(&init_lock);
574 return (true);
575 }
Jason Evansc9658dd2009-06-22 14:44:08 -0700576
Jason Evans3c234352010-01-27 13:10:55 -0800577 if (base_boot()) {
578 malloc_mutex_unlock(&init_lock);
579 return (true);
580 }
581
Jason Evans41b6afb2012-02-02 22:04:57 -0800582 if (ctl_boot()) {
583 malloc_mutex_unlock(&init_lock);
584 return (true);
585 }
586
Jason Evans7372b152012-02-10 20:22:09 -0800587 if (config_prof)
588 prof_boot1();
Jason Evans3383af62010-02-11 08:59:06 -0800589
Jason Evansb1726102012-02-28 16:50:47 -0800590 arena_boot();
Jason Evans289053c2009-06-22 12:08:42 -0700591
Jason Evanscd9a1342012-03-21 18:33:03 -0700592 if (config_tcache && tcache_boot0()) {
Jason Evans84c8eef2011-03-16 10:30:13 -0700593 malloc_mutex_unlock(&init_lock);
594 return (true);
595 }
Jason Evans84cbbcb2009-12-29 00:09:15 -0800596
Jason Evanse476f8a2010-01-16 09:53:50 -0800597 if (huge_boot()) {
Jason Evansc9658dd2009-06-22 14:44:08 -0700598 malloc_mutex_unlock(&init_lock);
599 return (true);
600 }
Jason Evans289053c2009-06-22 12:08:42 -0700601
Jason Evans8e6f8b42011-11-03 18:40:03 -0700602 if (malloc_mutex_init(&arenas_lock))
603 return (true);
604
Jason Evansb7924f52009-06-23 19:01:18 -0700605 /*
606 * Create enough scaffolding to allow recursive allocation in
607 * malloc_ncpus().
608 */
609 narenas = 1;
610 arenas = init_arenas;
611 memset(arenas, 0, sizeof(arena_t *) * narenas);
612
613 /*
614 * Initialize one arena here. The rest are lazily created in
615 * choose_arena_hard().
616 */
617 arenas_extend(0);
618 if (arenas[0] == NULL) {
619 malloc_mutex_unlock(&init_lock);
620 return (true);
621 }
622
Jason Evanscd9a1342012-03-21 18:33:03 -0700623 /* Initialize allocation counters before any allocations can occur. */
624 if (config_stats && thread_allocated_tsd_boot()) {
625 malloc_mutex_unlock(&init_lock);
626 return (true);
627 }
Jason Evansb7924f52009-06-23 19:01:18 -0700628
Jason Evanscd9a1342012-03-21 18:33:03 -0700629 if (arenas_tsd_boot()) {
630 malloc_mutex_unlock(&init_lock);
631 return (true);
632 }
633
634 if (config_tcache && tcache_boot1()) {
635 malloc_mutex_unlock(&init_lock);
636 return (true);
637 }
638
Jason Evans6da54182012-03-23 18:05:51 -0700639 if (config_prof && prof_boot2()) {
640 malloc_mutex_unlock(&init_lock);
641 return (true);
642 }
643
Jason Evansb7924f52009-06-23 19:01:18 -0700644 /* Get number of CPUs. */
Jason Evansb7924f52009-06-23 19:01:18 -0700645 malloc_mutex_unlock(&init_lock);
646 ncpus = malloc_ncpus();
647 malloc_mutex_lock(&init_lock);
648
Jason Evanscd9a1342012-03-21 18:33:03 -0700649 if (chunk_boot1()) {
650 malloc_mutex_unlock(&init_lock);
651 return (true);
652 }
653
Jason Evanse7339702010-10-23 18:37:06 -0700654 if (opt_narenas == 0) {
Jason Evans289053c2009-06-22 12:08:42 -0700655 /*
Jason Evans5463a522009-12-29 00:09:15 -0800656 * For SMP systems, create more than one arena per CPU by
657 * default.
Jason Evans289053c2009-06-22 12:08:42 -0700658 */
Jason Evanse7339702010-10-23 18:37:06 -0700659 if (ncpus > 1)
660 opt_narenas = ncpus << 2;
661 else
662 opt_narenas = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700663 }
Jason Evanse7339702010-10-23 18:37:06 -0700664 narenas = opt_narenas;
665 /*
666 * Make sure that the arenas array can be allocated. In practice, this
667 * limit is enough to allow the allocator to function, but the ctl
668 * machinery will fail to allocate memory at far lower limits.
669 */
670 if (narenas > chunksize / sizeof(arena_t *)) {
Jason Evanse7339702010-10-23 18:37:06 -0700671 narenas = chunksize / sizeof(arena_t *);
Jason Evansd81e4bd2012-03-06 14:57:45 -0800672 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
673 narenas);
Jason Evans289053c2009-06-22 12:08:42 -0700674 }
Jason Evans289053c2009-06-22 12:08:42 -0700675
Jason Evans289053c2009-06-22 12:08:42 -0700676 /* Allocate and initialize arenas. */
677 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
678 if (arenas == NULL) {
679 malloc_mutex_unlock(&init_lock);
680 return (true);
681 }
682 /*
683 * Zero the array. In practice, this should always be pre-zeroed,
684 * since it was just mmap()ed, but let's be sure.
685 */
686 memset(arenas, 0, sizeof(arena_t *) * narenas);
Jason Evansb7924f52009-06-23 19:01:18 -0700687 /* Copy the pointer to the one arena that was already initialized. */
688 arenas[0] = init_arenas[0];
Jason Evans289053c2009-06-22 12:08:42 -0700689
690 malloc_initialized = true;
691 malloc_mutex_unlock(&init_lock);
692 return (false);
693}
694
695/*
Jason Evanse476f8a2010-01-16 09:53:50 -0800696 * End initialization functions.
Jason Evans289053c2009-06-22 12:08:42 -0700697 */
698/******************************************************************************/
699/*
700 * Begin malloc(3)-compatible functions.
701 */
702
/*
 * Public malloc(3) entry point.  Zero-size requests are promoted to one
 * byte.  When profiling is enabled, small allocations selected for sampling
 * are promoted to SMALL_MAXCLASS+1 so they can be tracked by arena code.
 * On failure, sets errno to ENOMEM (aborting first if opt_xmalloc).
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto OOM;
		}
		/*
		 * (uintptr_t)1U marks a non-sampled allocation; sampled small
		 * allocations are promoted out of the small size classes.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		/* usize is only needed for stats accounting below. */
		if (config_stats)
			usize = s2u(size);
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		errno = ENOMEM;
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}
757
/*
 * Common implementation for posix_memalign(), aligned_alloc(), memalign(),
 * and valloc().  On success, stores the allocation in *memptr and returns 0;
 * on failure, returns EINVAL or ENOMEM and leaves *memptr untouched.
 * min_alignment is the caller-specific lower bound on alignment (e.g.
 * sizeof(void *) for posix_memalign(), 1 for memalign()/valloc()).
 */
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		/* Zero-size requests are served with a minimal allocation. */
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

		/* sa2u() returns 0 on size/alignment overflow. */
		usize = sa2u(size, alignment, NULL);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto RETURN;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				/*
				 * NOTE(review): a failed PROF_ALLOC_PREP()
				 * reports EINVAL rather than ENOMEM here --
				 * confirm that this is intended.
				 */
				result = NULL;
				ret = EINVAL;
			} else {
				/*
				 * Sampled small requests are allocated from
				 * the smallest non-"small" size class and
				 * flagged via arena_prof_promoted(), so the
				 * true usable size is still reported.
				 */
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment, NULL), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
	/* Stats/prof bookkeeping only happens for successful allocations. */
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	return (ret);
}
850
Jason Evansa5070042011-08-12 13:48:27 -0700851JEMALLOC_ATTR(nonnull(1))
852JEMALLOC_ATTR(visibility("default"))
853int
Jason Evans0a5489e2012-03-01 17:19:20 -0800854je_posix_memalign(void **memptr, size_t alignment, size_t size)
Jason Evansa5070042011-08-12 13:48:27 -0700855{
856
Jason Evans0a0bbf62012-03-13 12:55:21 -0700857 return imemalign(memptr, alignment, size, sizeof(void *));
858}
859
860JEMALLOC_ATTR(malloc)
861JEMALLOC_ATTR(visibility("default"))
862void *
863je_aligned_alloc(size_t alignment, size_t size)
864{
865 void *ret;
866 int err;
867
868 if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
869 ret = NULL;
870 errno = err;
871 }
872 return (ret);
Jason Evansa5070042011-08-12 13:48:27 -0700873}
874
/*
 * calloc(3)-compatible allocation: zeroed space for num objects of size
 * bytes each.  Returns NULL and sets errno to ENOMEM on failure.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;	/* num * size, overflow-checked below. */
	size_t usize;		/* Usable size of the resulting allocation. */
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
		/* Zero-size request: hand out a minimal allocation. */
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			/*
			 * Both operands non-zero yet the product is 0: the
			 * multiplication overflowed to a multiple of 2^N.
			 */
			ret = NULL;
			goto RETURN;
		}
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto RETURN;
		}
		/*
		 * Sampled small allocations are promoted to the smallest
		 * non-"small" size class so prof can track them; the real
		 * usable size is recorded via arena_prof_promoted().
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats)
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		errno = ENOMEM;
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ret);
}
949
Jason Evanse476f8a2010-01-16 09:53:50 -0800950JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -0700951void *
Jason Evans0a5489e2012-03-01 17:19:20 -0800952je_realloc(void *ptr, size_t size)
Jason Evans289053c2009-06-22 12:08:42 -0700953{
954 void *ret;
Jason Evans7372b152012-02-10 20:22:09 -0800955 size_t usize;
Jason Evans93443682010-10-20 17:39:18 -0700956 size_t old_size = 0;
Jason Evans9225a192012-03-23 15:39:07 -0700957 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
958 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
Jason Evans6109fe02010-02-10 10:37:56 -0800959
Jason Evans289053c2009-06-22 12:08:42 -0700960 if (size == 0) {
Jason Evansf081b882012-02-28 20:24:05 -0800961 if (ptr != NULL) {
962 /* realloc(ptr, 0) is equivalent to free(p). */
963 if (config_prof || config_stats)
964 old_size = isalloc(ptr);
965 if (config_prof && opt_prof) {
966 old_ctx = prof_ctx_get(ptr);
Jason Evans6109fe02010-02-10 10:37:56 -0800967 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -0800968 }
Jason Evansf081b882012-02-28 20:24:05 -0800969 idalloc(ptr);
Jason Evans289053c2009-06-22 12:08:42 -0700970 ret = NULL;
971 goto RETURN;
Jason Evansc90ad712012-02-28 20:31:37 -0800972 } else
973 size = 1;
Jason Evans289053c2009-06-22 12:08:42 -0700974 }
975
976 if (ptr != NULL) {
Jason Evans41b6afb2012-02-02 22:04:57 -0800977 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans289053c2009-06-22 12:08:42 -0700978
Jason Evans7372b152012-02-10 20:22:09 -0800979 if (config_prof || config_stats)
980 old_size = isalloc(ptr);
981 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -0700982 usize = s2u(size);
Jason Evans50651562010-04-13 16:13:54 -0700983 old_ctx = prof_ctx_get(ptr);
Jason Evansa5070042011-08-12 13:48:27 -0700984 PROF_ALLOC_PREP(1, usize, cnt);
985 if (cnt == NULL) {
Jason Evans46405e62011-08-30 23:37:29 -0700986 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -0800987 ret = NULL;
988 goto OOM;
989 }
Jason Evans0b270a92010-03-31 16:45:04 -0700990 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
Jason Evansb1726102012-02-28 16:50:47 -0800991 usize <= SMALL_MAXCLASS) {
992 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
Jason Evans8e3c3c62010-09-17 15:46:18 -0700993 false, false);
Jason Evans0b270a92010-03-31 16:45:04 -0700994 if (ret != NULL)
Jason Evans93443682010-10-20 17:39:18 -0700995 arena_prof_promoted(ret, usize);
Jason Evans46405e62011-08-30 23:37:29 -0700996 else
997 old_ctx = NULL;
998 } else {
Jason Evans8e3c3c62010-09-17 15:46:18 -0700999 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans46405e62011-08-30 23:37:29 -07001000 if (ret == NULL)
1001 old_ctx = NULL;
1002 }
Jason Evans7372b152012-02-10 20:22:09 -08001003 } else {
1004 if (config_stats)
1005 usize = s2u(size);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001006 ret = iralloc(ptr, size, 0, 0, false, false);
Jason Evans93443682010-10-20 17:39:18 -07001007 }
Jason Evans289053c2009-06-22 12:08:42 -07001008
Jason Evans6109fe02010-02-10 10:37:56 -08001009OOM:
Jason Evans289053c2009-06-22 12:08:42 -07001010 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001011 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001012 malloc_write("<jemalloc>: Error in realloc(): "
1013 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001014 abort();
1015 }
1016 errno = ENOMEM;
1017 }
1018 } else {
Jason Evansf081b882012-02-28 20:24:05 -08001019 /* realloc(NULL, size) is equivalent to malloc(size). */
Jason Evans7372b152012-02-10 20:22:09 -08001020 if (config_prof && opt_prof)
Jason Evans50651562010-04-13 16:13:54 -07001021 old_ctx = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001022 if (malloc_init()) {
Jason Evans7372b152012-02-10 20:22:09 -08001023 if (config_prof && opt_prof)
Jason Evans6109fe02010-02-10 10:37:56 -08001024 cnt = NULL;
Jason Evans6109fe02010-02-10 10:37:56 -08001025 ret = NULL;
1026 } else {
Jason Evans7372b152012-02-10 20:22:09 -08001027 if (config_prof && opt_prof) {
Jason Evans93443682010-10-20 17:39:18 -07001028 usize = s2u(size);
Jason Evansa5070042011-08-12 13:48:27 -07001029 PROF_ALLOC_PREP(1, usize, cnt);
1030 if (cnt == NULL)
Jason Evans0b270a92010-03-31 16:45:04 -07001031 ret = NULL;
1032 else {
1033 if (prof_promote && (uintptr_t)cnt !=
Jason Evans93443682010-10-20 17:39:18 -07001034 (uintptr_t)1U && usize <=
Jason Evansb1726102012-02-28 16:50:47 -08001035 SMALL_MAXCLASS) {
1036 ret = imalloc(SMALL_MAXCLASS+1);
Jason Evans0b270a92010-03-31 16:45:04 -07001037 if (ret != NULL) {
1038 arena_prof_promoted(ret,
Jason Evans93443682010-10-20 17:39:18 -07001039 usize);
Jason Evans0b270a92010-03-31 16:45:04 -07001040 }
1041 } else
1042 ret = imalloc(size);
1043 }
Jason Evans7372b152012-02-10 20:22:09 -08001044 } else {
1045 if (config_stats)
1046 usize = s2u(size);
Jason Evans6109fe02010-02-10 10:37:56 -08001047 ret = imalloc(size);
Jason Evans93443682010-10-20 17:39:18 -07001048 }
Jason Evans6109fe02010-02-10 10:37:56 -08001049 }
Jason Evans569432c2009-12-29 00:09:15 -08001050
Jason Evans289053c2009-06-22 12:08:42 -07001051 if (ret == NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08001052 if (config_xmalloc && opt_xmalloc) {
Jason Evans698805c2010-03-03 17:45:38 -08001053 malloc_write("<jemalloc>: Error in realloc(): "
1054 "out of memory\n");
Jason Evans289053c2009-06-22 12:08:42 -07001055 abort();
1056 }
1057 errno = ENOMEM;
1058 }
1059 }
1060
1061RETURN:
Jason Evans7372b152012-02-10 20:22:09 -08001062 if (config_prof && opt_prof)
Jason Evanse4f78462010-10-22 10:45:59 -07001063 prof_realloc(ret, usize, cnt, old_size, old_ctx);
Jason Evans7372b152012-02-10 20:22:09 -08001064 if (config_stats && ret != NULL) {
Jason Evanscd9a1342012-03-21 18:33:03 -07001065 thread_allocated_t *ta;
Jason Evans93443682010-10-20 17:39:18 -07001066 assert(usize == isalloc(ret));
Jason Evanscd9a1342012-03-21 18:33:03 -07001067 ta = thread_allocated_tsd_get();
1068 ta->allocated += usize;
1069 ta->deallocated += old_size;
Jason Evans93443682010-10-20 17:39:18 -07001070 }
Jason Evans289053c2009-06-22 12:08:42 -07001071 return (ret);
1072}
1073
/*
 * free(3)-compatible deallocation.  free(NULL) is a no-op.
 */
JEMALLOC_ATTR(visibility("default"))
void
je_free(void *ptr)
{

	if (ptr != NULL) {
		size_t usize;

		assert(malloc_initialized || IS_INITIALIZER);

		/*
		 * usize is only computed when prof or stats needs it; the
		 * prof branch also reports the free to the profiler.
		 */
		if (config_prof && opt_prof) {
			usize = isalloc(ptr);
			prof_free(ptr, usize);
		} else if (config_stats) {
			usize = isalloc(ptr);
		}
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		idalloc(ptr);
	}
}
1095
1096/*
1097 * End malloc(3)-compatible functions.
1098 */
1099/******************************************************************************/
1100/*
Jason Evans6a0d2912010-09-20 16:44:23 -07001101 * Begin non-standard override functions.
Jason Evans6a0d2912010-09-20 16:44:23 -07001102 */
Jason Evans6a0d2912010-09-20 16:44:23 -07001103
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * Legacy memalign(3)-compatible entry point.  min_alignment of 1 disables
 * the POSIX sizeof(void *) floor; errors are signaled only by a NULL return
 * (ret stays NULL if imemalign() fails before assigning it).
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	return (ret);
}
#endif
1115
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * Legacy valloc(3)-compatible entry point: page-aligned allocation (PAGE is
 * the system page size).  Errors are signaled only by a NULL return.
 */
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	return (ret);
}
#endif
1127
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
/* The hooks are const so they cannot be re-pointed after load. */
JEMALLOC_ATTR(visibility("default"))
void (* const __free_hook)(void *ptr) = je_free;

JEMALLOC_ATTR(visibility("default"))
void *(* const __malloc_hook)(size_t size) = je_malloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc;

JEMALLOC_ATTR(visibility("default"))
void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign;
#endif
1158
Jason Evans6a0d2912010-09-20 16:44:23 -07001159/*
1160 * End non-standard override functions.
1161 */
1162/******************************************************************************/
1163/*
Jason Evans289053c2009-06-22 12:08:42 -07001164 * Begin non-standard functions.
1165 */
1166
Jason Evanse476f8a2010-01-16 09:53:50 -08001167JEMALLOC_ATTR(visibility("default"))
Jason Evans289053c2009-06-22 12:08:42 -07001168size_t
Jason Evans0a5489e2012-03-01 17:19:20 -08001169je_malloc_usable_size(const void *ptr)
Jason Evans289053c2009-06-22 12:08:42 -07001170{
Jason Evans569432c2009-12-29 00:09:15 -08001171 size_t ret;
Jason Evans289053c2009-06-22 12:08:42 -07001172
Jason Evans41b6afb2012-02-02 22:04:57 -08001173 assert(malloc_initialized || IS_INITIALIZER);
Jason Evans8e3c3c62010-09-17 15:46:18 -07001174
Jason Evans7372b152012-02-10 20:22:09 -08001175 if (config_ivsalloc)
1176 ret = ivsalloc(ptr);
Jason Evans2465bdf2012-03-26 13:13:55 -07001177 else
Jason Evansf0047372012-04-02 15:18:24 -07001178 ret = (ptr != NULL) ? isalloc(ptr) : 0;
Jason Evans289053c2009-06-22 12:08:42 -07001179
Jason Evans569432c2009-12-29 00:09:15 -08001180 return (ret);
Jason Evans289053c2009-06-22 12:08:42 -07001181}
1182
/*
 * Print allocator statistics via write_cb (or the default writer if NULL);
 * cbopaque is passed through to the callback, and opts selects which
 * sections stats_print() emits.
 */
JEMALLOC_ATTR(visibility("default"))
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
1191
/*
 * mallctl(): read and/or write a control value by name.  Returns 0 on
 * success, EAGAIN if the allocator cannot be initialized, or an error
 * from ctl_byname().
 */
JEMALLOC_ATTR(visibility("default"))
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
1203
/*
 * Translate a control name into a MIB (management information base) that
 * can be used repeatedly with je_mallctlbymib(), avoiding name lookups.
 */
JEMALLOC_ATTR(visibility("default"))
int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}
1214
/*
 * mallctl by precomputed MIB (see je_mallctlnametomib()).  Same read/write
 * semantics and return values as je_mallctl().
 */
JEMALLOC_ATTR(visibility("default"))
int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
1226
Jason Evans7e77eaf2012-03-02 17:47:37 -08001227/*
1228 * End non-standard functions.
1229 */
1230/******************************************************************************/
1231/*
1232 * Begin experimental functions.
1233 */
1234#ifdef JEMALLOC_EXPERIMENTAL
1235
Jason Evans8e3c3c62010-09-17 15:46:18 -07001236JEMALLOC_INLINE void *
Jason Evans38d92102011-03-23 00:37:29 -07001237iallocm(size_t usize, size_t alignment, bool zero)
Jason Evans8e3c3c62010-09-17 15:46:18 -07001238{
1239
Jason Evans38d92102011-03-23 00:37:29 -07001240 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
1241 NULL)));
1242
Jason Evans8e3c3c62010-09-17 15:46:18 -07001243 if (alignment != 0)
Jason Evans38d92102011-03-23 00:37:29 -07001244 return (ipalloc(usize, alignment, zero));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001245 else if (zero)
Jason Evans38d92102011-03-23 00:37:29 -07001246 return (icalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001247 else
Jason Evans38d92102011-03-23 00:37:29 -07001248 return (imalloc(usize));
Jason Evans8e3c3c62010-09-17 15:46:18 -07001249}
1250
/*
 * Experimental allocm(): allocate at least size bytes, honoring the
 * alignment/zero flags.  On success, stores the result in *ptr and the
 * usable size in *rsize (if non-NULL), and returns ALLOCM_SUCCESS; on
 * failure, sets *ptr to NULL and returns ALLOCM_ERR_OOM.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	/* Alignment is encoded in the low bits of flags as lg(alignment). */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

	/* s2u()/sa2u() return 0 on size overflow. */
	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		goto OOM;

	if (config_prof && opt_prof) {
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Sampled small requests are promoted to the smallest
		 * non-"small" size class; arena_prof_promoted() records the
		 * true usable size.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment, NULL);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto OOM;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto OOM;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p));
		thread_allocated_tsd_get()->allocated += usize;
	}
	return (ALLOCM_SUCCESS);
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
1316
/*
 * Experimental rallocm(): resize *ptr to at least size bytes (up to
 * size+extra preferred), honoring alignment/zero/no-move flags.  On
 * success, *ptr and *rsize (if non-NULL) are updated.  Returns
 * ALLOCM_SUCCESS, ALLOCM_ERR_NOT_MOVED (resize failed with ALLOCM_NO_MOVE
 * set), or ALLOCM_ERR_OOM.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	/* Alignment is encoded in the low bits of flags as lg(alignment). */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	prof_thr_cnt_t *cnt;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto OOM;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size,
		    alignment, NULL)) <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats)
			old_size = isalloc(p);
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
		if (config_stats)
			usize = isalloc(q);
		if (rsize != NULL) {
			/* Compute usize here if stats didn't already. */
			if (config_stats == false)
				usize = isalloc(q);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	return (ALLOCM_SUCCESS);
ERR:
	/* With no_move set, a failed resize is not an OOM condition. */
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
	/* Fall through to the OOM path otherwise. */
OOM:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	return (ALLOCM_ERR_OOM);
}
1412
/*
 * Experimental sallocm(): store the usable size of the allocation at ptr
 * into *rsize.  Always returns ALLOCM_SUCCESS.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	/* ivsalloc() validates the pointer itself when available. */
	if (config_ivsalloc)
		sz = ivsalloc(ptr);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}
1433
/*
 * Experimental dallocm(): deallocate ptr (must be non-NULL).  Always
 * returns ALLOCM_SUCCESS.
 */
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
je_dallocm(void *ptr, int flags)
{
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	/* usize is computed at most once, for stats and/or prof. */
	if (config_stats)
		usize = isalloc(ptr);
	if (config_prof && opt_prof) {
		if (config_stats == false)
			usize = isalloc(ptr);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
1457
/*
 * Experimental nallocm(): compute the usable size that an allocm() call
 * with the same size/flags would return, without allocating.  Stores the
 * result in *rsize (if non-NULL); returns ALLOCM_ERR_OOM if the request
 * size overflows or the allocator cannot be initialized.
 */
JEMALLOC_ATTR(visibility("default"))
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	/* Alignment is encoded in the low bits of flags as lg(alignment). */
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	/* s2u()/sa2u() return 0 on size overflow. */
	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment, NULL);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
1479
Jason Evans7e77eaf2012-03-02 17:47:37 -08001480#endif
Jason Evans289053c2009-06-22 12:08:42 -07001481/*
Jason Evans7e77eaf2012-03-02 17:47:37 -08001482 * End experimental functions.
Jason Evans289053c2009-06-22 12:08:42 -07001483 */
1484/******************************************************************************/
Jason Evans289053c2009-06-22 12:08:42 -07001485
Jason Evans289053c2009-06-22 12:08:42 -07001486/*
1487 * The following functions are used by threading libraries for protection of
Jason Evans28177d42010-09-20 11:24:24 -07001488 * malloc during fork().
Jason Evans289053c2009-06-22 12:08:42 -07001489 */
1490
Jason Evans41b6afb2012-02-02 22:04:57 -08001491#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001492void
Jason Evans804c9ec2009-06-22 17:44:33 -07001493jemalloc_prefork(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001494#else
1495void
1496_malloc_prefork(void)
1497#endif
Jason Evans289053c2009-06-22 12:08:42 -07001498{
Jason Evansfbbb6242010-01-24 17:56:48 -08001499 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001500
1501 /* Acquire all mutexes in a safe order. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001502 malloc_mutex_prefork(&arenas_lock);
Jason Evansfbbb6242010-01-24 17:56:48 -08001503 for (i = 0; i < narenas; i++) {
1504 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001505 arena_prefork(arenas[i]);
Jason Evansfbbb6242010-01-24 17:56:48 -08001506 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001507 base_prefork();
1508 huge_prefork();
1509 chunk_dss_prefork();
Jason Evans289053c2009-06-22 12:08:42 -07001510}
1511
Jason Evans41b6afb2012-02-02 22:04:57 -08001512#ifndef JEMALLOC_MUTEX_INIT_CB
Jason Evans2dbecf12010-09-05 10:35:13 -07001513void
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001514jemalloc_postfork_parent(void)
Jason Evans41b6afb2012-02-02 22:04:57 -08001515#else
1516void
1517_malloc_postfork(void)
1518#endif
Jason Evans289053c2009-06-22 12:08:42 -07001519{
1520 unsigned i;
Jason Evans289053c2009-06-22 12:08:42 -07001521
1522 /* Release all mutexes, now that fork() has completed. */
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001523 chunk_dss_postfork_parent();
1524 huge_postfork_parent();
1525 base_postfork_parent();
Jason Evans289053c2009-06-22 12:08:42 -07001526 for (i = 0; i < narenas; i++) {
Jason Evansfbbb6242010-01-24 17:56:48 -08001527 if (arenas[i] != NULL)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001528 arena_postfork_parent(arenas[i]);
Jason Evans289053c2009-06-22 12:08:42 -07001529 }
Jason Evans4e2e3dd2012-03-13 16:31:41 -07001530 malloc_mutex_postfork_parent(&arenas_lock);
1531}
1532
1533void
1534jemalloc_postfork_child(void)
1535{
1536 unsigned i;
1537
1538 /* Release all mutexes, now that fork() has completed. */
1539 chunk_dss_postfork_child();
1540 huge_postfork_child();
1541 base_postfork_child();
1542 for (i = 0; i < narenas; i++) {
1543 if (arenas[i] != NULL)
1544 arena_postfork_child(arenas[i]);
1545 }
1546 malloc_mutex_postfork_child(&arenas_lock);
Jason Evans289053c2009-06-22 12:08:42 -07001547}
Jason Evans2dbecf12010-09-05 10:35:13 -07001548
1549/******************************************************************************/