Reduce cpp conditional logic complexity.
Convert configuration-related cpp conditional logic to use static
constant variables, e.g.:
#ifdef JEMALLOC_DEBUG
[...]
#endif
becomes:
if (config_debug) {
[...]
}
The advantage is clearer, more concise code; since the config_*
variables are compile-time constants, an optimizing compiler still
elides the disabled branches. The main disadvantage is that data
structures no longer have conditionally defined fields, so they pay
the cost of all fields regardless of whether they are used. In
practice, this is only a minor concern; config_stats will go away in
an upcoming change, and config_prof is the only other major feature
that depends on more than a few special-purpose fields.
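As context, a minimal sketch of how such a config_* constant can be
defined (the pattern assumed for config_debug and the other flags used
below; the header it actually lives in is not shown in this patch).
The #ifdef survives only at the definition site, while every use site
becomes an ordinary if whose body is still compiled and type-checked
even when the feature is disabled:

/*
 * Sketch only: one compile-time boolean constant per feature macro.
 * Assumes a bool type is in scope (e.g. via <stdbool.h>).
 */
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;

When the flag folds to false, the compiler drops the guarded code
entirely, so the only residual cost is the unconditionally defined
struct fields mentioned above.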
diff --git a/src/arena.c b/src/arena.c
index d166ca1..356b628 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -188,9 +188,7 @@
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
static bool small_size2bin_init(void);
-#ifdef JEMALLOC_DEBUG
static void small_size2bin_validate(void);
-#endif
static bool small_size2bin_init_hard(void);
static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
size_t min_run_size);
@@ -211,8 +209,8 @@
}
/* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
- arena_chunk_map_t, u.rb_link, arena_run_comp)
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
@@ -246,8 +244,8 @@
}
/* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
- arena_chunk_map_t, u.rb_link, arena_avail_comp)
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_avail_comp)
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
@@ -257,7 +255,7 @@
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
- dassert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->magic == ARENA_RUN_MAGIC);
assert(run->nfree > 0);
assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
@@ -295,17 +293,16 @@
run->nfree++;
}
-#ifdef JEMALLOC_DEBUG
static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
- size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));
+ UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind <<
+ PAGE_SHIFT));
for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
assert(p[i] == 0);
}
-#endif
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
@@ -315,9 +312,6 @@
size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
arena_avail_tree_t *runs_avail;
-#ifdef JEMALLOC_STATS
- size_t cactive_diff;
-#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
old_ndirty = chunk->ndirty;
@@ -336,13 +330,17 @@
rem_pages = total_pages - need_pages;
arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
-#ifdef JEMALLOC_STATS
- /* Update stats_cactive if nactive is crossing a chunk multiple. */
- cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
- PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
-#endif
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING((arena->nactive +
+ need_pages) << PAGE_SHIFT) - CHUNK_CEILING(arena->nactive <<
+ PAGE_SHIFT);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
@@ -390,13 +388,10 @@
chunk + ((run_ind+i) <<
PAGE_SHIFT)), 0,
PAGE_SIZE);
- }
-#ifdef JEMALLOC_DEBUG
- else {
+ } else if (config_debug) {
arena_chunk_validate_zeroed(
chunk, run_ind+i);
}
-#endif
}
} else {
/*
@@ -427,40 +422,34 @@
chunk->map[run_ind-map_bias].bits =
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not
* actually cause an observable failure.
*/
- if (flag_dirty == 0 &&
+ if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
== 0)
arena_chunk_validate_zeroed(chunk, run_ind);
-#endif
for (i = 1; i < need_pages - 1; i++) {
chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
| (chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
-#ifdef JEMALLOC_DEBUG
- if (flag_dirty == 0 &&
+ if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+i-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0)
arena_chunk_validate_zeroed(chunk, run_ind+i);
-#endif
}
chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
- 1) << PAGE_SHIFT) |
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
- if (flag_dirty == 0 &&
+ if (config_debug && flag_dirty == 0 &&
(chunk->map[run_ind+need_pages-1-map_bias].bits &
CHUNK_MAP_UNZEROED) == 0) {
arena_chunk_validate_zeroed(chunk,
run_ind+need_pages-1);
}
-#endif
}
}
@@ -498,9 +487,8 @@
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
-#ifdef JEMALLOC_STATS
- arena->stats.mapped += chunksize;
-#endif
+ if (config_stats)
+ arena->stats.mapped += chunksize;
chunk->arena = arena;
ql_elm_new(chunk, link_dirty);
@@ -526,13 +514,10 @@
if (zero == false) {
for (i = map_bias+1; i < chunk_npages-1; i++)
chunk->map[i-map_bias].bits = unzeroed;
- }
-#ifdef JEMALLOC_DEBUG
- else {
+ } else if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++)
assert(chunk->map[i-map_bias].bits == unzeroed);
}
-#endif
chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
unzeroed;
@@ -571,9 +556,8 @@
malloc_mutex_unlock(&arena->lock);
chunk_dealloc((void *)spare, chunksize, true);
malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
- arena->stats.mapped -= chunksize;
-#endif
+ if (config_stats)
+ arena->stats.mapped -= chunksize;
} else
arena->spare = chunk;
}
@@ -677,12 +661,8 @@
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
size_t pageind, flag_unzeroed;
-#ifdef JEMALLOC_DEBUG
size_t ndirty;
-#endif
-#ifdef JEMALLOC_STATS
size_t nmadvise;
-#endif
ql_new(&mapelms);
@@ -692,10 +672,7 @@
* madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
* mappings, but not for file-backed mappings.
*/
-# ifdef JEMALLOC_SWAP
- swap_enabled ? CHUNK_MAP_UNZEROED :
-# endif
- 0;
+ (config_swap && swap_enabled) ? CHUNK_MAP_UNZEROED : 0;
#else
CHUNK_MAP_UNZEROED;
#endif
@@ -730,9 +707,6 @@
assert(pageind + npages <= chunk_npages);
if (mapelm->bits & CHUNK_MAP_DIRTY) {
size_t i;
-#ifdef JEMALLOC_STATS
- size_t cactive_diff;
-#endif
arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm);
@@ -755,17 +729,19 @@
CHUNK_MAP_ALLOCATED;
}
-#ifdef JEMALLOC_STATS
- /*
- * Update stats_cactive if nactive is crossing a
- * chunk multiple.
- */
- cactive_diff = CHUNK_CEILING((arena->nactive +
- npages) << PAGE_SHIFT) -
- CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
-#endif
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is
+ * crossing a chunk multiple.
+ */
+ size_t cactive_diff =
+ CHUNK_CEILING((arena->nactive +
+ npages) << PAGE_SHIFT) -
+ CHUNK_CEILING(arena->nactive <<
+ PAGE_SHIFT);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
arena->nactive += npages;
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
@@ -782,7 +758,7 @@
chunk + (uintptr_t)(pageind << PAGE_SHIFT));
assert((mapelm->bits >> PAGE_SHIFT) == 0);
- dassert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(arena,
run->bin);
arena_bin_info_t *bin_info =
@@ -793,53 +769,45 @@
}
assert(pageind == chunk_npages);
-#ifdef JEMALLOC_DEBUG
- ndirty = chunk->ndirty;
-#endif
-#ifdef JEMALLOC_STATS
- arena->stats.purged += chunk->ndirty;
-#endif
+ if (config_debug)
+ ndirty = chunk->ndirty;
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
arena->ndirty -= chunk->ndirty;
chunk->ndirty = 0;
ql_remove(&arena->chunks_dirty, chunk, link_dirty);
chunk->dirtied = false;
malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_STATS
- nmadvise = 0;
-#endif
+ if (config_stats)
+ nmadvise = 0;
ql_foreach(mapelm, &mapelms, u.ql_link) {
size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = mapelm->bits >> PAGE_SHIFT;
assert(pageind + npages <= chunk_npages);
-#ifdef JEMALLOC_DEBUG
assert(ndirty >= npages);
- ndirty -= npages;
-#endif
+ if (config_debug)
+ ndirty -= npages;
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
- (npages << PAGE_SHIFT), MADV_DONTNEED);
+# define MADV_PURGE MADV_DONTNEED
#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
- madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
- (npages << PAGE_SHIFT), MADV_FREE);
+# define MADV_PURGE MADV_FREE
#else
# error "No method defined for purging unused dirty pages."
#endif
-
-#ifdef JEMALLOC_STATS
- nmadvise++;
-#endif
+ madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
+ (npages << PAGE_SHIFT), MADV_PURGE);
+#undef MADV_PURGE
+ if (config_stats)
+ nmadvise++;
}
-#ifdef JEMALLOC_DEBUG
assert(ndirty == 0);
-#endif
malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
- arena->stats.nmadvise += nmadvise;
-#endif
+ if (config_stats)
+ arena->stats.nmadvise += nmadvise;
/* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL;
@@ -859,23 +827,22 @@
{
arena_chunk_t *chunk;
size_t npurgatory;
-#ifdef JEMALLOC_DEBUG
- size_t ndirty = 0;
+ if (config_debug) {
+ size_t ndirty = 0;
- ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
- assert(chunk->dirtied);
- ndirty += chunk->ndirty;
+ ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
+ assert(chunk->dirtied);
+ ndirty += chunk->ndirty;
+ }
+ assert(ndirty == arena->ndirty);
}
- assert(ndirty == arena->ndirty);
-#endif
assert(arena->ndirty > arena->npurgatory || all);
assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory) || all);
-#ifdef JEMALLOC_STATS
- arena->stats.npurge++;
-#endif
+ if (config_stats)
+ arena->stats.npurge++;
/*
* Compute the minimum number of pages that this thread should try to
@@ -957,9 +924,6 @@
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
arena_avail_tree_t *runs_avail;
-#ifdef JEMALLOC_STATS
- size_t cactive_diff;
-#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
@@ -981,13 +945,17 @@
size = bin_info->run_size;
}
run_pages = (size >> PAGE_SHIFT);
-#ifdef JEMALLOC_STATS
- /* Update stats_cactive if nactive is crossing a chunk multiple. */
- cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
- CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
- if (cactive_diff != 0)
- stats_cactive_sub(cactive_diff);
-#endif
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING(arena->nactive <<
+ PAGE_SHIFT) - CHUNK_CEILING((arena->nactive - run_pages) <<
+ PAGE_SHIFT);
+ if (cactive_diff != 0)
+ stats_cactive_sub(cactive_diff);
+ }
arena->nactive -= run_pages;
/*
@@ -1144,9 +1112,8 @@
| flag_dirty | (chunk->map[pageind-map_bias].bits &
CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-#ifdef JEMALLOC_DEBUG
- {
- size_t tail_npages = newsize >> PAGE_SHIFT;
+ if (config_debug) {
+ UNUSED size_t tail_npages = newsize >> PAGE_SHIFT;
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & ~PAGE_MASK) == 0);
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
@@ -1156,7 +1123,6 @@
assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
.bits & CHUNK_MAP_ALLOCATED) != 0);
}
-#endif
chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
(chunk->map[pageind+head_npages-map_bias].bits &
CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
@@ -1231,9 +1197,8 @@
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT))
<< PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
- bin->stats.reruns++;
-#endif
+ if (config_stats)
+ bin->stats.reruns++;
return (run);
}
/* No existing runs have any space available. */
@@ -1255,20 +1220,19 @@
run->nextind = 0;
run->nfree = bin_info->nregs;
bitmap_init(bitmap, &bin_info->bitmap_info);
-#ifdef JEMALLOC_DEBUG
- run->magic = ARENA_RUN_MAGIC;
-#endif
+ if (config_debug)
+ run->magic = ARENA_RUN_MAGIC;
}
malloc_mutex_unlock(&arena->lock);
/********************************/
malloc_mutex_lock(&bin->lock);
if (run != NULL) {
-#ifdef JEMALLOC_STATS
- bin->stats.nruns++;
- bin->stats.curruns++;
- if (bin->stats.curruns > bin->stats.highruns)
- bin->stats.highruns = bin->stats.curruns;
-#endif
+ if (config_stats) {
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ if (bin->stats.curruns > bin->stats.highruns)
+ bin->stats.highruns = bin->stats.curruns;
+ }
return (run);
}
@@ -1291,9 +1255,8 @@
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT))
<< PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
- bin->stats.reruns++;
-#endif
+ if (config_stats)
+ bin->stats.reruns++;
return (run);
}
@@ -1318,7 +1281,7 @@
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
- dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
+ assert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
ret = arena_run_reg_alloc(bin->runcur, bin_info);
if (run != NULL) {
@@ -1346,13 +1309,12 @@
bin->runcur = run;
- dassert(bin->runcur->magic == ARENA_RUN_MAGIC);
+ assert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
return (arena_run_reg_alloc(bin->runcur, bin_info));
}
-#ifdef JEMALLOC_PROF
void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
@@ -1365,15 +1327,10 @@
}
}
}
-#endif
-#ifdef JEMALLOC_TCACHE
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
-# ifdef JEMALLOC_PROF
- , uint64_t prof_accumbytes
-# endif
- )
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+ uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
@@ -1382,11 +1339,11 @@
assert(tbin->ncached == 0);
-#ifdef JEMALLOC_PROF
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
-#endif
+ if (config_prof) {
+ malloc_mutex_lock(&arena->lock);
+ arena_prof_accum(arena, prof_accumbytes);
+ malloc_mutex_unlock(&arena->lock);
+ }
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1400,17 +1357,16 @@
/* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr;
}
-#ifdef JEMALLOC_STATS
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
-#endif
+ if (config_stats) {
+ bin->stats.allocated += i * arena_bin_info[binind].reg_size;
+ bin->stats.nmalloc += i;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.nfills++;
+ tbin->tstats.nrequests = 0;
+ }
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
}
-#endif
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
@@ -1436,27 +1392,25 @@
return (NULL);
}
-#ifdef JEMALLOC_STATS
- bin->stats.allocated += size;
- bin->stats.nmalloc++;
- bin->stats.nrequests++;
-#endif
+ if (config_stats) {
+ bin->stats.allocated += size;
+ bin->stats.nmalloc++;
+ bin->stats.nrequests++;
+ }
malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_PROF
- if (isthreaded == false) {
+ if (config_prof && isthreaded == false) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, size);
malloc_mutex_unlock(&arena->lock);
}
-#endif
if (zero == false) {
-#ifdef JEMALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
+ if (config_fill) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
} else
memset(ret, 0, size);
@@ -1476,31 +1430,31 @@
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
-#ifdef JEMALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+ if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+ arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].curruns;
+ }
}
-#endif
-#ifdef JEMALLOC_PROF
- arena_prof_accum(arena, size);
-#endif
+ if (config_prof)
+ arena_prof_accum(arena, size);
malloc_mutex_unlock(&arena->lock);
if (zero == false) {
-#ifdef JEMALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
+ if (config_fill) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
}
return (ret);
@@ -1514,18 +1468,14 @@
assert(QUANTUM_CEILING(size) <= arena_maxclass);
if (size <= small_maxclass) {
-#ifdef JEMALLOC_TCACHE
tcache_t *tcache;
- if ((tcache = tcache_get()) != NULL)
+ if (config_tcache && (tcache = tcache_get()) != NULL)
return (tcache_alloc_small(tcache, size, zero));
else
-
-#endif
return (arena_malloc_small(choose_arena(), size, zero));
} else {
-#ifdef JEMALLOC_TCACHE
- if (size <= tcache_maxclass) {
+ if (config_tcache && size <= tcache_maxclass) {
tcache_t *tcache;
if ((tcache = tcache_get()) != NULL)
@@ -1535,7 +1485,6 @@
size, zero));
}
} else
-#endif
return (arena_malloc_large(choose_arena(), size, zero));
}
}
@@ -1586,29 +1535,28 @@
}
}
-#ifdef JEMALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+ if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+ arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].curruns;
+ }
}
-#endif
malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
-#endif
return (ret);
}
@@ -1631,7 +1579,7 @@
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
- dassert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1647,7 +1595,6 @@
return (ret);
}
-#ifdef JEMALLOC_PROF
void
arena_prof_promoted(const void *ptr, size_t size)
{
@@ -1685,7 +1632,7 @@
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
PAGE_SHIFT));
- dassert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->magic == ARENA_RUN_MAGIC);
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1707,7 +1654,6 @@
return (ret);
}
-#endif
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
@@ -1781,16 +1727,14 @@
((past - run_ind) << PAGE_SHIFT), false);
/* npages = past - run_ind; */
}
-#ifdef JEMALLOC_DEBUG
- run->magic = 0;
-#endif
+ if (config_debug)
+ run->magic = 0;
arena_run_dalloc(arena, run, true);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
- bin->stats.curruns--;
-#endif
+ if (config_stats)
+ bin->stats.curruns--;
}
static void
@@ -1836,25 +1780,20 @@
size_t pageind;
arena_run_t *run;
arena_bin_t *bin;
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
size_t size;
-#endif
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
- dassert(run->magic == ARENA_RUN_MAGIC);
+ assert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
size_t binind = arena_bin_index(arena, bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
- size = bin_info->reg_size;
-#endif
+ if (config_fill || config_stats)
+ size = bin_info->reg_size;
-#ifdef JEMALLOC_FILL
- if (opt_junk)
+ if (config_fill && opt_junk)
memset(ptr, 0x5a, size);
-#endif
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
@@ -1863,13 +1802,12 @@
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
-#ifdef JEMALLOC_STATS
- bin->stats.allocated -= size;
- bin->stats.ndalloc++;
-#endif
+ if (config_stats) {
+ bin->stats.allocated -= size;
+ bin->stats.ndalloc++;
+ }
}
-#ifdef JEMALLOC_STATS
void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
@@ -1907,10 +1845,10 @@
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
-#ifdef JEMALLOC_TCACHE
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
-#endif
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].highruns += bin->stats.highruns;
@@ -1918,37 +1856,24 @@
malloc_mutex_unlock(&bin->lock);
}
}
-#endif
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
- /* Large allocation. */
-#ifdef JEMALLOC_FILL
-# ifndef JEMALLOC_STATS
- if (opt_junk)
-# endif
-#endif
- {
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
+ if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
PAGE_SHIFT;
size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
-#endif
-#ifdef JEMALLOC_FILL
-# ifdef JEMALLOC_STATS
- if (opt_junk)
-# endif
+ if (config_fill && config_stats && opt_junk)
memset(ptr, 0x5a, size);
-#endif
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
-#endif
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= size;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
+ }
}
arena_run_dalloc(arena, (arena_run_t *)ptr, true);
@@ -1968,24 +1893,25 @@
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
true);
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+ if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
+ arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].curruns;
+ }
}
-#endif
malloc_mutex_unlock(&arena->lock);
}
@@ -2038,25 +1964,29 @@
chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+ - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+ - 1].curruns--;
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) -
- 1].curruns;
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
+ arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].nrequests++;
+ arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+ if (arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].curruns > arena->stats.lstats[(size >>
+ PAGE_SHIFT) - 1].highruns) {
+ arena->stats.lstats[(size >> PAGE_SHIFT)
+ - 1].highruns = arena->stats.lstats[(size >>
+ PAGE_SHIFT) - 1].curruns;
+ }
}
-#endif
malloc_mutex_unlock(&arena->lock);
return (false);
}
@@ -2078,12 +2008,10 @@
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
size);
}
-#endif
return (false);
} else {
arena_chunk_t *chunk;
@@ -2091,16 +2019,14 @@
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
- dassert(arena->magic == ARENA_MAGIC);
+ assert(arena->magic == ARENA_MAGIC);
if (psize < oldsize) {
-#ifdef JEMALLOC_FILL
/* Fill before shrinking in order avoid a race. */
- if (opt_junk) {
+ if (config_fill && opt_junk) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
-#endif
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
@@ -2108,12 +2034,11 @@
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
-#ifdef JEMALLOC_FILL
- if (ret == false && zero == false && opt_zero) {
+ if (config_fill && ret == false && zero == false &&
+ opt_zero) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
size - oldsize);
}
-#endif
return (ret);
}
}
@@ -2135,12 +2060,10 @@
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
size + extra >= oldsize)) {
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size),
0x5a, oldsize - size);
}
-#endif
return (ptr);
}
} else {
@@ -2222,22 +2145,21 @@
if (malloc_mutex_init(&arena->lock))
return (true);
-#ifdef JEMALLOC_STATS
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
-# ifdef JEMALLOC_TCACHE
- ql_new(&arena->tcache_ql);
-# endif
-#endif
+ if (config_stats) {
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+ arena->stats.lstats =
+ (malloc_large_stats_t *)base_alloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (arena->stats.lstats == NULL)
+ return (true);
+ memset(arena->stats.lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (config_tcache)
+ ql_new(&arena->tcache_ql);
+ }
-#ifdef JEMALLOC_PROF
- arena->prof_accumbytes = 0;
-#endif
+ if (config_prof)
+ arena->prof_accumbytes = 0;
/* Initialize chunks. */
ql_new(&arena->chunks_dirty);
@@ -2251,84 +2173,41 @@
arena_avail_tree_new(&arena->runs_avail_dirty);
/* Initialize bins. */
- i = 0;
-#ifdef JEMALLOC_TINY
- /* (2^n)-spaced tiny bins. */
- for (; i < ntbins; i++) {
+ for (i = 0; i < nbins; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-#endif
-
- /* Quantum-spaced bins. */
- for (; i < ntbins + nqbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
+ if (config_stats)
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
- /* Cacheline-spaced bins. */
- for (; i < ntbins + nqbins + ncbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-
- /* Subpage-spaced bins. */
- for (; i < nbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
-
-#ifdef JEMALLOC_DEBUG
- arena->magic = ARENA_MAGIC;
-#endif
+ if (config_debug)
+ arena->magic = ARENA_MAGIC;
return (false);
}
-#ifdef JEMALLOC_DEBUG
static void
small_size2bin_validate(void)
{
size_t i, size, binind;
i = 1;
-# ifdef JEMALLOC_TINY
/* Tiny. */
- for (; i < (1U << LG_TINY_MIN); i++) {
- size = pow2_ceil(1U << LG_TINY_MIN);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- assert(SMALL_SIZE2BIN(i) == binind);
+ if (config_tiny) {
+ for (; i < (1U << LG_TINY_MIN); i++) {
+ size = pow2_ceil(1U << LG_TINY_MIN);
+ binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+ assert(SMALL_SIZE2BIN(i) == binind);
+ }
+ for (; i < qspace_min; i++) {
+ size = pow2_ceil(i);
+ binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+ assert(SMALL_SIZE2BIN(i) == binind);
+ }
}
- for (; i < qspace_min; i++) {
- size = pow2_ceil(i);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- assert(SMALL_SIZE2BIN(i) == binind);
- }
-# endif
/* Quantum-spaced. */
for (; i <= qspace_max; i++) {
size = QUANTUM_CEILING(i);
@@ -2350,7 +2229,6 @@
assert(SMALL_SIZE2BIN(i) == binind);
}
}
-#endif
static bool
small_size2bin_init(void)
@@ -2363,9 +2241,8 @@
return (small_size2bin_init_hard());
small_size2bin = const_small_size2bin;
-#ifdef JEMALLOC_DEBUG
- small_size2bin_validate();
-#endif
+ if (config_debug)
+ small_size2bin_validate();
return (false);
}
@@ -2388,19 +2265,19 @@
return (true);
i = 1;
-#ifdef JEMALLOC_TINY
/* Tiny. */
- for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
- size = pow2_ceil(1U << LG_TINY_MIN);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- CUSTOM_SMALL_SIZE2BIN(i) = binind;
+ if (config_tiny) {
+ for (; i < (1U << LG_TINY_MIN); i += TINY_MIN) {
+ size = pow2_ceil(1U << LG_TINY_MIN);
+ binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+ CUSTOM_SMALL_SIZE2BIN(i) = binind;
+ }
+ for (; i < qspace_min; i += TINY_MIN) {
+ size = pow2_ceil(i);
+ binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
+ CUSTOM_SMALL_SIZE2BIN(i) = binind;
+ }
}
- for (; i < qspace_min; i += TINY_MIN) {
- size = pow2_ceil(i);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- CUSTOM_SMALL_SIZE2BIN(i) = binind;
- }
-#endif
/* Quantum-spaced. */
for (; i <= qspace_max; i += TINY_MIN) {
size = QUANTUM_CEILING(i);
@@ -2423,9 +2300,8 @@
}
small_size2bin = custom_small_size2bin;
-#ifdef JEMALLOC_DEBUG
- small_size2bin_validate();
-#endif
+ if (config_debug)
+ small_size2bin_validate();
return (false);
#undef CUSTOM_SMALL_SIZE2BIN
}
@@ -2448,9 +2324,7 @@
uint32_t try_nregs, good_nregs;
uint32_t try_hdr_size, good_hdr_size;
uint32_t try_bitmap_offset, good_bitmap_offset;
-#ifdef JEMALLOC_PROF
uint32_t try_ctx0_offset, good_ctx0_offset;
-#endif
uint32_t try_reg0_offset, good_reg0_offset;
assert(min_run_size >= PAGE_SIZE);
@@ -2481,8 +2355,7 @@
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote == false) {
+ if (config_prof && opt_prof && prof_promote == false) {
/* Pad to a quantum boundary. */
try_hdr_size = QUANTUM_CEILING(try_hdr_size);
try_ctx0_offset = try_hdr_size;
@@ -2490,7 +2363,6 @@
try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
} else
try_ctx0_offset = 0;
-#endif
try_reg0_offset = try_run_size - (try_nregs *
bin_info->reg_size);
} while (try_hdr_size > try_reg0_offset);
@@ -2504,9 +2376,7 @@
good_nregs = try_nregs;
good_hdr_size = try_hdr_size;
good_bitmap_offset = try_bitmap_offset;
-#ifdef JEMALLOC_PROF
good_ctx0_offset = try_ctx0_offset;
-#endif
good_reg0_offset = try_reg0_offset;
/* Try more aggressive settings. */
@@ -2526,8 +2396,7 @@
try_bitmap_offset = try_hdr_size;
/* Add space for bitmap. */
try_hdr_size += bitmap_size(try_nregs);
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote == false) {
+ if (config_prof && opt_prof && prof_promote == false) {
/* Pad to a quantum boundary. */
try_hdr_size = QUANTUM_CEILING(try_hdr_size);
try_ctx0_offset = try_hdr_size;
@@ -2537,7 +2406,6 @@
try_hdr_size += try_nregs *
sizeof(prof_ctx_t *);
}
-#endif
try_reg0_offset = try_run_size - (try_nregs *
bin_info->reg_size);
} while (try_hdr_size > try_reg0_offset);
@@ -2553,9 +2421,7 @@
bin_info->run_size = good_run_size;
bin_info->nregs = good_nregs;
bin_info->bitmap_offset = good_bitmap_offset;
-#ifdef JEMALLOC_PROF
bin_info->ctx0_offset = good_ctx0_offset;
-#endif
bin_info->reg0_offset = good_reg0_offset;
return (good_run_size);
@@ -2574,15 +2440,17 @@
prev_run_size = PAGE_SIZE;
i = 0;
-#ifdef JEMALLOC_TINY
/* (2^n)-spaced tiny bins. */
- for (; i < ntbins; i++) {
- bin_info = &arena_bin_info[i];
- bin_info->reg_size = (1U << (LG_TINY_MIN + i));
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+ if (config_tiny) {
+ for (; i < ntbins; i++) {
+ bin_info = &arena_bin_info[i];
+ bin_info->reg_size = (1U << (LG_TINY_MIN + i));
+ prev_run_size = bin_info_run_size_calc(bin_info,
+ prev_run_size);
+ bitmap_info_init(&bin_info->bitmap_info,
+ bin_info->nregs);
+ }
}
-#endif
/* Quantum-spaced bins. */
for (; i < ntbins + nqbins; i++) {
@@ -2631,9 +2499,8 @@
assert(sspace_min < PAGE_SIZE);
sspace_max = PAGE_SIZE - SUBPAGE;
-#ifdef JEMALLOC_TINY
- assert(LG_QUANTUM >= LG_TINY_MIN);
-#endif
+ if (config_tiny)
+ assert(LG_QUANTUM >= LG_TINY_MIN);
assert(ntbins <= LG_QUANTUM);
nqbins = qspace_max >> LG_QUANTUM;
ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
@@ -2652,23 +2519,18 @@
* small size classes, plus a "not small" size class must be stored in
* 8 bits of arena_chunk_map_t's bits field.
*/
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote) {
- if (nbins > 255) {
- char line_buf[UMAX2S_BUFSIZE];
- malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(u2s(nbins, 10, line_buf));
- malloc_write(" > max 255)\n");
- abort();
- }
- } else
-#endif
- if (nbins > 256) {
- char line_buf[UMAX2S_BUFSIZE];
- malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(u2s(nbins, 10, line_buf));
- malloc_write(" > max 256)\n");
- abort();
+ if (config_prof && opt_prof && prof_promote && nbins > 255) {
+ char line_buf[UMAX2S_BUFSIZE];
+ malloc_write("<jemalloc>: Too many small size classes (");
+ malloc_write(u2s(nbins, 10, line_buf));
+ malloc_write(" > max 255)\n");
+ abort();
+ } else if (nbins > 256) {
+ char line_buf[UMAX2S_BUFSIZE];
+ malloc_write("<jemalloc>: Too many small size classes (");
+ malloc_write(u2s(nbins, 10, line_buf));
+ malloc_write(" > max 256)\n");
+ abort();
}
/*
diff --git a/src/chunk.c b/src/chunk.c
index d190c6f..57ab20d 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -5,18 +5,12 @@
/* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-#ifdef JEMALLOC_SWAP
bool opt_overcommit = true;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
-#endif
-#ifdef JEMALLOC_IVSALLOC
rtree_t *chunks_rtree;
-#endif
/* Various chunk-related settings. */
size_t chunksize;
@@ -41,67 +35,50 @@
assert(size != 0);
assert((size & chunksize_mask) == 0);
-#ifdef JEMALLOC_SWAP
- if (swap_enabled) {
+ if (config_swap && swap_enabled) {
ret = chunk_alloc_swap(size, zero);
if (ret != NULL)
goto RETURN;
}
if (swap_enabled == false || opt_overcommit) {
-#endif
-#ifdef JEMALLOC_DSS
- ret = chunk_alloc_dss(size, zero);
- if (ret != NULL)
- goto RETURN;
-#endif
+ if (config_dss) {
+ ret = chunk_alloc_dss(size, zero);
+ if (ret != NULL)
+ goto RETURN;
+ }
ret = chunk_alloc_mmap(size);
if (ret != NULL) {
*zero = true;
goto RETURN;
}
-#ifdef JEMALLOC_SWAP
}
-#endif
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
-#ifdef JEMALLOC_IVSALLOC
- if (base == false && ret != NULL) {
+ if (config_ivsalloc && base == false && ret != NULL) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
chunk_dealloc(ret, size, true);
return (NULL);
}
}
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- if (ret != NULL) {
-# ifdef JEMALLOC_PROF
+ if ((config_stats || config_prof) && ret != NULL) {
bool gdump;
-# endif
malloc_mutex_lock(&chunks_mtx);
-# ifdef JEMALLOC_STATS
- stats_chunks.nchunks += (size / chunksize);
-# endif
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks = stats_chunks.curchunks;
-# ifdef JEMALLOC_PROF
- gdump = true;
-# endif
- }
-# ifdef JEMALLOC_PROF
- else
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
gdump = false;
-# endif
malloc_mutex_unlock(&chunks_mtx);
-# ifdef JEMALLOC_PROF
- if (opt_prof && opt_prof_gdump && gdump)
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
-# endif
}
-#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@@ -116,24 +93,20 @@
assert(size != 0);
assert((size & chunksize_mask) == 0);
-#ifdef JEMALLOC_IVSALLOC
- rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- malloc_mutex_lock(&chunks_mtx);
- stats_chunks.curchunks -= (size / chunksize);
- malloc_mutex_unlock(&chunks_mtx);
-#endif
+ if (config_ivsalloc)
+ rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+ if (config_stats || config_prof) {
+ malloc_mutex_lock(&chunks_mtx);
+ stats_chunks.curchunks -= (size / chunksize);
+ malloc_mutex_unlock(&chunks_mtx);
+ }
if (unmap) {
-#ifdef JEMALLOC_SWAP
- if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
+ if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
+ size) == false)
return;
-#endif
-#ifdef JEMALLOC_DSS
- if (chunk_dealloc_dss(chunk, size) == false)
+ if (config_dss && chunk_dealloc_dss(chunk, size) == false)
return;
-#endif
chunk_dealloc_mmap(chunk, size);
}
}
@@ -148,26 +121,23 @@
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- if (malloc_mutex_init(&chunks_mtx))
+ if (config_stats || config_prof) {
+ if (malloc_mutex_init(&chunks_mtx))
+ return (true);
+ memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+ }
+ if (config_swap && chunk_swap_boot())
return (true);
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
- if (chunk_swap_boot())
- return (true);
-#endif
if (chunk_mmap_boot())
return (true);
-#ifdef JEMALLOC_DSS
- if (chunk_dss_boot())
+ if (config_dss && chunk_dss_boot())
return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
- if (chunks_rtree == NULL)
- return (true);
-#endif
+ if (config_ivsalloc) {
+ chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk);
+ if (chunks_rtree == NULL)
+ return (true);
+ }
return (false);
}
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 5c0e290..c25baea 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -1,6 +1,5 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */
@@ -35,6 +34,8 @@
{
extent_node_t *node, key;
+ cassert(config_dss);
+
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&dss_mtx);
@@ -74,6 +75,8 @@
{
void *ret;
+ cassert(config_dss);
+
ret = chunk_recycle_dss(size, zero);
if (ret != NULL)
return (ret);
@@ -131,6 +134,8 @@
{
extent_node_t *xnode, *node, *prev, key;
+ cassert(config_dss);
+
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
@@ -204,6 +209,8 @@
{
bool ret;
+ cassert(config_dss);
+
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -220,6 +227,8 @@
{
bool ret;
+ cassert(config_dss);
+
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max) {
@@ -269,6 +278,8 @@
chunk_dss_boot(void)
{
+ cassert(config_dss);
+
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = sbrk(0);
@@ -281,4 +292,3 @@
}
/******************************************************************************/
-#endif /* JEMALLOC_DSS */
diff --git a/src/chunk_swap.c b/src/chunk_swap.c
index cb25ae0..fe9ca30 100644
--- a/src/chunk_swap.c
+++ b/src/chunk_swap.c
@@ -1,6 +1,6 @@
#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
+
/******************************************************************************/
/* Data. */
@@ -9,9 +9,7 @@
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
-#ifdef JEMALLOC_STATS
size_t swap_avail;
-#endif
/* Base address of the mmap()ed file(s). */
static void *swap_base;
@@ -42,6 +40,8 @@
{
extent_node_t *node, key;
+ cassert(config_swap);
+
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&swap_mtx);
@@ -65,9 +65,8 @@
node->size -= size;
extent_tree_szad_insert(&swap_chunks_szad, node);
}
-#ifdef JEMALLOC_STATS
- swap_avail -= size;
-#endif
+ if (config_stats)
+ swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (*zero)
@@ -84,6 +83,7 @@
{
void *ret;
+ cassert(config_swap);
assert(swap_enabled);
ret = chunk_recycle_swap(size, zero);
@@ -94,9 +94,8 @@
if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
ret = swap_end;
swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
- swap_avail -= size;
-#endif
+ if (config_stats)
+ swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (swap_prezeroed)
@@ -116,6 +115,8 @@
{
extent_node_t *xnode, *node, *prev, key;
+ cassert(config_swap);
+
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
@@ -189,6 +190,7 @@
{
bool ret;
+ cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
@@ -207,6 +209,7 @@
{
bool ret;
+ cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
@@ -237,9 +240,8 @@
} else
madvise(chunk, size, MADV_DONTNEED);
-#ifdef JEMALLOC_STATS
- swap_avail += size;
-#endif
+ if (config_stats)
+ swap_avail += size;
ret = false;
goto RETURN;
}
@@ -260,6 +262,8 @@
size_t cumsize, voff;
size_t sizes[nfds];
+ cassert(config_swap);
+
malloc_mutex_lock(&swap_mtx);
/* Get file sizes. */
@@ -362,9 +366,8 @@
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
-#ifdef JEMALLOC_STATS
- swap_avail = cumsize;
-#endif
+ if (config_stats)
+ swap_avail = cumsize;
swap_enabled = true;
@@ -378,6 +381,8 @@
chunk_swap_boot(void)
{
+ cassert(config_swap);
+
if (malloc_mutex_init(&swap_mtx))
return (true);
@@ -385,9 +390,8 @@
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
-#ifdef JEMALLOC_STATS
- swap_avail = 0;
-#endif
+ if (config_stats)
+ swap_avail = 0;
swap_base = NULL;
swap_end = NULL;
swap_max = NULL;
@@ -397,6 +401,3 @@
return (false);
}
-
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
diff --git a/src/ckh.c b/src/ckh.c
index 43fcc25..f7eaa78 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -73,7 +73,7 @@
size_t hash1, hash2, bucket, cell;
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
@@ -394,9 +394,8 @@
goto RETURN;
}
-#ifdef JEMALLOC_DEBUG
- ckh->magic = CKH_MAGIC;
-#endif
+ if (config_debug)
+ ckh->magic = CKH_MAGIC;
ret = false;
RETURN:
@@ -408,7 +407,7 @@
{
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
#ifdef CKH_VERBOSE
malloc_printf(
@@ -433,7 +432,7 @@
{
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
return (ckh->count);
}
@@ -464,7 +463,7 @@
bool ret;
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
@@ -489,7 +488,7 @@
size_t cell;
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@@ -521,7 +520,7 @@
size_t cell;
assert(ckh != NULL);
- dassert(ckh->magic == CKH_MAGIC);
+ assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
diff --git a/src/ctl.c b/src/ctl.c
index e5336d3..05be431 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -27,16 +27,12 @@
const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \
size_t i);
-#ifdef JEMALLOC_STATS
static bool ctl_arena_init(ctl_arena_stats_t *astats);
-#endif
static void ctl_arena_clear(ctl_arena_stats_t *astats);
-#ifdef JEMALLOC_STATS
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
-#endif
static void ctl_arena_refresh(arena_t *arena, unsigned i);
static void ctl_refresh(void);
static bool ctl_init(void);
@@ -45,16 +41,12 @@
CTL_PROTO(version)
CTL_PROTO(epoch)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(tcache_flush)
-#endif
CTL_PROTO(thread_arena)
-#ifdef JEMALLOC_STATS
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
-#endif
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_dynamic_page_shift)
@@ -77,21 +69,12 @@
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
-#ifdef JEMALLOC_FILL
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
-#endif
-#ifdef JEMALLOC_SYSV
CTL_PROTO(opt_sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
CTL_PROTO(opt_xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_gc_sweep)
-#endif
-#ifdef JEMALLOC_PROF
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@@ -102,10 +85,7 @@
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(opt_lg_prof_tcmax)
-#endif
-#ifdef JEMALLOC_SWAP
CTL_PROTO(opt_overcommit)
-#endif
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -119,35 +99,26 @@
CTL_PROTO(arenas_subpage)
CTL_PROTO(arenas_pagesize)
CTL_PROTO(arenas_chunksize)
-#ifdef JEMALLOC_TINY
CTL_PROTO(arenas_tspace_min)
CTL_PROTO(arenas_tspace_max)
-#endif
CTL_PROTO(arenas_qspace_min)
CTL_PROTO(arenas_qspace_max)
CTL_PROTO(arenas_cspace_min)
CTL_PROTO(arenas_cspace_max)
CTL_PROTO(arenas_sspace_min)
CTL_PROTO(arenas_sspace_max)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_tcache_max)
-#endif
CTL_PROTO(arenas_ntbins)
CTL_PROTO(arenas_nqbins)
CTL_PROTO(arenas_ncbins)
CTL_PROTO(arenas_nsbins)
CTL_PROTO(arenas_nbins)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_nhbins)
-#endif
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
-#ifdef JEMALLOC_PROF
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
-#endif
-#ifdef JEMALLOC_STATS
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
@@ -166,10 +137,8 @@
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-#endif
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_highruns)
@@ -181,31 +150,22 @@
CTL_PROTO(stats_arenas_i_lruns_j_highruns)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
-#endif
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
-#ifdef JEMALLOC_STATS
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
-#endif
INDEX_PROTO(stats_arenas_i)
-#ifdef JEMALLOC_STATS
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
-#endif
-#ifdef JEMALLOC_SWAP
-# ifdef JEMALLOC_STATS
CTL_PROTO(swap_avail)
-# endif
CTL_PROTO(swap_prezeroed)
CTL_PROTO(swap_nfds)
CTL_PROTO(swap_fds)
-#endif
/******************************************************************************/
/* mallctl tree. */
@@ -223,21 +183,16 @@
*/
#define INDEX(i) false, {.indexed = {i##_index}}, NULL
-#ifdef JEMALLOC_TCACHE
static const ctl_node_t tcache_node[] = {
{NAME("flush"), CTL(tcache_flush)}
};
-#endif
static const ctl_node_t thread_node[] = {
- {NAME("arena"), CTL(thread_arena)}
-#ifdef JEMALLOC_STATS
- ,
+ {NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)}
-#endif
};
static const ctl_node_t config_node[] = {
@@ -265,27 +220,13 @@
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)}
-#ifdef JEMALLOC_FILL
- ,
+ {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)}
-#endif
-#ifdef JEMALLOC_SYSV
- ,
- {NAME("sysv"), CTL(opt_sysv)}
-#endif
-#ifdef JEMALLOC_XMALLOC
- ,
- {NAME("xmalloc"), CTL(opt_xmalloc)}
-#endif
-#ifdef JEMALLOC_TCACHE
- ,
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("sysv"), CTL(opt_sysv)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}
-#endif
-#ifdef JEMALLOC_PROF
- ,
+ {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@@ -295,12 +236,8 @@
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
{NAME("prof_accum"), CTL(opt_prof_accum)},
- {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
-#endif
-#ifdef JEMALLOC_SWAP
- ,
+ {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)},
{NAME("overcommit"), CTL(opt_overcommit)}
-#endif
};
static const ctl_node_t arenas_bin_i_node[] = {
@@ -335,42 +272,33 @@
{NAME("subpage"), CTL(arenas_subpage)},
{NAME("pagesize"), CTL(arenas_pagesize)},
{NAME("chunksize"), CTL(arenas_chunksize)},
-#ifdef JEMALLOC_TINY
{NAME("tspace_min"), CTL(arenas_tspace_min)},
{NAME("tspace_max"), CTL(arenas_tspace_max)},
-#endif
{NAME("qspace_min"), CTL(arenas_qspace_min)},
{NAME("qspace_max"), CTL(arenas_qspace_max)},
{NAME("cspace_min"), CTL(arenas_cspace_min)},
{NAME("cspace_max"), CTL(arenas_cspace_max)},
{NAME("sspace_min"), CTL(arenas_sspace_min)},
{NAME("sspace_max"), CTL(arenas_sspace_max)},
-#ifdef JEMALLOC_TCACHE
{NAME("tcache_max"), CTL(arenas_tcache_max)},
-#endif
{NAME("ntbins"), CTL(arenas_ntbins)},
{NAME("nqbins"), CTL(arenas_nqbins)},
{NAME("ncbins"), CTL(arenas_ncbins)},
{NAME("nsbins"), CTL(arenas_nsbins)},
{NAME("nbins"), CTL(arenas_nbins)},
-#ifdef JEMALLOC_TCACHE
{NAME("nhbins"), CTL(arenas_nhbins)},
-#endif
{NAME("bin"), CHILD(arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(arenas_lrun)},
{NAME("purge"), CTL(arenas_purge)}
};
-#ifdef JEMALLOC_PROF
static const ctl_node_t prof_node[] = {
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("interval"), CTL(prof_interval)}
};
-#endif
-#ifdef JEMALLOC_STATS
static const ctl_node_t stats_chunks_node[] = {
{NAME("current"), CTL(stats_chunks_current)},
{NAME("total"), CTL(stats_chunks_total)},
@@ -402,10 +330,8 @@
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
-#ifdef JEMALLOC_TCACHE
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
-#endif
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
{NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)},
@@ -433,14 +359,11 @@
static const ctl_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
-#endif
static const ctl_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}
-#ifdef JEMALLOC_STATS
- ,
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
@@ -449,7 +372,6 @@
{NAME("large"), CHILD(stats_arenas_i_large)},
{NAME("bins"), CHILD(stats_arenas_i_bins)},
{NAME("lruns"), CHILD(stats_arenas_i_lruns)}
-#endif
};
static const ctl_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(stats_arenas_i)}
@@ -460,46 +382,34 @@
};
static const ctl_node_t stats_node[] = {
-#ifdef JEMALLOC_STATS
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)},
{NAME("chunks"), CHILD(stats_chunks)},
{NAME("huge"), CHILD(stats_huge)},
-#endif
{NAME("arenas"), CHILD(stats_arenas)}
};
-#ifdef JEMALLOC_SWAP
static const ctl_node_t swap_node[] = {
-# ifdef JEMALLOC_STATS
{NAME("avail"), CTL(swap_avail)},
-# endif
{NAME("prezeroed"), CTL(swap_prezeroed)},
{NAME("nfds"), CTL(swap_nfds)},
{NAME("fds"), CTL(swap_fds)}
};
-#endif
static const ctl_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
-#ifdef JEMALLOC_TCACHE
{NAME("tcache"), CHILD(tcache)},
-#endif
{NAME("thread"), CHILD(thread)},
{NAME("config"), CHILD(config)},
{NAME("opt"), CHILD(opt)},
{NAME("arenas"), CHILD(arenas)},
-#ifdef JEMALLOC_PROF
{NAME("prof"), CHILD(prof)},
-#endif
{NAME("stats"), CHILD(stats)}
-#ifdef JEMALLOC_SWAP
,
{NAME("swap"), CHILD(swap)}
-#endif
};
static const ctl_node_t super_root_node[] = {
{NAME(""), CHILD(root)}
@@ -512,7 +422,6 @@
/******************************************************************************/
-#ifdef JEMALLOC_STATS
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{
@@ -532,7 +441,6 @@
return (false);
}
-#endif
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
@@ -540,18 +448,18 @@
astats->pactive = 0;
astats->pdirty = 0;
-#ifdef JEMALLOC_STATS
- memset(&astats->astats, 0, sizeof(arena_stats_t));
- astats->allocated_small = 0;
- astats->nmalloc_small = 0;
- astats->ndalloc_small = 0;
- astats->nrequests_small = 0;
- memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t));
-#endif
+ if (config_stats) {
+ memset(&astats->astats, 0, sizeof(arena_stats_t));
+ astats->allocated_small = 0;
+ astats->nmalloc_small = 0;
+ astats->ndalloc_small = 0;
+ astats->nrequests_small = 0;
+ memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
+ memset(astats->lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ }
}
-#ifdef JEMALLOC_STATS
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
@@ -604,17 +512,17 @@
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
-#ifdef JEMALLOC_TCACHE
- sstats->bstats[i].nfills += astats->bstats[i].nfills;
- sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
-#endif
+ if (config_tcache) {
+ sstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ }
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
sstats->bstats[i].highruns += astats->bstats[i].highruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
}
-#endif
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
@@ -625,17 +533,17 @@
ctl_arena_clear(astats);
sstats->nthreads += astats->nthreads;
-#ifdef JEMALLOC_STATS
- ctl_arena_stats_amerge(astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
-#else
- astats->pactive += arena->nactive;
- astats->pdirty += arena->ndirty;
- /* Merge into sum stats as well. */
- sstats->pactive += arena->nactive;
- sstats->pdirty += arena->ndirty;
-#endif
+ if (config_stats) {
+ ctl_arena_stats_amerge(astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+ } else {
+ astats->pactive += arena->nactive;
+ astats->pdirty += arena->ndirty;
+ /* Merge into sum stats as well. */
+ sstats->pactive += arena->nactive;
+ sstats->pdirty += arena->ndirty;
+ }
}
static void
@@ -644,19 +552,19 @@
unsigned i;
arena_t *tarenas[narenas];
-#ifdef JEMALLOC_STATS
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
+ if (config_stats) {
+ malloc_mutex_lock(&chunks_mtx);
+ ctl_stats.chunks.current = stats_chunks.curchunks;
+ ctl_stats.chunks.total = stats_chunks.nchunks;
+ ctl_stats.chunks.high = stats_chunks.highchunks;
+ malloc_mutex_unlock(&chunks_mtx);
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
-#endif
+ malloc_mutex_lock(&huge_mtx);
+ ctl_stats.huge.allocated = huge_allocated;
+ ctl_stats.huge.nmalloc = huge_nmalloc;
+ ctl_stats.huge.ndalloc = huge_ndalloc;
+ malloc_mutex_unlock(&huge_mtx);
+ }
/*
* Clear sum stats, since they will be merged into by
@@ -682,20 +590,20 @@
ctl_arena_refresh(tarenas[i], i);
}
-#ifdef JEMALLOC_STATS
- ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
- + ctl_stats.arenas[narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
- ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ if (config_stats) {
+ ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
+ + ctl_stats.arenas[narenas].astats.allocated_large
+ + ctl_stats.huge.allocated;
+ ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
+ PAGE_SHIFT) + ctl_stats.huge.allocated;
+ ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
-# ifdef JEMALLOC_SWAP
- malloc_mutex_lock(&swap_mtx);
- ctl_stats.swap_avail = swap_avail;
- malloc_mutex_unlock(&swap_mtx);
-# endif
-#endif
+ if (config_swap) {
+ malloc_mutex_lock(&swap_mtx);
+ ctl_stats.swap_avail = swap_avail;
+ malloc_mutex_unlock(&swap_mtx);
+ }
+ }
ctl_epoch++;
}
@@ -707,10 +615,6 @@
malloc_mutex_lock(&ctl_mtx);
if (ctl_initialized == false) {
-#ifdef JEMALLOC_STATS
- unsigned i;
-#endif
-
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
@@ -729,14 +633,15 @@
* ever get used. Lazy initialization would allow errors to
* cause inconsistent state to be viewable by the application.
*/
-#ifdef JEMALLOC_STATS
- for (i = 0; i <= narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- ret = true;
- goto RETURN;
+ if (config_stats) {
+ unsigned i;
+ for (i = 0; i <= narenas; i++) {
+ if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ ret = true;
+ goto RETURN;
+ }
}
}
-#endif
ctl_stats.arenas[narenas].initialized = true;
ctl_epoch = 0;
@@ -998,6 +903,54 @@
} \
} while (0)
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define CTL_RO_CLGEN(c, l, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ if (l) \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+RETURN: \
+ if (l) \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_CGEN(c, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+RETURN: \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
@@ -1021,7 +974,26 @@
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
-#define CTL_RO_NL_GEN(n, v, t) \
+#define CTL_RO_NL_CGEN(c, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = v; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+RETURN: \
+ return (ret); \
+}
+
+#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
@@ -1038,7 +1010,7 @@
return (ret); \
}
-#define CTL_RO_TRUE_GEN(n) \
+#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
@@ -1046,25 +1018,10 @@
int ret; \
bool oldval; \
\
+ if (n == false) \
+ return (ENOENT); \
READONLY(); \
- oldval = true; \
- READ(oldval, bool); \
- \
- ret = 0; \
-RETURN: \
- return (ret); \
-}
-
-#define CTL_RO_FALSE_GEN(n) \
-static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
- int ret; \
- bool oldval; \
- \
- READONLY(); \
- oldval = false; \
+ oldval = n; \
READ(oldval, bool); \
\
ret = 0; \
@@ -1094,7 +1051,6 @@
return (ret);
}
-#ifdef JEMALLOC_TCACHE
static int
tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
@@ -1102,6 +1058,9 @@
int ret;
tcache_t *tcache;
+ if (config_tcache == false)
+ return (ENOENT);
+
VOID();
tcache = TCACHE_GET();
@@ -1116,7 +1075,6 @@
RETURN:
return (ret);
}
-#endif
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1151,13 +1109,11 @@
/* Set new arena association. */
ARENA_SET(arena);
-#ifdef JEMALLOC_TCACHE
- {
+ if (config_tcache) {
tcache_t *tcache = TCACHE_GET();
if (tcache != NULL)
tcache->arena = arena;
}
-#endif
}
ret = 0;
@@ -1165,104 +1121,29 @@
return (ret);
}
-#ifdef JEMALLOC_STATS
-CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_allocatedp, ALLOCATEDP_GET(), uint64_t *);
-CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_deallocatedp, DEALLOCATEDP_GET(), uint64_t *);
-#endif
+CTL_RO_NL_CGEN(config_stats, thread_allocated, ALLOCATED_GET(), uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_allocatedp, ALLOCATEDP_GET(), uint64_t *)
+CTL_RO_NL_CGEN(config_stats, thread_deallocated, DEALLOCATED_GET(), uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, DEALLOCATEDP_GET(),
+ uint64_t *)
/******************************************************************************/
-#ifdef JEMALLOC_DEBUG
-CTL_RO_TRUE_GEN(config_debug)
-#else
-CTL_RO_FALSE_GEN(config_debug)
-#endif
-
-#ifdef JEMALLOC_DSS
-CTL_RO_TRUE_GEN(config_dss)
-#else
-CTL_RO_FALSE_GEN(config_dss)
-#endif
-
-#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
-CTL_RO_TRUE_GEN(config_dynamic_page_shift)
-#else
-CTL_RO_FALSE_GEN(config_dynamic_page_shift)
-#endif
-
-#ifdef JEMALLOC_FILL
-CTL_RO_TRUE_GEN(config_fill)
-#else
-CTL_RO_FALSE_GEN(config_fill)
-#endif
-
-#ifdef JEMALLOC_LAZY_LOCK
-CTL_RO_TRUE_GEN(config_lazy_lock)
-#else
-CTL_RO_FALSE_GEN(config_lazy_lock)
-#endif
-
-#ifdef JEMALLOC_PROF
-CTL_RO_TRUE_GEN(config_prof)
-#else
-CTL_RO_FALSE_GEN(config_prof)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-CTL_RO_TRUE_GEN(config_prof_libgcc)
-#else
-CTL_RO_FALSE_GEN(config_prof_libgcc)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-CTL_RO_TRUE_GEN(config_prof_libunwind)
-#else
-CTL_RO_FALSE_GEN(config_prof_libunwind)
-#endif
-
-#ifdef JEMALLOC_STATS
-CTL_RO_TRUE_GEN(config_stats)
-#else
-CTL_RO_FALSE_GEN(config_stats)
-#endif
-
-#ifdef JEMALLOC_SWAP
-CTL_RO_TRUE_GEN(config_swap)
-#else
-CTL_RO_FALSE_GEN(config_swap)
-#endif
-
-#ifdef JEMALLOC_SYSV
-CTL_RO_TRUE_GEN(config_sysv)
-#else
-CTL_RO_FALSE_GEN(config_sysv)
-#endif
-
-#ifdef JEMALLOC_TCACHE
-CTL_RO_TRUE_GEN(config_tcache)
-#else
-CTL_RO_FALSE_GEN(config_tcache)
-#endif
-
-#ifdef JEMALLOC_TINY
-CTL_RO_TRUE_GEN(config_tiny)
-#else
-CTL_RO_FALSE_GEN(config_tiny)
-#endif
-
-#ifdef JEMALLOC_TLS
-CTL_RO_TRUE_GEN(config_tls)
-#else
-CTL_RO_FALSE_GEN(config_tls)
-#endif
-
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_TRUE_GEN(config_xmalloc)
-#else
-CTL_RO_FALSE_GEN(config_xmalloc)
-#endif
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_dynamic_page_shift)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_swap)
+CTL_RO_BOOL_CONFIG_GEN(config_sysv)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tiny)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
@@ -1273,35 +1154,24 @@
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-#ifdef JEMALLOC_FILL
-CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
-CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
-#endif
-#ifdef JEMALLOC_SYSV
-CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
-#endif
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
-#endif
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
-CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
-#endif
-#ifdef JEMALLOC_PROF
-CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
-CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
-#endif
-#ifdef JEMALLOC_SWAP
-CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
-#endif
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_sysv, opt_sysv, opt_sysv, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep,
+ ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
+CTL_RO_NL_CGEN(config_swap, opt_overcommit, opt_overcommit, bool)
/******************************************************************************/
@@ -1360,27 +1230,21 @@
CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
-#ifdef JEMALLOC_TINY
-CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
-CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
-#endif
+CTL_RO_NL_CGEN(config_tiny, arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
+CTL_RO_NL_CGEN(config_tiny, arenas_tspace_max, (qspace_min >> 1), size_t)
CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
-#endif
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
-#endif
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
static int
@@ -1423,7 +1287,6 @@
/******************************************************************************/
-#ifdef JEMALLOC_PROF
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
@@ -1431,6 +1294,9 @@
int ret;
bool oldval;
+ if (config_prof == false)
+ return (ENOENT);
+
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
@@ -1457,6 +1323,9 @@
int ret;
const char *filename = NULL;
+ if (config_prof == false)
+ return (ENOENT);
+
WRITEONLY();
WRITE(filename, const char *);
@@ -1470,56 +1339,53 @@
return (ret);
}
-CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
-#endif
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t)
-CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_allocated,
+CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+ size_t)
+CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
+CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
-CTL_RO_GEN(stats_arenas_i_small_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_GEN(stats_arenas_i_large_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(stats_arenas_i_bins_j_nfills,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nflushes,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-#endif
-CTL_RO_GEN(stats_arenas_i_bins_j_nruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nreruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_highruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_highruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
const ctl_node_t *
@@ -1531,15 +1397,15 @@
return (super_stats_arenas_i_bins_j_node);
}
-CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_highruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)
const ctl_node_t *
@@ -1551,20 +1417,17 @@
return (super_stats_arenas_i_lruns_j_node);
}
-#endif
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
- size_t)
-CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
- uint64_t)
-CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
- uint64_t)
-CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
- uint64_t)
-#endif
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
const ctl_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
@@ -1583,19 +1446,15 @@
return (ret);
}
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
-CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
-CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
-#endif
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
/******************************************************************************/
-#ifdef JEMALLOC_SWAP
-# ifdef JEMALLOC_STATS
-CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
-# endif
+CTL_RO_CGEN(config_swap && config_stats, swap_avail, ctl_stats.swap_avail,
+ size_t)
static int
swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1603,6 +1462,9 @@
{
int ret;
+ if (config_swap == false)
+ return (ENOENT);
+
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
@@ -1625,7 +1487,7 @@
return (ret);
}
-CTL_RO_GEN(swap_nfds, swap_nfds, size_t)
+CTL_RO_CGEN(config_swap, swap_nfds, swap_nfds, size_t)
static int
swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1633,6 +1495,9 @@
{
int ret;
+ if (config_swap == false)
+ return (ENOENT);
+
malloc_mutex_lock(&ctl_mtx);
if (swap_enabled) {
READONLY();
@@ -1667,4 +1532,3 @@
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
-#endif
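Note on the ctl macros: the CTL_RO_CGEN/CTL_RO_NL_CGEN/CTL_RO_CLGEN/CTL_RO_BOOL_CONFIG_GEN
handlers are now compiled in every configuration and report ENOENT at run time
when their guarding config_* constant is false, rather than the node being
compiled out of the mallctl tree. Expanded by hand (a rough sketch; READONLY()
and READ() are the existing ctl.c helpers that bail to the RETURN label),
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) generates
approximately:
static int
prof_interval_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	uint64_t oldval;
	if ((config_prof) == false)
		return (ENOENT);
	READONLY();
	oldval = prof_interval;
	READ(oldval, uint64_t);
	ret = 0;
RETURN:
	return (ret);
}
Because config_prof is a compile-time constant, the early return folds away in
prof-enabled builds, and the whole body reduces to return (ENOENT) otherwise.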
diff --git a/src/extent.c b/src/extent.c
index 3c04d3a..8c09b48 100644
--- a/src/extent.c
+++ b/src/extent.c
@@ -3,7 +3,6 @@
/******************************************************************************/
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
@@ -25,7 +24,6 @@
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
-#endif
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
diff --git a/src/huge.c b/src/huge.c
index a4f9b05..1eee436 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */
-#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
-#endif
malloc_mutex_t huge_mtx;
@@ -49,21 +47,19 @@
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
-#endif
+ if (config_stats) {
+ stats_cactive_add(csize);
+ huge_nmalloc++;
+ huge_allocated += csize;
+ }
malloc_mutex_unlock(&huge_mtx);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
memset(ret, 0, csize);
}
-#endif
return (ret);
}
@@ -134,21 +130,19 @@
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_add(chunk_size);
- huge_nmalloc++;
- huge_allocated += chunk_size;
-#endif
+ if (config_stats) {
+ stats_cactive_add(chunk_size);
+ huge_nmalloc++;
+ huge_allocated += chunk_size;
+ }
malloc_mutex_unlock(&huge_mtx);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
-#endif
return (ret);
}
@@ -164,12 +158,10 @@
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
-#endif
return (ptr);
}
@@ -223,15 +215,10 @@
* source nor the destination are in swap or dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
- if (oldsize >= chunksize
-# ifdef JEMALLOC_SWAP
- && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
- chunk_in_swap(ret) == false))
-# endif
-# ifdef JEMALLOC_DSS
- && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-# endif
- ) {
+ if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
+ false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
+ false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
+ chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
@@ -285,23 +272,16 @@
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
-#endif
+ if (config_stats) {
+ stats_cactive_sub(node->size);
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ }
malloc_mutex_unlock(&huge_mtx);
- if (unmap) {
- /* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
- if (opt_junk)
- memset(node->addr, 0x5a, node->size);
-#endif
-#endif
- }
+ if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+ memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
@@ -328,7 +308,6 @@
return (ret);
}
-#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@@ -365,7 +344,6 @@
malloc_mutex_unlock(&huge_mtx);
}
-#endif
bool
huge_boot(void)
@@ -376,11 +354,11 @@
return (true);
extent_tree_ad_new(&huge);
-#ifdef JEMALLOC_STATS
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
-#endif
+ if (config_stats) {
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+ }
return (false);
}
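The if (config_stats) blocks above (and throughout this patch) work because
the config_* symbols are compile-time constant booleans, so an optimizing
compiler removes the guarded statements entirely when the feature is disabled,
while the code is still parsed and type-checked in every configuration. The
constants are defined in the internal headers, roughly along these lines (a
sketch of the pattern, not part of this hunk):
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
With that in place, if (config_stats) { stats_cactive_add(csize); ... } in
huge_malloc() compiles to nothing in a non-stats build, matching what the
removed #ifdef JEMALLOC_STATS block did, minus the preprocessor noise.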
diff --git a/src/jemalloc.c b/src/jemalloc.c
index a161c2e..9e1814d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -13,13 +13,10 @@
__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#endif
-#ifdef JEMALLOC_STATS
-# ifndef NO_TLS
+#ifndef NO_TLS
__thread thread_allocated_t thread_allocated_tls;
-# else
-pthread_key_t thread_allocated_tsd;
-# endif
#endif
+pthread_key_t thread_allocated_tsd;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
@@ -28,13 +25,7 @@
static pthread_t malloc_initializer = (unsigned long)0;
/* Used to avoid initialization races. */
-static malloc_mutex_t init_lock =
-#ifdef JEMALLOC_OSSPIN
- 0
-#else
- MALLOC_MUTEX_INITIALIZER
-#endif
- ;
+static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#ifdef DYNAMIC_PAGE_SHIFT
size_t pagesize;
@@ -50,22 +41,16 @@
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
+# else
+bool opt_junk = false;
# endif
#else
bool opt_abort = false;
-# ifdef JEMALLOC_FILL
bool opt_junk = false;
-# endif
#endif
-#ifdef JEMALLOC_SYSV
bool opt_sysv = false;
-#endif
-#ifdef JEMALLOC_XMALLOC
bool opt_xmalloc = false;
-#endif
-#ifdef JEMALLOC_FILL
bool opt_zero = false;
-#endif
size_t opt_narenas = 0;
/******************************************************************************/
@@ -75,7 +60,7 @@
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
static void arenas_cleanup(void *arg);
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
static void thread_allocated_cleanup(void *arg);
#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
@@ -89,22 +74,11 @@
/******************************************************************************/
/* malloc_message() setup. */
-#ifdef JEMALLOC_HAVE_ATTR
-JEMALLOC_ATTR(visibility("hidden"))
-#else
-static
-#endif
+JEMALLOC_CATTR(visibility("hidden"), static)
void
wrtmessage(void *cbopaque, const char *s)
{
-#ifdef JEMALLOC_CC_SILENCE
- int result =
-#endif
- write(STDERR_FILENO, s, strlen(s));
-#ifdef JEMALLOC_CC_SILENCE
- if (result < 0)
- result = errno;
-#endif
+ UNUSED int result = write(STDERR_FILENO, s, strlen(s));
}
void (*JEMALLOC_P(malloc_message))(void *, const char *s)
@@ -229,37 +203,38 @@
stats_print_atexit(void)
{
-#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
- unsigned i;
+ if (config_tcache && config_stats) {
+ unsigned i;
- /*
- * Merge stats from extant threads. This is racy, since individual
- * threads do not lock when recording tcache stats events. As a
- * consequence, the final stats may be slightly out of date by the time
- * they are reported, if other threads continue to allocate.
- */
- for (i = 0; i < narenas; i++) {
- arena_t *arena = arenas[i];
- if (arena != NULL) {
- tcache_t *tcache;
+ /*
+ * Merge stats from extant threads. This is racy, since
+ * individual threads do not lock when recording tcache stats
+ * events. As a consequence, the final stats may be slightly
+ * out of date by the time they are reported, if other threads
+ * continue to allocate.
+ */
+ for (i = 0; i < narenas; i++) {
+ arena_t *arena = arenas[i];
+ if (arena != NULL) {
+ tcache_t *tcache;
- /*
- * tcache_stats_merge() locks bins, so if any code is
- * introduced that acquires both arena and bin locks in
- * the opposite order, deadlocks may result.
- */
- malloc_mutex_lock(&arena->lock);
- ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tcache, arena);
+ /*
+ * tcache_stats_merge() locks bins, so if any
+ * code is introduced that acquires both arena
+ * and bin locks in the opposite order,
+ * deadlocks may result.
+ */
+ malloc_mutex_lock(&arena->lock);
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ tcache_stats_merge(tcache, arena);
+ }
+ malloc_mutex_unlock(&arena->lock);
}
- malloc_mutex_unlock(&arena->lock);
}
}
-#endif
JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
@@ -279,7 +254,6 @@
thread_allocated->deallocated = 0;
return (thread_allocated);
}
-#endif
/*
* End miscellaneous support functions.
@@ -315,7 +289,7 @@
malloc_mutex_unlock(&arenas_lock);
}
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
static void
thread_allocated_cleanup(void *arg)
{
@@ -603,41 +577,42 @@
CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
(sizeof(size_t) << 3) - 1)
CONF_HANDLE_BOOL(stats_print)
-#ifdef JEMALLOC_FILL
- CONF_HANDLE_BOOL(junk)
- CONF_HANDLE_BOOL(zero)
-#endif
-#ifdef JEMALLOC_SYSV
- CONF_HANDLE_BOOL(sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
- CONF_HANDLE_BOOL(xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
- CONF_HANDLE_BOOL(tcache)
- CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
- (sizeof(size_t) << 3) - 1)
-#endif
-#ifdef JEMALLOC_PROF
- CONF_HANDLE_BOOL(prof)
- CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
- CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
- CONF_HANDLE_BOOL(prof_active)
- CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(prof_accum)
- CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(prof_gdump)
- CONF_HANDLE_BOOL(prof_leak)
-#endif
-#ifdef JEMALLOC_SWAP
- CONF_HANDLE_BOOL(overcommit)
-#endif
+ if (config_fill) {
+ CONF_HANDLE_BOOL(junk)
+ CONF_HANDLE_BOOL(zero)
+ }
+ if (config_sysv) {
+ CONF_HANDLE_BOOL(sysv)
+ }
+ if (config_xmalloc) {
+ CONF_HANDLE_BOOL(xmalloc)
+ }
+ if (config_tcache) {
+ CONF_HANDLE_BOOL(tcache)
+ CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
+ (sizeof(size_t) << 3) - 1)
+ }
+ if (config_prof) {
+ CONF_HANDLE_BOOL(prof)
+ CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
+ CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0,
+ LG_PROF_BT_MAX)
+ CONF_HANDLE_BOOL(prof_active)
+ CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(prof_accum)
+ CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
+ (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(prof_gdump)
+ CONF_HANDLE_BOOL(prof_leak)
+ }
+ if (config_swap) {
+ CONF_HANDLE_BOOL(overcommit)
+ }
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_HANDLE_BOOL
@@ -701,9 +676,8 @@
}
#endif
-#ifdef JEMALLOC_PROF
- prof_boot0();
-#endif
+ if (config_prof)
+ prof_boot0();
malloc_conf_init();
@@ -739,31 +713,28 @@
return (true);
}
-#ifdef JEMALLOC_PROF
- prof_boot1();
-#endif
+ if (config_prof)
+ prof_boot1();
if (arena_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#ifdef JEMALLOC_TCACHE
- if (tcache_boot()) {
+ if (config_tcache && tcache_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#endif
if (huge_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
+#ifdef NO_TLS
/* Initialize allocation counters before any allocations can occur. */
- if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
- != 0) {
+ if (config_stats && pthread_key_create(&thread_allocated_tsd,
+ thread_allocated_cleanup) != 0) {
malloc_mutex_unlock(&init_lock);
return (true);
}
@@ -803,12 +774,10 @@
ARENA_SET(arenas[0]);
arenas[0]->nthreads++;
-#ifdef JEMALLOC_PROF
- if (prof_boot2()) {
+ if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#endif
/* Get number of CPUs. */
malloc_initializer = pthread_self();
@@ -897,20 +866,8 @@
JEMALLOC_P(malloc)(size_t size)
{
void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ size_t usize;
+ prof_thr_cnt_t *cnt;
if (malloc_init()) {
ret = NULL;
@@ -918,27 +875,20 @@
}
if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
+ if (config_sysv == false || opt_sysv == false)
size = 1;
-#ifdef JEMALLOC_SYSV
else {
-# ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"invalid size 0\n");
abort();
}
-# endif
ret = NULL;
goto RETURN;
}
-#endif
}
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(size);
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) {
@@ -952,47 +902,36 @@
arena_prof_promoted(ret, usize);
} else
ret = imalloc(size);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ } else {
+ if (config_stats)
+ usize = s2u(size);
ret = imalloc(size);
}
OOM:
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
-#endif
errno = ENOMEM;
}
-#ifdef JEMALLOC_SYSV
RETURN:
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof && ret != NULL)
+ if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
+ if (config_stats && ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, 0);
}
-#endif
return (ret);
}
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
- * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
* PROF_ALLOC_PREP().
*/
JEMALLOC_ATTR(noinline)
@@ -1001,56 +940,38 @@
imemalign(void **memptr, size_t alignment, size_t size)
{
int ret;
- size_t usize
-#ifdef JEMALLOC_CC_SILENCE
- = 0
-#endif
- ;
+ size_t usize;
void *result;
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ prof_thr_cnt_t *cnt;
if (malloc_init())
result = NULL;
else {
if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
+ if (config_sysv == false || opt_sysv == false)
size = 1;
-#ifdef JEMALLOC_SYSV
else {
-# ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in "
"posix_memalign(): invalid size "
"0\n");
abort();
}
-# endif
result = NULL;
*memptr = NULL;
ret = 0;
goto RETURN;
}
-#endif
}
/* Make sure that alignment is a large enough power of 2. */
if (((alignment - 1) & alignment) != 0
|| alignment < sizeof(void *)) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in "
"posix_memalign(): invalid alignment\n");
abort();
}
-#endif
result = NULL;
ret = EINVAL;
goto RETURN;
@@ -1063,8 +984,7 @@
goto RETURN;
}
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
PROF_ALLOC_PREP(2, usize, cnt);
if (cnt == NULL) {
result = NULL;
@@ -1086,18 +1006,15 @@
}
}
} else
-#endif
result = ipalloc(usize, alignment, false);
}
if (result == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in posix_memalign(): "
"out of memory\n");
abort();
}
-#endif
ret = ENOMEM;
goto RETURN;
}
@@ -1106,16 +1023,12 @@
ret = 0;
RETURN:
-#ifdef JEMALLOC_STATS
- if (result != NULL) {
+ if (config_stats && result != NULL) {
assert(usize == isalloc(result));
ALLOCATED_ADD(usize, 0);
}
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof && result != NULL)
+ if (config_prof && opt_prof && result != NULL)
prof_malloc(result, usize, cnt);
-#endif
return (ret);
}
@@ -1135,20 +1048,8 @@
{
void *ret;
size_t num_size;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ size_t usize;
+ prof_thr_cnt_t *cnt;
if (malloc_init()) {
num_size = 0;
@@ -1158,16 +1059,13 @@
num_size = num * size;
if (num_size == 0) {
-#ifdef JEMALLOC_SYSV
- if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
+ if ((config_sysv == false || opt_sysv == false)
+ && ((num == 0) || (size == 0)))
num_size = 1;
-#ifdef JEMALLOC_SYSV
else {
ret = NULL;
goto RETURN;
}
-#endif
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
@@ -1180,8 +1078,7 @@
goto RETURN;
}
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(num_size);
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL) {
@@ -1195,37 +1092,28 @@
arena_prof_promoted(ret, usize);
} else
ret = icalloc(num_size);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(num_size);
-#endif
+ } else {
+ if (config_stats)
+ usize = s2u(num_size);
ret = icalloc(num_size);
}
RETURN:
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
-#endif
errno = ENOMEM;
}
-#ifdef JEMALLOC_PROF
- if (opt_prof && ret != NULL)
+ if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
+ if (config_stats && ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, 0);
}
-#endif
return (ret);
}
@@ -1234,67 +1122,39 @@
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
+ size_t usize;
size_t old_size = 0;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
- prof_ctx_t *old_ctx
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ prof_thr_cnt_t *cnt;
+ prof_ctx_t *old_ctx;
if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
+ if (config_sysv == false || opt_sysv == false)
size = 1;
-#ifdef JEMALLOC_SYSV
else {
if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof || config_stats)
+ old_size = isalloc(ptr);
+ if (config_prof && opt_prof) {
old_ctx = prof_ctx_get(ptr);
cnt = NULL;
}
-#endif
idalloc(ptr);
- }
-#ifdef JEMALLOC_PROF
- else if (opt_prof) {
+ } else if (config_prof && opt_prof) {
old_ctx = NULL;
cnt = NULL;
}
-#endif
ret = NULL;
goto RETURN;
}
-#endif
}
if (ptr != NULL) {
assert(malloc_initialized || malloc_initializer ==
pthread_self());
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof || config_stats)
+ old_size = isalloc(ptr);
+ if (config_prof && opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
PROF_ALLOC_PREP(1, usize, cnt);
@@ -1316,42 +1176,30 @@
if (ret == NULL)
old_ctx = NULL;
}
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ } else {
+ if (config_stats)
+ usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false, false);
}
-#ifdef JEMALLOC_PROF
OOM:
-#endif
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
-#endif
errno = ENOMEM;
}
} else {
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+ if (config_prof && opt_prof)
old_ctx = NULL;
-#endif
if (malloc_init()) {
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+ if (config_prof && opt_prof)
cnt = NULL;
-#endif
ret = NULL;
} else {
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(size);
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL)
@@ -1368,41 +1216,30 @@
} else
ret = imalloc(size);
}
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ } else {
+ if (config_stats)
+ usize = s2u(size);
ret = imalloc(size);
}
}
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
-#endif
errno = ENOMEM;
}
}
-#ifdef JEMALLOC_SYSV
RETURN:
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+ if (config_prof && opt_prof)
prof_realloc(ret, usize, cnt, old_size, old_ctx);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
+ if (config_stats && ret != NULL) {
assert(usize == isalloc(ret));
ALLOCATED_ADD(usize, old_size);
}
-#endif
return (ret);
}
@@ -1412,27 +1249,19 @@
{
if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
-#endif
assert(malloc_initialized || malloc_initializer ==
pthread_self());
-#ifdef JEMALLOC_STATS
- usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
-# ifndef JEMALLOC_STATS
+ if (config_prof && opt_prof) {
usize = isalloc(ptr);
-# endif
prof_free(ptr, usize);
+ } else if (config_stats) {
+ usize = isalloc(ptr);
}
-#endif
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(0, usize);
-#endif
+ if (config_stats)
+ ALLOCATED_ADD(0, usize);
idalloc(ptr);
}
}
@@ -1455,15 +1284,12 @@
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
- void *ret;
+ void *ret
#ifdef JEMALLOC_CC_SILENCE
- int result =
+ = NULL
#endif
- imemalign(&ret, alignment, size);
-#ifdef JEMALLOC_CC_SILENCE
- if (result != 0)
- return (NULL);
-#endif
+ ;
+ imemalign(&ret, alignment, size);
return (ret);
}
#endif
@@ -1474,15 +1300,12 @@
void *
JEMALLOC_P(valloc)(size_t size)
{
- void *ret;
+ void *ret
#ifdef JEMALLOC_CC_SILENCE
- int result =
+ = NULL
#endif
- imemalign(&ret, PAGE_SIZE, size);
-#ifdef JEMALLOC_CC_SILENCE
- if (result != 0)
- return (NULL);
-#endif
+ ;
+ imemalign(&ret, PAGE_SIZE, size);
return (ret);
}
#endif
@@ -1504,12 +1327,12 @@
assert(malloc_initialized || malloc_initializer == pthread_self());
-#ifdef JEMALLOC_IVSALLOC
- ret = ivsalloc(ptr);
-#else
- assert(ptr != NULL);
- ret = isalloc(ptr);
-#endif
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr);
+ else {
+ assert(ptr != NULL);
+ ret = isalloc(ptr);
+ }
return (ret);
}
@@ -1583,9 +1406,7 @@
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
-#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
-#endif
assert(ptr != NULL);
assert(size != 0);
@@ -1597,8 +1418,7 @@
if (usize == 0)
goto OOM;
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
PROF_ALLOC_PREP(1, usize, cnt);
if (cnt == NULL)
goto OOM;
@@ -1618,39 +1438,26 @@
goto OOM;
}
prof_malloc(p, usize, cnt);
- if (rsize != NULL)
- *rsize = usize;
- } else
-#endif
- {
+ } else {
p = iallocm(usize, alignment, zero);
if (p == NULL)
goto OOM;
-#ifndef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- {
-#ifdef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- *rsize = usize;
- }
}
+ if (rsize != NULL)
+ *rsize = usize;
*ptr = p;
-#ifdef JEMALLOC_STATS
- assert(usize == isalloc(p));
- ALLOCATED_ADD(usize, 0);
-#endif
+ if (config_stats) {
+ assert(usize == isalloc(p));
+ ALLOCATED_ADD(usize, 0);
+ }
return (ALLOCM_SUCCESS);
OOM:
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in allocm(): "
"out of memory\n");
abort();
}
-#endif
*ptr = NULL;
return (ALLOCM_ERR_OOM);
}
@@ -1663,16 +1470,12 @@
{
void *p, *q;
size_t usize;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t old_size;
-#endif
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
-#ifdef JEMALLOC_PROF
prof_thr_cnt_t *cnt;
-#endif
assert(ptr != NULL);
assert(*ptr != NULL);
@@ -1681,8 +1484,7 @@
assert(malloc_initialized || malloc_initializer == pthread_self());
p = *ptr;
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
/*
* usize isn't knowable before iralloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
@@ -1722,45 +1524,34 @@
prof_realloc(q, usize, cnt, old_size, old_ctx);
if (rsize != NULL)
*rsize = usize;
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- old_size = isalloc(p);
-#endif
+ } else {
+ if (config_stats)
+ old_size = isalloc(p);
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto ERR;
-#ifndef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- {
+ if (config_stats)
usize = isalloc(q);
-#ifdef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- *rsize = usize;
+ if (rsize != NULL) {
+ if (config_stats == false)
+ usize = isalloc(q);
+ *rsize = usize;
}
}
*ptr = q;
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(usize, old_size);
-#endif
+ if (config_stats)
+ ALLOCATED_ADD(usize, old_size);
return (ALLOCM_SUCCESS);
ERR:
if (no_move)
return (ALLOCM_ERR_NOT_MOVED);
-#ifdef JEMALLOC_PROF
OOM:
-#endif
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocm(): "
"out of memory\n");
abort();
}
-#endif
return (ALLOCM_ERR_OOM);
}
@@ -1773,12 +1564,12 @@
assert(malloc_initialized || malloc_initializer == pthread_self());
-#ifdef JEMALLOC_IVSALLOC
- sz = ivsalloc(ptr);
-#else
- assert(ptr != NULL);
- sz = isalloc(ptr);
-#endif
+ if (config_ivsalloc)
+ sz = ivsalloc(ptr);
+ else {
+ assert(ptr != NULL);
+ sz = isalloc(ptr);
+ }
assert(rsize != NULL);
*rsize = sz;
@@ -1790,27 +1581,20 @@
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
-#endif
assert(ptr != NULL);
assert(malloc_initialized || malloc_initializer == pthread_self());
-#ifdef JEMALLOC_STATS
- usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
-# ifndef JEMALLOC_STATS
+ if (config_stats)
usize = isalloc(ptr);
-# endif
+ if (config_prof && opt_prof) {
+ if (config_stats == false)
+ usize = isalloc(ptr);
prof_free(ptr, usize);
}
-#endif
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(0, usize);
-#endif
+ if (config_stats)
+ ALLOCATED_ADD(0, usize);
idalloc(ptr);
return (ALLOCM_SUCCESS);
@@ -1843,13 +1627,11 @@
malloc_mutex_lock(&huge_mtx);
-#ifdef JEMALLOC_DSS
- malloc_mutex_lock(&dss_mtx);
-#endif
+ if (config_dss)
+ malloc_mutex_lock(&dss_mtx);
-#ifdef JEMALLOC_SWAP
- malloc_mutex_lock(&swap_mtx);
-#endif
+ if (config_swap)
+ malloc_mutex_lock(&swap_mtx);
}
void
@@ -1859,13 +1641,11 @@
/* Release all mutexes, now that fork() has completed. */
-#ifdef JEMALLOC_SWAP
- malloc_mutex_unlock(&swap_mtx);
-#endif
+ if (config_swap)
+ malloc_mutex_unlock(&swap_mtx);
-#ifdef JEMALLOC_DSS
- malloc_mutex_unlock(&dss_mtx);
-#endif
+ if (config_dss)
+ malloc_mutex_unlock(&dss_mtx);
malloc_mutex_unlock(&huge_mtx);
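A related idiom in the jemalloc.c changes above is folding the old "#ifdef
around an if" shape into a single condition, e.g.:
#ifdef JEMALLOC_SYSV
	if (opt_sysv == false)
#endif
		size = 1;
becomes:
	if (config_sysv == false || opt_sysv == false)
		size = 1;
When sysv support is compiled out, config_sysv == false is constant-true, the
opt_sysv operand is never evaluated, and size = 1 executes unconditionally,
exactly as when the old if line was preprocessed away; when it is compiled in,
the test reduces to the original run-time check. The
if (config_xmalloc && opt_xmalloc) error paths are the same trick with &&.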
diff --git a/src/prof.c b/src/prof.c
index 8a144b4..113cf15 100644
--- a/src/prof.c
+++ b/src/prof.c
@@ -1,6 +1,5 @@
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
@@ -102,6 +101,8 @@
bt_init(prof_bt_t *bt, void **vec)
{
+ cassert(config_prof);
+
bt->vec = vec;
bt->len = 0;
}
@@ -110,6 +111,8 @@
bt_destroy(prof_bt_t *bt)
{
+ cassert(config_prof);
+
idalloc(bt);
}
@@ -118,6 +121,8 @@
{
prof_bt_t *ret;
+ cassert(config_prof);
+
/*
* Create a single allocation that has space for vec immediately
* following the prof_bt_t structure. The backtraces that get
@@ -141,6 +146,8 @@
prof_enter(void)
{
+ cassert(config_prof);
+
malloc_mutex_lock(&enq_mtx);
enq = true;
malloc_mutex_unlock(&enq_mtx);
@@ -153,6 +160,8 @@
{
bool idump, gdump;
+ cassert(config_prof);
+
malloc_mutex_unlock(&bt2ctx_mtx);
malloc_mutex_lock(&enq_mtx);
@@ -178,6 +187,7 @@
unsigned i;
int err;
+ cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
assert(max <= (1U << opt_lg_prof_bt_max));
@@ -204,12 +214,13 @@
break;
}
}
-#endif
-#ifdef JEMALLOC_PROF_LIBGCC
+#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{
+ cassert(config_prof);
+
return (_URC_NO_REASON);
}
@@ -218,6 +229,8 @@
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ cassert(config_prof);
+
if (data->nignore > 0)
data->nignore--;
else {
@@ -235,10 +248,11 @@
{
prof_unwind_data_t data = {bt, nignore, max};
+ cassert(config_prof);
+
_Unwind_Backtrace(prof_unwind_callback, &data);
}
-#endif
-#ifdef JEMALLOC_PROF_GCC
+#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
@@ -257,6 +271,7 @@
} else \
return;
+ cassert(config_prof);
assert(nignore <= 3);
assert(max <= (1U << opt_lg_prof_bt_max));
@@ -407,6 +422,14 @@
BT_FRAME(130)
#undef BT_FRAME
}
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+{
+
+ cassert(config_prof);
+ assert(false);
+}
#endif
prof_thr_cnt_t *
@@ -418,6 +441,8 @@
} ret;
prof_tdata_t *prof_tdata;
+ cassert(config_prof);
+
prof_tdata = PROF_TCACHE_GET();
if (prof_tdata == NULL) {
prof_tdata = prof_tdata_init();
@@ -553,6 +578,8 @@
bool ret = false;
ssize_t err;
+ cassert(config_prof);
+
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (propagate_err == false) {
@@ -573,6 +600,8 @@
{
unsigned i, slen, n;
+ cassert(config_prof);
+
i = 0;
slen = strlen(s);
while (i < slen) {
@@ -602,6 +631,8 @@
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
+ cassert(config_prof);
+
malloc_mutex_lock(&ctx->lock);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
@@ -648,6 +679,8 @@
prof_ctx_destroy(prof_ctx_t *ctx)
{
+ cassert(config_prof);
+
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() artificially raises ctx->cnt_merge.curobjs in
@@ -686,6 +719,8 @@
{
bool destroy;
+ cassert(config_prof);
+
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(&ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
@@ -723,6 +758,8 @@
char buf[UMAX2S_BUFSIZE];
unsigned i;
+ cassert(config_prof);
+
if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
@@ -767,6 +804,8 @@
char mpath[6 + UMAX2S_BUFSIZE
+ 5 + 1];
+ cassert(config_prof);
+
i = 0;
s = "/proc/";
@@ -827,6 +866,8 @@
char buf[UMAX2S_BUFSIZE];
size_t leak_nctx;
+ cassert(config_prof);
+
prof_enter();
prof_dump_fd = creat(filename, 0644);
if (prof_dump_fd == -1) {
@@ -917,6 +958,8 @@
char *s;
unsigned i, slen;
+ cassert(config_prof);
+
/*
* Construct a filename of the form:
*
@@ -979,6 +1022,8 @@
{
char filename[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (prof_booted == false)
return;
@@ -995,6 +1040,8 @@
{
char filename[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
@@ -1019,6 +1066,8 @@
{
char filename_buf[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (opt_prof == false || prof_booted == false)
return (true);
@@ -1040,6 +1089,8 @@
{
char filename[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
@@ -1066,6 +1117,7 @@
uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
+ cassert(config_prof);
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
@@ -1094,6 +1146,8 @@
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
+ cassert(config_prof);
+
if (bt1->len != bt2->len)
return (false);
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
@@ -1104,6 +1158,8 @@
{
prof_tdata_t *prof_tdata;
+ cassert(config_prof);
+
/* Initialize an empty cache for this thread. */
prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
if (prof_tdata == NULL)
@@ -1138,6 +1194,8 @@
prof_thr_cnt_t *cnt;
prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
+ cassert(config_prof);
+
/*
* Delete the hash table. All of its contents can still be iterated
* over via the LRU.
@@ -1161,6 +1219,8 @@
prof_boot0(void)
{
+ cassert(config_prof);
+
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
@@ -1169,6 +1229,8 @@
prof_boot1(void)
{
+ cassert(config_prof);
+
/*
* opt_prof and prof_promote must be in their final state before any
* arenas are initialized, so this function must be executed early.
@@ -1197,6 +1259,8 @@
prof_boot2(void)
{
+ cassert(config_prof);
+
if (opt_prof) {
if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
@@ -1241,4 +1305,3 @@
}
/******************************************************************************/
-#endif /* JEMALLOC_PROF */
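prof.c is no longer wrapped in #ifdef JEMALLOC_PROF, so its functions exist in
every build; the cassert(config_prof) calls added throughout assert that none
of them is reachable unless profiling was configured in, and the new fallback
prof_backtrace() (cassert(config_prof); assert(false);) exists only to satisfy
the linker when no unwind backend is available. cassert() lives in the internal
headers; assuming it has roughly this shape (a sketch, not taken from this
patch):
#define	cassert(c) do {							\
	if ((c) == false)						\
		assert(false);						\
} while (0)
the check disappears entirely wherever the argument is a compile-time-true
constant, and trips an assertion in debug builds if a prof path is ever
entered in a no-prof build.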
diff --git a/src/stats.c b/src/stats.c
index dc172e4..e644653 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -39,14 +39,11 @@
bool opt_stats_print = false;
-#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
-#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
@@ -55,10 +52,10 @@
void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
-#endif
/******************************************************************************/
+/* XXX Refactor by adding malloc_vsnprintf(). */
/*
* We don't want to depend on vsnprintf() for production builds, since that can
* cause unnecessary bloat for static binaries. u2s() provides minimal integer
@@ -99,7 +96,6 @@
return (&s[i]);
}
-#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
@@ -149,9 +145,7 @@
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
-#endif
-#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
@@ -377,7 +371,6 @@
stats_arena_bins_print(write_cb, cbopaque, i);
stats_arena_lruns_print(write_cb, cbopaque, i);
}
-#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -674,8 +667,7 @@
write_cb(cbopaque, ")\n");
}
-#ifdef JEMALLOC_STATS
- {
+ if (config_stats) {
int err;
size_t sszp, ssz;
size_t *cactive;
@@ -785,6 +777,5 @@
}
}
}
-#endif /* #ifdef JEMALLOC_STATS */
write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}
diff --git a/src/tcache.c b/src/tcache.c
index 31c329e..398fc0a 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -38,31 +38,22 @@
{
void *ret;
- arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
- , tcache->prof_accumbytes
-#endif
- );
-#ifdef JEMALLOC_PROF
- tcache->prof_accumbytes = 0;
-#endif
+ arena_tcache_fill_small(tcache->arena, tbin, binind,
+ config_prof ? tcache->prof_accumbytes : 0);
+ if (config_prof)
+ tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache_t *tcache
-#endif
- )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
bool merged_stats = false;
-#endif
assert(binind < nbins);
assert(rem <= tbin->ncached);
@@ -74,25 +65,21 @@
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
-#ifdef JEMALLOC_PROF
- if (arena == tcache->arena) {
+ if (config_prof && arena == tcache->arena) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&arena->lock);
tcache->prof_accumbytes = 0;
}
-#endif
malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
- if (arena == tcache->arena) {
+ if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
-#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
@@ -117,8 +104,7 @@
}
malloc_mutex_unlock(&bin->lock);
}
-#ifdef JEMALLOC_STATS
- if (merged_stats == false) {
+ if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@@ -130,7 +116,6 @@
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
-#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@@ -140,17 +125,12 @@
}
void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache_t *tcache
-#endif
- )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
bool merged_stats = false;
-#endif
assert(binind < nhbins);
assert(rem <= tbin->ncached);
@@ -162,23 +142,21 @@
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
- arena_prof_accum(arena, tcache->prof_accumbytes);
- tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
- merged_stats = true;
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - nbins].nrequests +=
- tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
+ if ((config_prof || config_stats) && arena == tcache->arena) {
+ if (config_prof) {
+ arena_prof_accum(arena,
+ tcache->prof_accumbytes);
+ tcache->prof_accumbytes = 0;
+ }
+ if (config_stats) {
+ merged_stats = true;
+ arena->stats.nrequests_large +=
+ tbin->tstats.nrequests;
+ arena->stats.lstats[binind - nbins].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
}
-#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
@@ -199,8 +177,7 @@
}
malloc_mutex_unlock(&arena->lock);
}
-#ifdef JEMALLOC_STATS
- if (merged_stats == false) {
+ if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@@ -213,7 +190,6 @@
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
-#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@@ -254,13 +230,13 @@
if (tcache == NULL)
return (NULL);
-#ifdef JEMALLOC_STATS
- /* Link into list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&arena->lock);
-#endif
+ if (config_stats) {
+ /* Link into list of extant tcaches. */
+ malloc_mutex_lock(&arena->lock);
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&arena->lock);
+ }
tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
@@ -282,43 +258,32 @@
unsigned i;
size_t tcache_size;
-#ifdef JEMALLOC_STATS
- /* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&tcache->arena->lock);
- tcache_stats_merge(tcache, tcache->arena);
-#endif
+ if (config_stats) {
+ /* Unlink from list of extant tcaches. */
+ malloc_mutex_lock(&tcache->arena->lock);
+ ql_remove(&tcache->arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&tcache->arena->lock);
+ tcache_stats_merge(tcache, tcache->arena);
+ }
for (i = 0; i < nbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache
-#endif
- );
+ tcache_bin_flush_small(tbin, i, 0, tcache);
-#ifdef JEMALLOC_STATS
- if (tbin->tstats.nrequests != 0) {
+ if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
-#endif
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache
-#endif
- );
+ tcache_bin_flush_large(tbin, i, 0, tcache);
-#ifdef JEMALLOC_STATS
- if (tbin->tstats.nrequests != 0) {
+ if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
@@ -326,16 +291,13 @@
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
-#endif
}
-#ifdef JEMALLOC_PROF
- if (tcache->prof_accumbytes > 0) {
+ if (config_prof && tcache->prof_accumbytes > 0) {
malloc_mutex_lock(&tcache->arena->lock);
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&tcache->arena->lock);
}
-#endif
tcache_size = arena_salloc(tcache);
if (tcache_size <= small_maxclass) {
@@ -389,7 +351,6 @@
}
}
-#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
@@ -413,7 +374,6 @@
tbin->tstats.nrequests = 0;
}
}
-#endif
bool
tcache_boot(void)