Initialize arena_bin_info at compile time rather than at boot time.

Move the computation of each small size class's run size out of
bin_info_run_size_calc() and into size_classes.sh, which now emits a run
page count (pgs) field in each SC() tuple, plus a SLAB_MAXPGS constant
that replaces the boot-time small_maxrun variable. This allows
arena_bin_info to be a statically initialized const array, and makes
bin_info_init() and bin_info_run_size_calc() unnecessary.

As an example of the run size computation, with 4 KiB pages the 80 byte
size class gets 5 page runs: 4096, 8192, 12288, and 16384 all leave a
remainder when divided by 80, but 20480 == 256 * 80.

This resolves #370.
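
As background for reviewers unfamiliar with the X-macro technique the
patch relies on, here is a minimal standalone sketch of compile-time
table initialization. Everything below (the struct, the size table, all
names) is invented for illustration and deliberately simplified; only
the expansion pattern matches the patch:

    #include <stdio.h>

    typedef struct {
        size_t reg_size;   /* Region size in bytes. */
        size_t run_size;   /* Run size in bytes. */
        unsigned nregs;    /* Regions per run. */
    } bin_info_t;

    #define LG_PAGE 12 /* Assume 4 KiB pages. */

    /* X(lg_grp, lg_delta, ndelta, bin, pgs), mimicking SC() tuples. */
    #define MY_SIZE_CLASSES \
        X( 3,  3, 0, yes, 1) /*     8 bytes */ \
        X( 3,  3, 1, yes, 1) /*    16 bytes */ \
        X( 6,  4, 1, yes, 5) /*    80 bytes */ \
        X(13, 11, 4,  no, 0) /* 16384 bytes: not a small bin */

    /* bin=yes entries expand to initializers; bin=no entries to nothing. */
    #define BIN_yes(reg, pgs) \
        {(reg), ((size_t)(pgs) << LG_PAGE), \
            (unsigned)(((size_t)(pgs) << LG_PAGE) / (reg))},
    #define BIN_no(reg, pgs)
    #define X(lg_grp, lg_delta, ndelta, bin, pgs) \
        BIN_##bin(((size_t)1 << (lg_grp)) + \
            ((size_t)(ndelta) << (lg_delta)), pgs)
    static const bin_info_t bin_info[] = {
        MY_SIZE_CLASSES
    };
    #undef X
    #undef BIN_no
    #undef BIN_yes

    int
    main(void)
    {
        size_t i;

        for (i = 0; i < sizeof(bin_info) / sizeof(bin_info[0]); i++) {
            printf("reg_size=%zu run_size=%zu nregs=%u\n",
                bin_info[i].reg_size, bin_info[i].run_size,
                bin_info[i].nregs);
        }
        return (0);
    }

Because every field is a constant expression, the table lands in
read-only storage and nothing needs to run at boot; that is the property
this patch gains for arena_bin_info.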
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 6e71b5f..866b12f 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -470,7 +470,7 @@
extern ssize_t opt_lg_dirty_mult;
extern ssize_t opt_decay_time;
-extern arena_bin_info_t arena_bin_info[NBINS];
+extern const arena_bin_info_t arena_bin_info[NBINS];
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
@@ -511,13 +511,13 @@
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
-void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
#endif
void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
bool zero);
@@ -634,7 +634,7 @@
bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+size_t arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
@@ -1058,7 +1058,7 @@
const arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
- arena_bin_info_t *bin_info;
+ const arena_bin_info_t *bin_info;
const arena_chunk_map_misc_t *miscelm;
const void *rpages;
@@ -1099,7 +1099,8 @@
}
JEMALLOC_INLINE size_t
-arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
+arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info,
+ const void *ptr)
{
size_t diff, interval, shift, regind;
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index 2b0ca29..c9b8471 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -40,6 +40,36 @@
done
}
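+# Compute the smallest run size (in pages) that is an integer multiple of
+# reg_size for a small size class; this mirrors the loop formerly in
+# bin_info_run_size_calc() in src/arena.c. Defines run_size_pgs upon return.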
+run_size() {
+ lg_p=$1
+ lg_grp=$2
+ lg_delta=$3
+ ndelta=$4
+
+ pow2 ${lg_p}; p=${pow2_result}
+
+ pow2 ${lg_grp}; grp=${pow2_result}
+ pow2 ${lg_delta}; delta=${pow2_result}
+ reg_size=$((${grp} + ${delta}*${ndelta}))
+
+ # Compute smallest run size that is an integer multiple of reg_size.
+ try_run_size=${p}
+ try_nregs=$((${try_run_size} / ${reg_size}))
+ perfect=0
+ while [ ${perfect} -eq 0 ] ; do
+ perfect_run_size=${try_run_size}
+ perfect_nregs=${try_nregs}
+
+ try_run_size=$((${try_run_size} + ${p}))
+ try_nregs=$((${try_run_size} / ${reg_size}))
+ if [ ${perfect_run_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
+ perfect=1
+ fi
+ done
+
+ run_size_pgs=$((${perfect_run_size} / ${p}))
+}
+
size_class() {
index=$1
lg_grp=$2
@@ -65,8 +95,10 @@
if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
bin="yes"
+ run_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${run_size_pgs}
else
bin="no"
+ pgs=0
fi
if [ ${lg_size} -lt ${lg_kmax} \
-o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
@@ -74,14 +106,15 @@
else
lg_delta_lookup="no"
fi
- printf ' SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
+ printf ' SC(%3d, %6d, %8d, %6d, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${pgs} ${lg_delta_lookup}
# Defined upon return:
- # - lg_delta_lookup (${lg_delta} or "no")
# - bin ("yes" or "no")
+ # - pgs (number of pages in a small run, 0 if not a small bin)
+ # - lg_delta_lookup (${lg_delta} or "no")
}
sep_line() {
- echo " \\"
+ echo " \\"
}
size_classes() {
@@ -95,12 +128,13 @@
pow2 ${lg_g}; g=${pow2_result}
echo "#define SIZE_CLASSES \\"
- echo " /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
+ echo " /* index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup */ \\"
ntbins=0
nlbins=0
lg_tiny_maxclass='"NA"'
nbins=0
+ slab_maxpgs=0
# Tiny size classes.
ndelta=0
@@ -114,6 +148,9 @@
fi
if [ ${bin} != "no" ] ; then
nbins=$((${index} + 1))
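+ # Track the largest small run page count; emitted below as SLAB_MAXPGS.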
+ if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+ slab_maxpgs=${pgs}
+ fi
fi
ntbins=$((${ntbins} + 1))
lg_tiny_maxclass=${lg_grp} # Final written value is correct.
@@ -133,11 +170,17 @@
index=$((${index} + 1))
lg_grp=$((${lg_grp} + 1))
lg_delta=$((${lg_delta} + 1))
+ if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+ slab_maxpgs=${pgs}
+ fi
fi
while [ ${ndelta} -lt ${g} ] ; do
size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
+ if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+ slab_maxpgs=${pgs}
+ fi
done
# All remaining groups.
@@ -161,6 +204,9 @@
nbins=$((${index} + 1))
# Final written value is correct:
small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+ if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+ slab_maxpgs=${pgs}
+ fi
if [ ${lg_g} -gt 0 ] ; then
lg_large_minclass=$((${lg_grp} + 1))
else
@@ -186,6 +232,7 @@
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
+ # - slab_maxpgs
# - lg_large_minclass
# - huge_maxclass
}
@@ -200,14 +247,14 @@
* be defined prior to inclusion, and it in turn defines:
*
* LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- * SIZE_CLASSES: Complete table of
- * SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
- * tuples.
+ * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, bin,
+ * pgs, lg_delta_lookup) tuples.
* index: Size class index.
* lg_grp: Lg group base size (no deltas added).
* lg_delta: Lg delta to previous size class.
* ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta
* bin: 'yes' if a small bin size class, 'no' otherwise.
+ * pgs: Run page count if a small bin size class, 0 otherwise.
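 *     (e.g., with 4 KiB pages the 80 byte class has pgs == 5, since
 *     20480 is the smallest page multiple of 80)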
* lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
* otherwise.
* NTBINS: Number of tiny bins.
@@ -217,6 +264,7 @@
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
+ * SLAB_MAXPGS: Maximum run size (in pages) for any small size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
* HUGE_MAXCLASS: Maximum (huge) size class.
*/
@@ -241,6 +289,7 @@
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
+ echo "#define SLAB_MAXPGS ${slab_maxpgs}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
echo "#define HUGE_MAXCLASS ${huge_maxclass}"
echo "#endif"
diff --git a/src/arena.c b/src/arena.c
index 9b458ab..7b9f313 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -15,14 +15,25 @@
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
-arena_bin_info_t arena_bin_info[NBINS];
+const arena_bin_info_t arena_bin_info[NBINS] = {
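+/*
+ * Expand one {reg_size, run_size, nregs, bitmap_info} initializer per small
+ * size class; bin=no classes expand to nothing, leaving exactly NBINS
+ * entries.
+ */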
+#define BIN_INFO_bin_yes(reg_size, run_size, nregs) \
+ {reg_size, run_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, run_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
+ BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+ (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+ (ndelta<<lg_delta)))
+ SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
size_t run_quantize_max; /* Max run_quantize_*() input. */
-static size_t small_maxrun; /* Max run size for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
@@ -86,7 +97,8 @@
assert(size == PAGE_CEILING(size));
/* Don't change sizes that are valid small run sizes. */
- if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+ if (size <= (ZU(SLAB_MAXPGS) << LG_PAGE) && small_run_tab[size >>
+ LG_PAGE])
return (size);
/*
@@ -121,12 +133,12 @@
large_pad) + 1) + large_pad);
} else
large_run_size_next = SIZE_T_MAX;
- if (size >= small_maxrun)
+ if ((size >> LG_PAGE) >= ZU(SLAB_MAXPGS))
return (large_run_size_next);
while (true) {
size += PAGE;
- assert(size <= small_maxrun);
+ assert(size <= (ZU(SLAB_MAXPGS) << LG_PAGE));
if (small_run_tab[size >> LG_PAGE]) {
if (large_run_size_next < size)
return (large_run_size_next);
@@ -301,7 +313,7 @@
}
JEMALLOC_INLINE_C void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+arena_run_reg_alloc(arena_run_t *run, const arena_bin_info_t *bin_info)
{
void *ret;
size_t regind;
@@ -327,7 +339,7 @@
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
size_t regind = arena_run_regind(run, bin_info, ptr);
assert(run->nfree < bin_info->nregs);
@@ -1822,7 +1834,7 @@
/* Skip small run. */
size_t binind = arena_mapbits_binind_get(chunk,
pageind);
- arena_bin_info_t *bin_info =
+ const arena_bin_info_t *bin_info =
&arena_bin_info[binind];
npages = bin_info->run_size >> LG_PAGE;
}
@@ -2045,7 +2057,7 @@
assert(size == PAGE || arena_mapbits_large_size_get(chunk,
run_ind+(size>>LG_PAGE)-1) == 0);
} else {
- arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
size = bin_info->run_size;
}
@@ -2241,7 +2253,7 @@
{
arena_run_t *run;
szind_t binind;
- arena_bin_info_t *bin_info;
+ const arena_bin_info_t *bin_info;
/* Look for a usable run. */
run = arena_bin_nonfull_run_tryget(bin);
@@ -2291,7 +2303,7 @@
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
- arena_bin_info_t *bin_info;
+ const arena_bin_info_t *bin_info;
arena_run_t *run;
binind = arena_bin_index(arena, bin);
@@ -2390,7 +2402,7 @@
}
void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
{
if (!zero)
@@ -2402,7 +2414,7 @@
#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info)
{
memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
@@ -2706,7 +2718,7 @@
else {
szind_t binind = arena_bin_index(extent_node_arena_get(
&chunk->node), bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
* The following block's conditional is necessary because if the
@@ -2768,7 +2780,7 @@
size_t pageind, rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
- arena_bin_info_t *bin_info;
+ const arena_bin_info_t *bin_info;
szind_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@@ -3483,81 +3495,24 @@
return (arena);
}
-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size <= arena_maxrun
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs is also calculated here, since these settings are all
- * interdependent.
- */
-static void
-bin_info_run_size_calc(arena_bin_info_t *bin_info)
-{
- size_t try_run_size, perfect_run_size, actual_run_size;
- uint32_t try_nregs, perfect_nregs, actual_nregs;
-
- /* Compute smallest run size that is an integer multiple of reg_size. */
- try_run_size = PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
- do {
- perfect_run_size = try_run_size;
- perfect_nregs = try_nregs;
-
- try_run_size += PAGE;
- try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
- } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
- assert(perfect_run_size <= arena_maxrun);
- assert(perfect_nregs <= RUN_MAXREGS);
-
- actual_run_size = perfect_run_size;
- actual_nregs = (uint32_t)((actual_run_size) / bin_info->reg_size);
-
- /* Copy final settings. */
- bin_info->run_size = actual_run_size;
- bin_info->nregs = actual_nregs;
-
- if (actual_run_size > small_maxrun)
- small_maxrun = actual_run_size;
-}
-
-static void
-bin_info_init(void)
-{
- arena_bin_info_t *bin_info;
-
-#define BIN_INFO_INIT_bin_yes(index, size) \
- bin_info = &arena_bin_info[index]; \
- bin_info->reg_size = size; \
- bin_info_run_size_calc(bin_info); \
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
- BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
- SIZE_CLASSES
-#undef BIN_INFO_INIT_bin_yes
-#undef BIN_INFO_INIT_bin_no
-#undef SC
-}
-
static bool
small_run_size_init(void)
{
- assert(small_maxrun != 0);
+ assert(SLAB_MAXPGS != 0);
- small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
- LG_PAGE));
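+ /*
+ * small_run_tab is indexed by run size in pages; valid small run page
+ * counts range from 1 to SLAB_MAXPGS inclusive.
+ */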
+ small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (SLAB_MAXPGS + 1));
if (small_run_tab == NULL)
return (true);
#define TAB_INIT_bin_yes(index, size) { \
- arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+ const arena_bin_info_t *bin_info = \
+ &arena_bin_info[index]; \
small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
}
#define TAB_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
SIZE_CLASSES
#undef TAB_INIT_bin_yes
@@ -3643,7 +3598,6 @@
nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
nhclasses = NSIZES - nlclasses - NBINS;
- bin_info_init();
if (small_run_size_init())
return (true);
if (run_quantize_init())
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 5be5961..4eec09b 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -81,7 +81,7 @@
/* Last entry for overflow detection only. */
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES+1] = {
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
@@ -154,7 +154,7 @@
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
diff --git a/test/unit/junk.c b/test/unit/junk.c
index f74e33f..82eddf4 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -23,7 +23,7 @@
}
static void
-arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
+arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
{
size_t i;