Simplify small size class infrastructure.

Program-generate small size class tables for all valid combinations of
LG_TINY_MIN, LG_QUANTUM, and PAGE_SHIFT.  Use the appropriate table, via the
SIZE_CLASSES macro, to generate all relevant data structures, and remove the
distinction between tiny/quantum/cacheline/subpage bins.
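
For reference, the generated tables are consumed as an X-macro: SIZE_CLASSES
expands to one SIZE_CLASS(bin, delta, size) invocation per class, and each
consumer defines SIZE_CLASS locally to emit whatever it needs (a lookup-table
entry, a bin initializer, ...).  A minimal standalone sketch, using a
hypothetical three-entry table rather than the real generated output:

    #include <stdio.h>

    /*
     * Hypothetical table for one (LG_TINY_MIN, LG_QUANTUM, PAGE_SHIFT)
     * combination; delta is the spacing from the previous class, which
     * small_size2bin below uses to pick an S2B_<delta> repeater.
     */
    #define SIZE_CLASSES			\
        SIZE_CLASS(0, 8,  8)		\
        SIZE_CLASS(1, 8, 16)		\
        SIZE_CLASS(2, 8, 24)

    /* Consumer 1: derive the bin count from the table. */
    enum {
    #define SIZE_CLASS(bin, delta, size)	bin_##size,
        SIZE_CLASSES
    #undef SIZE_CLASS
        NBINS				/* 3 for this sketch */
    };

    /* Consumer 2: emit each bin's region size, as bin_info_init() does. */
    static const size_t reg_sizes[NBINS] = {
    #define SIZE_CLASS(bin, delta, size)	size,
        SIZE_CLASSES
    #undef SIZE_CLASS
    };

    int
    main(void)
    {
        int i;

        for (i = 0; i < NBINS; i++)
            printf("bin %d: %zu bytes\n", i, reg_sizes[i]);
        return (0);
    }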

Remove --enable-dynamic-page-shift.  This option didn't prove useful in
practice, and making PAGE_SHIFT a run-time variable prevented optimizations
such as statically sized, page-shift-specific size class tables.
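
A hedged sketch of the optimization angle (constant names chosen to mirror
the tree; the inline helper is illustrative, where the tree uses macros):

    #include <stddef.h>

    #define STATIC_PAGE_SHIFT	12	/* fixed at configure time */
    #define PAGE_SIZE		((size_t)1 << STATIC_PAGE_SHIFT)
    #define PAGE_MASK		(PAGE_SIZE - 1)

    /* Folds to add/and with immediates; no load of a runtime page size. */
    static inline size_t
    page_ceiling(size_t s)
    {

        return ((s + PAGE_MASK) & ~PAGE_MASK);
    }

With a dynamic page shift these become loads of run-time variables, and the
page-shift-specific small_size2bin tables above could not be selected at
compile time.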

Add Tilera architecture support.
diff --git a/src/arena.c b/src/arena.c
index 33f3f85..72b7f44 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -4,128 +4,38 @@
 /******************************************************************************/
 /* Data. */
 
-size_t	opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
-size_t	opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
 ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-uint8_t const	*small_size2bin;
-arena_bin_info_t	*arena_bin_info;
+arena_bin_info_t	arena_bin_info[NBINS];
 
-/* Various bin-related settings. */
-unsigned	nqbins;
-unsigned	ncbins;
-unsigned	nsbins;
-unsigned	nbins;
-size_t		qspace_max;
-size_t		cspace_min;
-size_t		cspace_max;
-size_t		sspace_min;
-size_t		sspace_max;
-
-size_t		lg_mspace;
-size_t		mspace_mask;
-
-/*
- * const_small_size2bin is a static constant lookup table that in the common
- * case can be used as-is for small_size2bin.
- */
+JEMALLOC_ATTR(aligned(CACHELINE))
+const uint8_t	small_size2bin[] = {
 #define	S2B_8(i)	i,
 #define	S2B_16(i)	S2B_8(i) S2B_8(i)
 #define	S2B_32(i)	S2B_16(i) S2B_16(i)
 #define	S2B_64(i)	S2B_32(i) S2B_32(i)
 #define	S2B_128(i)	S2B_64(i) S2B_64(i)
 #define	S2B_256(i)	S2B_128(i) S2B_128(i)
-/*
- * The number of elements in const_small_size2bin is dependent on the
- * definition for SUBPAGE.
- */
-static JEMALLOC_ATTR(aligned(CACHELINE))
-    const uint8_t	const_small_size2bin[] = {
-#if (LG_QUANTUM == 4)
-/* 16-byte quantum **********************/
-	S2B_8(0)		/*    8 */
-	S2B_8(1)		/*   16 */
-#  define S2B_QMIN 1
-	S2B_16(S2B_QMIN + 1)	/*   32 */
-	S2B_16(S2B_QMIN + 2)	/*   48 */
-	S2B_16(S2B_QMIN + 3)	/*   64 */
-	S2B_16(S2B_QMIN + 4)	/*   80 */
-	S2B_16(S2B_QMIN + 5)	/*   96 */
-	S2B_16(S2B_QMIN + 6)	/*  112 */
-	S2B_16(S2B_QMIN + 7)	/*  128 */
-#  define S2B_CMIN (S2B_QMIN + 8)
-#else
-/* 8-byte quantum ***********************/
-#  define S2B_QMIN 0
-	S2B_8(S2B_QMIN + 0)	/*    8 */
-	S2B_8(S2B_QMIN + 1)	/*   16 */
-	S2B_8(S2B_QMIN + 2)	/*   24 */
-	S2B_8(S2B_QMIN + 3)	/*   32 */
-	S2B_8(S2B_QMIN + 4)	/*   40 */
-	S2B_8(S2B_QMIN + 5)	/*   48 */
-	S2B_8(S2B_QMIN + 6)	/*   56 */
-	S2B_8(S2B_QMIN + 7)	/*   64 */
-	S2B_8(S2B_QMIN + 8)	/*   72 */
-	S2B_8(S2B_QMIN + 9)	/*   80 */
-	S2B_8(S2B_QMIN + 10)	/*   88 */
-	S2B_8(S2B_QMIN + 11)	/*   96 */
-	S2B_8(S2B_QMIN + 12)	/*  104 */
-	S2B_8(S2B_QMIN + 13)	/*  112 */
-	S2B_8(S2B_QMIN + 14)	/*  120 */
-	S2B_8(S2B_QMIN + 15)	/*  128 */
-#  define S2B_CMIN (S2B_QMIN + 16)
-#endif
-/****************************************/
-	S2B_64(S2B_CMIN + 0)	/*  192 */
-	S2B_64(S2B_CMIN + 1)	/*  256 */
-	S2B_64(S2B_CMIN + 2)	/*  320 */
-	S2B_64(S2B_CMIN + 3)	/*  384 */
-	S2B_64(S2B_CMIN + 4)	/*  448 */
-	S2B_64(S2B_CMIN + 5)	/*  512 */
-#  define S2B_SMIN (S2B_CMIN + 6)
-	S2B_256(S2B_SMIN + 0)	/*  768 */
-	S2B_256(S2B_SMIN + 1)	/* 1024 */
-	S2B_256(S2B_SMIN + 2)	/* 1280 */
-	S2B_256(S2B_SMIN + 3)	/* 1536 */
-	S2B_256(S2B_SMIN + 4)	/* 1792 */
-	S2B_256(S2B_SMIN + 5)	/* 2048 */
-	S2B_256(S2B_SMIN + 6)	/* 2304 */
-	S2B_256(S2B_SMIN + 7)	/* 2560 */
-	S2B_256(S2B_SMIN + 8)	/* 2816 */
-	S2B_256(S2B_SMIN + 9)	/* 3072 */
-	S2B_256(S2B_SMIN + 10)	/* 3328 */
-	S2B_256(S2B_SMIN + 11)	/* 3584 */
-	S2B_256(S2B_SMIN + 12)	/* 3840 */
-#if (STATIC_PAGE_SHIFT == 13)
-	S2B_256(S2B_SMIN + 13)	/* 4096 */
-	S2B_256(S2B_SMIN + 14)	/* 4352 */
-	S2B_256(S2B_SMIN + 15)	/* 4608 */
-	S2B_256(S2B_SMIN + 16)	/* 4864 */
-	S2B_256(S2B_SMIN + 17)	/* 5120 */
-	S2B_256(S2B_SMIN + 18)	/* 5376 */
-	S2B_256(S2B_SMIN + 19)	/* 5632 */
-	S2B_256(S2B_SMIN + 20)	/* 5888 */
-	S2B_256(S2B_SMIN + 21)	/* 6144 */
-	S2B_256(S2B_SMIN + 22)	/* 6400 */
-	S2B_256(S2B_SMIN + 23)	/* 6656 */
-	S2B_256(S2B_SMIN + 24)	/* 6912 */
-	S2B_256(S2B_SMIN + 25)	/* 7168 */
-	S2B_256(S2B_SMIN + 26)	/* 7424 */
-	S2B_256(S2B_SMIN + 27)	/* 7680 */
-	S2B_256(S2B_SMIN + 28)	/* 7936 */
-#endif
-};
-#undef S2B_1
-#undef S2B_2
-#undef S2B_4
+#define	S2B_512(i)	S2B_256(i) S2B_256(i)
+#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
+#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
+#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
+#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
+#define	SIZE_CLASS(bin, delta, size)					\
+	S2B_##delta(bin)
+	SIZE_CLASSES
 #undef S2B_8
 #undef S2B_16
 #undef S2B_32
 #undef S2B_64
 #undef S2B_128
 #undef S2B_256
-#undef S2B_QMIN
-#undef S2B_CMIN
-#undef S2B_SMIN
+#undef S2B_512
+#undef S2B_1024
+#undef S2B_2048
+#undef S2B_4096
+#undef S2B_8192
+#undef SIZE_CLASS
+};
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
@@ -160,12 +70,9 @@
     void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
 static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
-static bool	small_size2bin_init(void);
-static void	small_size2bin_validate(void);
-static bool	small_size2bin_init_hard(void);
 static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
     size_t min_run_size);
-static bool	bin_info_init(void);
+static void	bin_info_init(void);
 
 /******************************************************************************/
 
@@ -1368,7 +1275,7 @@
 	size_t binind;
 
 	binind = SMALL_SIZE2BIN(size);
-	assert(binind < nbins);
+	assert(binind < NBINS);
 	bin = &arena->bins[binind];
 	size = arena_bin_info[binind].reg_size;
 
@@ -1553,12 +1460,12 @@
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 	assert(isalloc(ptr) == PAGE_SIZE);
-	assert(size <= small_maxclass);
+	assert(size <= SMALL_MAXCLASS);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
 	binind = SMALL_SIZE2BIN(size);
-	assert(binind < nbins);
+	assert(binind < NBINS);
 	chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
 	    ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
 }
@@ -1594,7 +1501,7 @@
 		    CHUNK_MAP_CLASS_MASK) != 0) {
 			size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
 			    CHUNK_MAP_CLASS_SHIFT) - 1;
-			assert(binind < nbins);
+			assert(binind < NBINS);
 			ret = arena_bin_info[binind].reg_size;
 		}
 		assert(ret != 0);
@@ -1762,7 +1669,7 @@
 	}
 	malloc_mutex_unlock(&arena->lock);
 
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 
 		malloc_mutex_lock(&bin->lock);
@@ -1963,10 +1870,10 @@
 	 * Avoid moving the allocation if the size class can be left the same.
 	 */
 	if (oldsize <= arena_maxclass) {
-		if (oldsize <= small_maxclass) {
+		if (oldsize <= SMALL_MAXCLASS) {
 			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
 			    == oldsize);
-			if ((size + extra <= small_maxclass &&
+			if ((size + extra <= SMALL_MAXCLASS &&
 			    SMALL_SIZE2BIN(size + extra) ==
 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
 			    size + extra >= oldsize)) {
@@ -1978,7 +1885,7 @@
 			}
 		} else {
 			assert(size <= arena_maxclass);
-			if (size + extra > small_maxclass) {
+			if (size + extra > SMALL_MAXCLASS) {
 				if (arena_ralloc_large(ptr, oldsize, size,
 				    extra, zero) == false)
 					return (ptr);
@@ -2083,7 +1990,7 @@
 	arena_avail_tree_new(&arena->runs_avail_dirty);
 
 	/* Initialize bins. */
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		bin = &arena->bins[i];
 		if (malloc_mutex_init(&bin->lock))
 			return (true);
@@ -2096,119 +2003,6 @@
 	return (false);
 }
 
-static void
-small_size2bin_validate(void)
-{
-	size_t i, size, binind;
-
-	i = 1;
-	/* Tiny. */
-	for (; i < TINY_MIN; i++) {
-		size = TINY_MIN;
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-	for (; i < qspace_min; i++) {
-		size = pow2_ceil(i);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-	/* Quantum-spaced. */
-	for (; i <= qspace_max; i++) {
-		size = QUANTUM_CEILING(i);
-		binind = ntbins + (size >> LG_QUANTUM) - 1;
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-	/* Cacheline-spaced. */
-	for (; i <= cspace_max; i++) {
-		size = CACHELINE_CEILING(i);
-		binind = ntbins + nqbins + ((size - cspace_min) >>
-		    LG_CACHELINE);
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-	/* Sub-page. */
-	for (; i <= sspace_max; i++) {
-		size = SUBPAGE_CEILING(i);
-		binind = ntbins + nqbins + ncbins + ((size - sspace_min)
-		    >> LG_SUBPAGE);
-		assert(SMALL_SIZE2BIN(i) == binind);
-	}
-}
-
-static bool
-small_size2bin_init(void)
-{
-
-	if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
-	    || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
-	    || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
-	    LG_TINY_MIN) + 1))
-		return (small_size2bin_init_hard());
-
-	small_size2bin = const_small_size2bin;
-	if (config_debug)
-		small_size2bin_validate();
-	return (false);
-}
-
-static bool
-small_size2bin_init_hard(void)
-{
-	size_t i, size, binind;
-	uint8_t *custom_small_size2bin;
-#define	CUSTOM_SMALL_SIZE2BIN(s)					\
-    custom_small_size2bin[(s-1) >> LG_TINY_MIN]
-
-	assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
-	    || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
-	    || (sizeof(const_small_size2bin) != ((small_maxclass-1) >>
-	    LG_TINY_MIN) + 1));
-
-	custom_small_size2bin = (uint8_t *)
-	    base_alloc(small_maxclass >> LG_TINY_MIN);
-	if (custom_small_size2bin == NULL)
-		return (true);
-
-	i = 1;
-	/* Tiny. */
-	for (; i < TINY_MIN; i += TINY_MIN) {
-		size = TINY_MIN;
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-	for (; i < qspace_min; i += TINY_MIN) {
-		size = pow2_ceil(i);
-		binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-	/* Quantum-spaced. */
-	for (; i <= qspace_max; i += TINY_MIN) {
-		size = QUANTUM_CEILING(i);
-		binind = ntbins + (size >> LG_QUANTUM) - 1;
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-	/* Cacheline-spaced. */
-	for (; i <= cspace_max; i += TINY_MIN) {
-		size = CACHELINE_CEILING(i);
-		binind = ntbins + nqbins + ((size - cspace_min) >>
-		    LG_CACHELINE);
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-	/* Sub-page. */
-	for (; i <= sspace_max; i += TINY_MIN) {
-		size = SUBPAGE_CEILING(i);
-		binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
-		    LG_SUBPAGE);
-		CUSTOM_SMALL_SIZE2BIN(i) = binind;
-	}
-
-	small_size2bin = custom_small_size2bin;
-	if (config_debug)
-		small_size2bin_validate();
-	return (false);
-#undef CUSTOM_SMALL_SIZE2BIN
-}
-
 /*
  * Calculate bin_info->run_size such that it meets the following constraints:
  *
@@ -2330,104 +2124,27 @@
 	return (good_run_size);
 }
 
-static bool
+static void
 bin_info_init(void)
 {
 	arena_bin_info_t *bin_info;
-	unsigned i;
-	size_t prev_run_size;
+	size_t prev_run_size = PAGE_SIZE;
 
-	arena_bin_info = base_alloc(sizeof(arena_bin_info_t) * nbins);
-	if (arena_bin_info == NULL)
-		return (true);
-
-	prev_run_size = PAGE_SIZE;
-	i = 0;
-	/* (2^n)-spaced tiny bins. */
-	for (; i < ntbins; i++) {
-		bin_info = &arena_bin_info[i];
-		bin_info->reg_size = (1U << (LG_TINY_MIN + i));
-		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
-		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-	}
-	/* Quantum-spaced bins. */
-	for (; i < ntbins + nqbins; i++) {
-		bin_info = &arena_bin_info[i];
-		bin_info->reg_size = (i - ntbins + 1) << LG_QUANTUM;
-		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
-		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-	}
-	/* Cacheline-spaced bins. */
-	for (; i < ntbins + nqbins + ncbins; i++) {
-		bin_info = &arena_bin_info[i];
-		bin_info->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
-		    LG_CACHELINE);
-		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
-		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-	}
-	/* Subpage-spaced bins. */
-	for (; i < nbins; i++) {
-		bin_info = &arena_bin_info[i];
-		bin_info->reg_size = sspace_min + ((i - (ntbins + nqbins +
-		    ncbins)) << LG_SUBPAGE);
-		prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);
-		bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-	}
-
-	return (false);
+#define	SIZE_CLASS(bin, delta, size)					\
+	bin_info = &arena_bin_info[bin];				\
+	bin_info->reg_size = size;					\
+	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+	SIZE_CLASSES
+#undef SIZE_CLASS
 }
 
-bool
+void
 arena_boot(void)
 {
 	size_t header_size;
 	unsigned i;
 
-	/* Set variables according to the value of opt_lg_[qc]space_max. */
-	qspace_max = (1U << opt_lg_qspace_max);
-	cspace_min = CACHELINE_CEILING(qspace_max);
-	if (cspace_min == qspace_max)
-		cspace_min += CACHELINE;
-	cspace_max = (1U << opt_lg_cspace_max);
-	sspace_min = SUBPAGE_CEILING(cspace_max);
-	if (sspace_min == cspace_max)
-		sspace_min += SUBPAGE;
-	assert(sspace_min < PAGE_SIZE);
-	sspace_max = PAGE_SIZE - SUBPAGE;
-
-	assert(LG_QUANTUM >= LG_TINY_MIN);
-	assert(ntbins <= LG_QUANTUM);
-	nqbins = qspace_max >> LG_QUANTUM;
-	ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
-	nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
-	nbins = ntbins + nqbins + ncbins + nsbins;
-
-	/*
-	 * The small_size2bin lookup table uses uint8_t to encode each bin
-	 * index, so we cannot support more than 256 small size classes.  This
-	 * limit is difficult to exceed (not even possible with 16B quantum and
-	 * 4KiB pages), and such configurations are impractical, but
-	 * nonetheless we need to protect against this case in order to avoid
-	 * undefined behavior.
-	 *
-	 * Further constrain nbins to 255 if prof_promote is true, since all
-	 * small size classes, plus a "not small" size class must be stored in
-	 * 8 bits of arena_chunk_map_t's bits field.
-	 */
-	if (config_prof && opt_prof && prof_promote && nbins > 255) {
-		char line_buf[UMAX2S_BUFSIZE];
-		malloc_write("<jemalloc>: Too many small size classes (");
-		malloc_write(u2s(nbins, 10, line_buf));
-		malloc_write(" > max 255)\n");
-		abort();
-	} else if (nbins > 256) {
-		char line_buf[UMAX2S_BUFSIZE];
-		malloc_write("<jemalloc>: Too many small size classes (");
-		malloc_write(u2s(nbins, 10, line_buf));
-		malloc_write(" > max 256)\n");
-		abort();
-	}
-
 	/*
 	 * Compute the header size such that it is large enough to contain the
 	 * page map.  The page map is biased to omit entries for the header
@@ -2451,11 +2168,5 @@
 
 	arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
 
-	if (small_size2bin_init())
-		return (true);
-
-	if (bin_info_init())
-		return (true);
-
-	return (false);
+	bin_info_init();
 }
diff --git a/src/ctl.c b/src/ctl.c
index 4938e10..0beeb3d 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -47,7 +47,6 @@
 CTL_PROTO(thread_deallocatedp)
 CTL_PROTO(config_debug)
 CTL_PROTO(config_dss)
-CTL_PROTO(config_dynamic_page_shift)
 CTL_PROTO(config_fill)
 CTL_PROTO(config_lazy_lock)
 CTL_PROTO(config_prof)
@@ -59,8 +58,6 @@
 CTL_PROTO(config_tls)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
-CTL_PROTO(opt_lg_qspace_max)
-CTL_PROTO(opt_lg_cspace_max)
 CTL_PROTO(opt_lg_chunk)
 CTL_PROTO(opt_narenas)
 CTL_PROTO(opt_lg_dirty_mult)
@@ -88,23 +85,9 @@
 CTL_PROTO(arenas_narenas)
 CTL_PROTO(arenas_initialized)
 CTL_PROTO(arenas_quantum)
-CTL_PROTO(arenas_cacheline)
-CTL_PROTO(arenas_subpage)
 CTL_PROTO(arenas_pagesize)
 CTL_PROTO(arenas_chunksize)
-CTL_PROTO(arenas_tspace_min)
-CTL_PROTO(arenas_tspace_max)
-CTL_PROTO(arenas_qspace_min)
-CTL_PROTO(arenas_qspace_max)
-CTL_PROTO(arenas_cspace_min)
-CTL_PROTO(arenas_cspace_max)
-CTL_PROTO(arenas_sspace_min)
-CTL_PROTO(arenas_sspace_max)
 CTL_PROTO(arenas_tcache_max)
-CTL_PROTO(arenas_ntbins)
-CTL_PROTO(arenas_nqbins)
-CTL_PROTO(arenas_ncbins)
-CTL_PROTO(arenas_nsbins)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
 CTL_PROTO(arenas_nlruns)
@@ -185,7 +168,6 @@
 static const ctl_node_t	config_node[] = {
 	{NAME("debug"),			CTL(config_debug)},
 	{NAME("dss"),			CTL(config_dss)},
-	{NAME("dynamic_page_shift"),	CTL(config_dynamic_page_shift)},
 	{NAME("fill"),			CTL(config_fill)},
 	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
 	{NAME("prof"),			CTL(config_prof)},
@@ -200,8 +182,6 @@
 
 static const ctl_node_t opt_node[] = {
 	{NAME("abort"),			CTL(opt_abort)},
-	{NAME("lg_qspace_max"),		CTL(opt_lg_qspace_max)},
-	{NAME("lg_cspace_max"),		CTL(opt_lg_cspace_max)},
 	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
 	{NAME("narenas"),		CTL(opt_narenas)},
 	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
@@ -250,23 +230,9 @@
 	{NAME("narenas"),		CTL(arenas_narenas)},
 	{NAME("initialized"),		CTL(arenas_initialized)},
 	{NAME("quantum"),		CTL(arenas_quantum)},
-	{NAME("cacheline"),		CTL(arenas_cacheline)},
-	{NAME("subpage"),		CTL(arenas_subpage)},
 	{NAME("pagesize"),		CTL(arenas_pagesize)},
 	{NAME("chunksize"),		CTL(arenas_chunksize)},
-	{NAME("tspace_min"),		CTL(arenas_tspace_min)},
-	{NAME("tspace_max"),		CTL(arenas_tspace_max)},
-	{NAME("qspace_min"),		CTL(arenas_qspace_min)},
-	{NAME("qspace_max"),		CTL(arenas_qspace_max)},
-	{NAME("cspace_min"),		CTL(arenas_cspace_min)},
-	{NAME("cspace_max"),		CTL(arenas_cspace_max)},
-	{NAME("sspace_min"),		CTL(arenas_sspace_min)},
-	{NAME("sspace_max"),		CTL(arenas_sspace_max)},
 	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
-	{NAME("ntbins"),		CTL(arenas_ntbins)},
-	{NAME("nqbins"),		CTL(arenas_nqbins)},
-	{NAME("ncbins"),		CTL(arenas_ncbins)},
-	{NAME("nsbins"),		CTL(arenas_nsbins)},
 	{NAME("nbins"),			CTL(arenas_nbins)},
 	{NAME("nhbins"),		CTL(arenas_nhbins)},
 	{NAME("bin"),			CHILD(arenas_bin)},
@@ -397,12 +363,6 @@
 ctl_arena_init(ctl_arena_stats_t *astats)
 {
 
-	if (astats->bstats == NULL) {
-		astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins *
-		    sizeof(malloc_bin_stats_t));
-		if (astats->bstats == NULL)
-			return (true);
-	}
 	if (astats->lstats == NULL) {
 		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
 		    sizeof(malloc_large_stats_t));
@@ -425,7 +385,7 @@
 		astats->nmalloc_small = 0;
 		astats->ndalloc_small = 0;
 		astats->nrequests_small = 0;
-		memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
+		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
 		memset(astats->lstats, 0, nlclasses *
 		    sizeof(malloc_large_stats_t));
 	}
@@ -439,7 +399,7 @@
 	arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
 	    &cstats->astats, cstats->bstats, cstats->lstats);
 
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		cstats->allocated_small += cstats->bstats[i].allocated;
 		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
 		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
@@ -477,7 +437,7 @@
 		sstats->lstats[i].curruns += astats->lstats[i].curruns;
 	}
 
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		sstats->bstats[i].allocated += astats->bstats[i].allocated;
 		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
 		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
@@ -1092,7 +1052,6 @@
 
 CTL_RO_BOOL_CONFIG_GEN(config_debug)
 CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_dynamic_page_shift)
 CTL_RO_BOOL_CONFIG_GEN(config_fill)
 CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
 CTL_RO_BOOL_CONFIG_GEN(config_prof)
@@ -1107,8 +1066,6 @@
 /******************************************************************************/
 
 CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
-CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
 CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
 CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
 CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@@ -1138,7 +1095,7 @@
 arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
 {
 
-	if (i > nbins)
+	if (i > NBINS)
 		return (NULL);
 	return (super_arenas_bin_i_node);
 }
@@ -1182,24 +1139,10 @@
 }
 
 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
-CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
 CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
 CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
-CTL_RO_NL_GEN(arenas_tspace_min, TINY_MIN, size_t)
-CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
-CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
-CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
-CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
-CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
-CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
-CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
-CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
-CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
-CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
-CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
-CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
 CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
 
@@ -1346,7 +1289,7 @@
 stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
 {
 
-	if (j > nbins)
+	if (j > NBINS)
 		return (NULL);
 	return (super_stats_arenas_i_bins_j_node);
 }
diff --git a/src/jemalloc.c b/src/jemalloc.c
index 81829fe..08e5f31 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -95,9 +95,7 @@
 {
 	arena_t *ret;
 
-	/* Allocate enough space for trailing bins. */
-	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
-	    + (sizeof(arena_bin_t) * nbins));
+	ret = (arena_t *)base_alloc(sizeof(arena_t));
 	if (ret != NULL && arena_new(ret, ind) == false) {
 		arenas[ind] = ret;
 		return (ret);
@@ -563,10 +561,6 @@
 			}
 
 			CONF_HANDLE_BOOL(abort)
-			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
-			    PAGE_SHIFT-1)
-			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
-			    PAGE_SHIFT-1)
 			/*
 			 * Chunks always require at least one header page,
 			 * plus one data page.
@@ -613,14 +607,6 @@
 #undef CONF_HANDLE_SSIZE_T
 #undef CONF_HANDLE_CHAR_P
 		}
-
-		/* Validate configuration of options that are inter-related. */
-		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
-			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
-			    "relationship; restoring defaults\n");
-			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
-			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
-		}
 	}
 }
 
@@ -709,10 +695,7 @@
 	if (config_prof)
 		prof_boot1();
 
-	if (arena_boot()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
+	arena_boot();
 
 	if (config_tcache && tcache_boot()) {
 		malloc_mutex_unlock(&init_lock);
@@ -893,8 +876,8 @@
 			goto OOM;
 		}
 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
-		    small_maxclass) {
-			ret = imalloc(small_maxclass+1);
+		    SMALL_MAXCLASS) {
+			ret = imalloc(SMALL_MAXCLASS+1);
 			if (ret != NULL)
 				arena_prof_promoted(ret, usize);
 		} else
@@ -992,10 +975,10 @@
 				ret = EINVAL;
 			} else {
 				if (prof_promote && (uintptr_t)cnt !=
-				    (uintptr_t)1U && usize <= small_maxclass) {
-					assert(sa2u(small_maxclass+1,
+				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
+					assert(sa2u(SMALL_MAXCLASS+1,
 					    alignment, NULL) != 0);
-					result = ipalloc(sa2u(small_maxclass+1,
+					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
 					    alignment, NULL), alignment, false);
 					if (result != NULL) {
 						arena_prof_promoted(result,
@@ -1091,8 +1074,8 @@
 			goto RETURN;
 		}
 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
-		    <= small_maxclass) {
-			ret = icalloc(small_maxclass+1);
+		    <= SMALL_MAXCLASS) {
+			ret = icalloc(SMALL_MAXCLASS+1);
 			if (ret != NULL)
 				arena_prof_promoted(ret, usize);
 		} else
@@ -1177,8 +1160,8 @@
 				goto OOM;
 			}
 			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
-			    usize <= small_maxclass) {
-				ret = iralloc(ptr, small_maxclass+1, 0, 0,
+			    usize <= SMALL_MAXCLASS) {
+				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
 				    false, false);
 				if (ret != NULL)
 					arena_prof_promoted(ret, usize);
@@ -1220,8 +1203,8 @@
 				else {
 					if (prof_promote && (uintptr_t)cnt !=
 					    (uintptr_t)1U && usize <=
-					    small_maxclass) {
-						ret = imalloc(small_maxclass+1);
+					    SMALL_MAXCLASS) {
+						ret = imalloc(SMALL_MAXCLASS+1);
 						if (ret != NULL) {
 							arena_prof_promoted(ret,
 							    usize);
@@ -1436,9 +1419,9 @@
 		if (cnt == NULL)
 			goto OOM;
 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
-		    small_maxclass) {
+		    SMALL_MAXCLASS) {
 			size_t usize_promoted = (alignment == 0) ?
-			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
+			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
 			    alignment, NULL);
 			assert(usize_promoted != 0);
 			p = iallocm(usize_promoted, alignment, zero);
@@ -1517,9 +1500,9 @@
 		 */
 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
 		    && ((alignment == 0) ? s2u(size) : sa2u(size,
-		    alignment, NULL)) <= small_maxclass) {
-			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
-			    size+extra) ? 0 : size+extra - (small_maxclass+1),
+		    alignment, NULL)) <= SMALL_MAXCLASS) {
+			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
 			    alignment, zero, no_move);
 			if (q == NULL)
 				goto ERR;
diff --git a/src/stats.c b/src/stats.c
index 6d9ba9d..e4500df 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -159,12 +159,12 @@
 	CTL_GET("config.tcache", &config_tcache, bool);
 	if (config_tcache) {
 		malloc_cprintf(write_cb, cbopaque,
-		    "bins:     bin    size regs pgs    allocated      nmalloc"
+		    "bins:     bin  size regs pgs    allocated      nmalloc"
 		    "      ndalloc    nrequests       nfills     nflushes"
 		    "      newruns       reruns      curruns\n");
 	} else {
 		malloc_cprintf(write_cb, cbopaque,
-		    "bins:     bin    size regs pgs    allocated      nmalloc"
+		    "bins:     bin  size regs pgs    allocated      nmalloc"
 		    "      ndalloc      newruns       reruns      curruns\n");
 	}
 	CTL_GET("arenas.nbins", &nbins, unsigned);
@@ -176,7 +176,6 @@
 			if (gap_start == UINT_MAX)
 				gap_start = j;
 		} else {
-			unsigned ntbins_, nqbins, ncbins, nsbins;
 			size_t reg_size, run_size, allocated;
 			uint32_t nregs;
 			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
@@ -196,10 +195,6 @@
 				}
 				gap_start = UINT_MAX;
 			}
-			CTL_GET("arenas.ntbins", &ntbins_, unsigned);
-			CTL_GET("arenas.nqbins", &nqbins, unsigned);
-			CTL_GET("arenas.ncbins", &ncbins, unsigned);
-			CTL_GET("arenas.nsbins", &nsbins, unsigned);
 			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
 			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
 			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
@@ -223,27 +218,19 @@
 			    size_t);
 			if (config_tcache) {
 				malloc_cprintf(write_cb, cbopaque,
-				    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
 				    " %12zu\n",
-				    j,
-				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
-				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
-				    "S",
-				    reg_size, nregs, run_size / pagesize,
+				    j, reg_size, nregs, run_size / pagesize,
 				    allocated, nmalloc, ndalloc, nrequests,
 				    nfills, nflushes, nruns, reruns, curruns);
 			} else {
 				malloc_cprintf(write_cb, cbopaque,
-				    "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+				    "%13u %5zu %4u %3zu %12zu %12"PRIu64
 				    " %12"PRIu64" %12"PRIu64" %12"PRIu64
 				    " %12zu\n",
-				    j,
-				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
-				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
-				    "S",
-				    reg_size, nregs, run_size / pagesize,
+				    j, reg_size, nregs, run_size / pagesize,
 				    allocated, nmalloc, ndalloc, nruns, reruns,
 				    curruns);
 			}
@@ -496,8 +483,6 @@
 
 		write_cb(cbopaque, "Run-time option settings:\n");
 		OPT_WRITE_BOOL(abort)
-		OPT_WRITE_SIZE_T(lg_qspace_max)
-		OPT_WRITE_SIZE_T(lg_cspace_max)
 		OPT_WRITE_SIZE_T(lg_chunk)
 		OPT_WRITE_SIZE_T(narenas)
 		OPT_WRITE_SSIZE_T(lg_dirty_mult)
@@ -541,51 +526,6 @@
 		write_cb(cbopaque, u2s(sv, 10, s));
 		write_cb(cbopaque, "\n");
 
-		CTL_GET("arenas.cacheline", &sv, size_t);
-		write_cb(cbopaque, "Cacheline size (assumed): ");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "\n");
-
-		CTL_GET("arenas.subpage", &sv, size_t);
-		write_cb(cbopaque, "Subpage spacing: ");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "\n");
-
-		if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
-		    NULL, 0)) == 0) {
-			write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, "..");
-
-			CTL_GET("arenas.tspace_max", &sv, size_t);
-			write_cb(cbopaque, u2s(sv, 10, s));
-			write_cb(cbopaque, "]\n");
-		}
-
-		CTL_GET("arenas.qspace_min", &sv, size_t);
-		write_cb(cbopaque, "Quantum-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.qspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
-
-		CTL_GET("arenas.cspace_min", &sv, size_t);
-		write_cb(cbopaque, "Cacheline-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.cspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
-
-		CTL_GET("arenas.sspace_min", &sv, size_t);
-		write_cb(cbopaque, "Subpage-spaced sizes: [");
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "..");
-		CTL_GET("arenas.sspace_max", &sv, size_t);
-		write_cb(cbopaque, u2s(sv, 10, s));
-		write_cb(cbopaque, "]\n");
-
 		CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
 		if (ssv >= 0) {
 			write_cb(cbopaque,
diff --git a/src/tcache.c b/src/tcache.c
index 4f4ed6c..fa05728 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -55,7 +55,7 @@
 	unsigned i, nflush, ndeferred;
 	bool merged_stats = false;
 
-	assert(binind < nbins);
+	assert(binind < NBINS);
 	assert(rem <= tbin->ncached);
 
 	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
@@ -152,7 +152,7 @@
 				merged_stats = true;
 				arena->stats.nrequests_large +=
 				    tbin->tstats.nrequests;
-				arena->stats.lstats[binind - nbins].nrequests +=
+				arena->stats.lstats[binind - NBINS].nrequests +=
 				    tbin->tstats.nrequests;
 				tbin->tstats.nrequests = 0;
 			}
@@ -185,7 +185,7 @@
 		arena_t *arena = tcache->arena;
 		malloc_mutex_lock(&arena->lock);
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
-		arena->stats.lstats[binind - nbins].nrequests +=
+		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&arena->lock);
@@ -220,7 +220,7 @@
 	 */
 	size = (size + CACHELINE_MASK) & (-CACHELINE);
 
-	if (size <= small_maxclass)
+	if (size <= SMALL_MAXCLASS)
 		tcache = (tcache_t *)arena_malloc_small(arena, size, true);
 	else if (size <= tcache_maxclass)
 		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
@@ -266,7 +266,7 @@
 		tcache_stats_merge(tcache, tcache->arena);
 	}
 
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
 		tcache_bin_flush_small(tbin, i, 0, tcache);
 
@@ -287,7 +287,7 @@
 			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
-			arena->stats.lstats[i - nbins].nrequests +=
+			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(&arena->lock);
 		}
@@ -300,7 +300,7 @@
 	}
 
 	tcache_size = arena_salloc(tcache);
-	if (tcache_size <= small_maxclass) {
+	if (tcache_size <= SMALL_MAXCLASS) {
 		arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
 		arena_t *arena = chunk->arena;
 		size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
@@ -357,7 +357,7 @@
 	unsigned i;
 
 	/* Merge and reset tcache stats. */
-	for (i = 0; i < nbins; i++) {
+	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		tcache_bin_t *tbin = &tcache->tbins[i];
 		malloc_mutex_lock(&bin->lock);
@@ -367,7 +367,7 @@
 	}
 
 	for (; i < nhbins; i++) {
-		malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins];
+		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
 		tcache_bin_t *tbin = &tcache->tbins[i];
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
 		lstats->nrequests += tbin->tstats.nrequests;
@@ -384,17 +384,18 @@
 
 		/*
 		 * If necessary, clamp opt_lg_tcache_max, now that
-		 * small_maxclass and arena_maxclass are known.
+		 * SMALL_MAXCLASS and arena_maxclass are known.
+		 * XXX Can this be done earlier?
 		 */
 		if (opt_lg_tcache_max < 0 || (1U <<
-		    opt_lg_tcache_max) < small_maxclass)
-			tcache_maxclass = small_maxclass;
+		    opt_lg_tcache_max) < SMALL_MAXCLASS)
+			tcache_maxclass = SMALL_MAXCLASS;
 		else if ((1U << opt_lg_tcache_max) > arena_maxclass)
 			tcache_maxclass = arena_maxclass;
 		else
 			tcache_maxclass = (1U << opt_lg_tcache_max);
 
-		nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
+		nhbins = NBINS + (tcache_maxclass >> PAGE_SHIFT);
 
 		/* Initialize tcache_bin_info. */
 		tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -402,7 +403,7 @@
 		if (tcache_bin_info == NULL)
 			return (true);
 		stack_nelms = 0;
-		for (i = 0; i < nbins; i++) {
+		for (i = 0; i < NBINS; i++) {
 			if ((arena_bin_info[i].nregs << 1) <=
 			    TCACHE_NSLOTS_SMALL_MAX) {
 				tcache_bin_info[i].ncached_max =
@@ -421,7 +422,7 @@
 		/* Compute incremental GC event threshold. */
 		if (opt_lg_tcache_gc_sweep >= 0) {
 			tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
-			    nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
+			    NBINS) + (((1U << opt_lg_tcache_gc_sweep) % NBINS ==
 			    0) ? 0 : 1);
 		} else
 			tcache_gc_incr = 0;