Rename arena_maxclass to large_maxclass.
arena_maxclass is no longer an appropriate name, because arenas also
manage huge allocations; the variable specifically denotes the maximum
large size class.
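For reference, a minimal sketch (not jemalloc code) of the three regimes the
allocator distinguishes; the boundary values below are hypothetical, whereas
the real SMALL_MAXCLASS and large_maxclass come from the size-class tables
and the chunk size at boot:

/*
 * Illustrative sketch only. Boundary values are hypothetical; jemalloc
 * computes them from its size-class tables and chunksize.
 */
#include <stddef.h>
#include <stdio.h>

#define SMALL_MAXCLASS ((size_t)14 * 1024)              /* hypothetical */
#define LARGE_MAXCLASS ((size_t)2 * 1024 * 1024 - 4096) /* hypothetical */

static const char *
size_class_kind(size_t size)
{

	if (size <= SMALL_MAXCLASS)
		return ("small");	/* served from bin runs */
	if (size <= LARGE_MAXCLASS)
		return ("large");	/* page runs within a chunk */
	return ("huge");		/* dedicated chunk(s) */
}

int
main(void)
{
	size_t sizes[] = {4096, 32768, 4 * 1024 * 1024};
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%zu -> %s\n", sizes[i], size_class_kind(sizes[i]));
	return (0);
}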
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f77f257..9712c1c 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -424,7 +424,7 @@
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */

@@ -1143,7 +1143,7 @@
zero));
} else
return (arena_malloc_small(arena, size, zero));
- } else if (likely(size <= arena_maxclass)) {
+ } else if (likely(size <= large_maxclass)) {
/*
* Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization.
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index a341b25..e2959f1 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -705,7 +705,7 @@
}

/* Try for a large size class. */
- if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
+ if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
/*
* We can't achieve subpage alignment, so round up alignment
* to the minimum that can actually be supported.
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index ed1f6c2..9d21a80 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -58,7 +58,6 @@
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_maxclass
arena_maxrun
arena_maybe_purge
arena_metadata_allocated_add
@@ -285,6 +284,7 @@
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
+large_maxclass
lg_floor
malloc_cprintf
malloc_mutex_init
diff --git a/src/arena.c b/src/arena.c
index b41f0ce..a119d26 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -11,7 +11,7 @@
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
-size_t arena_maxclass; /* Max size class for arenas. */
+size_t large_maxclass; /* Max large size class. */
static size_t small_maxrun; /* Max run size used for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
unsigned nlclasses; /* Number of large size classes. */
@@ -2357,7 +2357,7 @@
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
ret = arena_malloc(tsd, arena, usize, zero, tcache);
- } else if (usize <= arena_maxclass && alignment <= PAGE) {
+ } else if (usize <= large_maxclass && alignment <= PAGE) {
/*
* Large; alignment doesn't require special run placement.
* However, the cached pointer may be at a random offset from
@@ -2368,7 +2368,7 @@
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
- if (likely(usize <= arena_maxclass)) {
+ if (likely(usize <= large_maxclass)) {
ret = arena_palloc_large(tsd, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
@@ -2800,7 +2800,7 @@
extra = HUGE_MAXCLASS - size;
usize_max = s2u(size + extra);

- if (likely(oldsize <= arena_maxclass && usize_min <= arena_maxclass)) {
+ if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
@@ -2852,7 +2852,7 @@
if (usize == 0)
return (NULL);

- if (likely(usize <= arena_maxclass)) {
+ if (likely(usize <= large_maxclass)) {
size_t copysize;

/* Try to avoid moving the allocation. */
@@ -3258,17 +3258,17 @@

arena_maxrun = chunksize - (map_bias << LG_PAGE);
assert(arena_maxrun > 0);
- arena_maxclass = index2size(size2index(chunksize)-1);
- if (arena_maxclass > arena_maxrun) {
+ large_maxclass = index2size(size2index(chunksize)-1);
+ if (large_maxclass > arena_maxrun) {
/*
* For small chunk sizes it's possible for there to be fewer
* non-header pages available than are necessary to serve the
* size classes just below chunksize.
*/
- arena_maxclass = arena_maxrun;
+ large_maxclass = arena_maxrun;
}
- assert(arena_maxclass > 0);
- nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+ assert(large_maxclass > 0);
+ nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
nhclasses = NSIZES - nlclasses - NBINS;

bin_info_init();
diff --git a/src/tcache.c b/src/tcache.c
index f1a30d5..fdafd0c 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -496,13 +496,13 @@
unsigned i;

/*
- * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+ * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
*/
if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > arena_maxclass)
- tcache_maxclass = arena_maxclass;
+ else if ((1U << opt_lg_tcache_max) > large_maxclass)
+ tcache_maxclass = large_maxclass;
else
tcache_maxclass = (1U << opt_lg_tcache_max);

diff --git a/test/unit/junk.c b/test/unit/junk.c
index 01d314b..b23dd1e 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -140,7 +140,7 @@
{

test_skip_if(!config_fill);
- test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+ test_junk(SMALL_MAXCLASS+1, large_maxclass);
}
TEST_END

@@ -148,7 +148,7 @@
{

test_skip_if(!config_fill);
- test_junk(arena_maxclass+1, chunksize*2);
+ test_junk(large_maxclass+1, chunksize*2);
}
TEST_END

@@ -172,8 +172,8 @@
{

arena_ralloc_junk_large_orig(ptr, old_usize, usize);
- assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
- assert_zu_eq(usize, shrink_size(arena_maxclass), "Unexpected usize");
+ assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+ assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
most_recently_trimmed = ptr;
}

@@ -181,13 +181,13 @@
{
void *p1, *p2;

- p1 = mallocx(arena_maxclass, 0);
+ p1 = mallocx(large_maxclass, 0);
assert_ptr_not_null(p1, "Unexpected mallocx() failure");

arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;

- p2 = rallocx(p1, shrink_size(arena_maxclass), 0);
+ p2 = rallocx(p1, shrink_size(large_maxclass), 0);
assert_ptr_eq(p1, p2, "Unexpected move during shrink");

arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
diff --git a/test/unit/stats.c b/test/unit/stats.c
index 81ef0b7..8e4bc63 100644
--- a/test/unit/stats.c
+++ b/test/unit/stats.c
@@ -42,7 +42,7 @@
size_t sz;
int expected = config_stats ? 0 : ENOENT;

- p = mallocx(arena_maxclass+1, 0);
+ p = mallocx(large_maxclass+1, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");

assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -88,7 +88,7 @@

little = mallocx(SMALL_MAXCLASS, 0);
assert_ptr_not_null(little, "Unexpected mallocx() failure");
- large = mallocx(arena_maxclass, 0);
+ large = mallocx(large_maxclass, 0);
assert_ptr_not_null(large, "Unexpected mallocx() failure");
huge = mallocx(chunksize, 0);
assert_ptr_not_null(huge, "Unexpected mallocx() failure");
@@ -200,7 +200,7 @@
assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
0, "Unexpected mallctl() failure");
- p = mallocx(arena_maxclass, 0);
+ p = mallocx(large_maxclass, 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");

assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
diff --git a/test/unit/zero.c b/test/unit/zero.c
index 65a8f0c..93afc2b 100644
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -55,7 +55,7 @@
{

test_skip_if(!config_fill);
- test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+ test_zero(SMALL_MAXCLASS+1, large_maxclass);
}
TEST_END

@@ -63,7 +63,7 @@
{

test_skip_if(!config_fill);
- test_zero(arena_maxclass+1, chunksize*2);
+ test_zero(large_maxclass+1, chunksize*2);
}
TEST_END
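As a footnote to the arena.c boot hunk above: a self-contained toy model (not
jemalloc code) of how large_maxclass is derived and clamped. size2index() and
index2size() are replaced by naive power-of-two stand-ins, and chunksize and
map_bias are given hypothetical values; the real functions consult jemalloc's
size-class tables:

/* Toy model of the arena_boot() computation; all values hypothetical. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE 12				/* hypothetical 4 KiB pages */

static size_t chunksize = (size_t)64 * 1024;	/* hypothetical small chunk */
static size_t map_bias = 13;			/* hypothetical header pages */

/* Naive power-of-two stand-ins for jemalloc's size-class lookups. */
static size_t
size2index(size_t size)
{
	size_t i = 0;

	while (((size_t)1 << i) < size)
		i++;
	return (i);
}

static size_t
index2size(size_t index)
{

	return ((size_t)1 << index);
}

int
main(void)
{
	size_t arena_maxrun, large_maxclass;

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);

	/* Largest size class strictly below chunksize... */
	large_maxclass = index2size(size2index(chunksize) - 1);
	/* ...clamped when chunk header pages leave too little room. */
	if (large_maxclass > arena_maxrun)
		large_maxclass = arena_maxrun;
	assert(large_maxclass > 0);

	printf("arena_maxrun=%zu large_maxclass=%zu\n", arena_maxrun,
	    large_maxclass);
	return (0);
}

With these toy values the clamp fires, mirroring the small-chunk case the
hunk's comment describes: header pages leave fewer non-header pages than the
size classes just below chunksize would need.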