#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;
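
/*
 * Rough illustration of these knobs, with example numbers only: in "ratio"
 * mode, opt_lg_dirty_mult bounds the dirty:active page ratio at
 * 1:2^opt_lg_dirty_mult, so a value of 3 allows roughly one dirty page to
 * linger per eight active pages before purging; -1 disables purging.  In
 * "decay" mode, opt_decay_time is the approximate number of seconds over
 * which unused dirty pages are purged; again, -1 disables purging.
 */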

arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)
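
/*
 * A rough sketch of what the ph_gen() call above expands to (signatures
 * inferred from the call sites later in this file, so treat them as
 * approximate): "arena_run_heap_"-prefixed pairing heap operations on
 * arena_run_heap_t, ordered by arena_run_addr_comp(), e.g.
 *
 *	static void arena_run_heap_insert(arena_run_heap_t *ph,
 *	    arena_chunk_map_misc_t *phn);
 *	static void arena_run_heap_remove(arena_run_heap_t *ph,
 *	    arena_chunk_map_misc_t *phn);
 *	static arena_chunk_map_misc_t *arena_run_heap_first(
 *	    arena_run_heap_t *ph);
 *
 * Because the comparator orders elements by address, each heap yields its
 * lowest-addressed run first, which is what first-best-fit selection below
 * relies on.
 */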

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow. This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in. This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
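
/*
 * A minimal sketch: a hypothetical helper (not part of the quantization
 * machinery above) illustrating the invariants run_quantize_floor() and
 * run_quantize_ceil() are expected to maintain for a nonzero, page-aligned
 * size no larger than HUGE_MAXCLASS.
 */
static UNUSED void
run_quantize_invariants_sketch(size_t size)
{

	assert(run_quantize_floor(size) <= size);
	assert(run_quantize_ceil(size) >= size);
	/* Quantized sizes are fixed points of both functions. */
	assert(run_quantize_floor(run_quantize_ceil(size)) ==
	    run_quantize_ceil(size));
}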

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}
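
/*
 * Worked example of the region addressing above, using hypothetical values
 * (reg0_offset == 16, reg_interval == 32): if bitmap_sfu() hands back
 * regind == 3, the region starts at rpages + 16 + 3 * 32 == rpages + 112.
 * arena_run_reg_dalloc() below inverts the mapping; conceptually regind ==
 * (ptr - rpages - reg0_offset) / reg_interval, which is effectively what
 * arena_run_regind() computes.
 */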

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks. Arbitrarily mark them as committed. The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}
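
/*
 * In outline, arena_chunk_alloc_internal() above first tries the arena's
 * cache of retained chunks via chunk_alloc_cache(), which hands back
 * committed memory, and only falls back to arena_chunk_alloc_internal_hard(),
 * which drops arena->lock around the potentially slow chunk hooks and commits
 * the chunk header if needed.  A rough sketch of the flow:
 *
 *	chunk = cache lookup;			(already committed; *commit = true)
 *	if (chunk == NULL)
 *		chunk = hook allocation + header commit;
 *	if (chunk != NULL)
 *		count chunksize and the map header in stats;
 */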

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run. Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a
	 * zeroed or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header. Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}
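
/*
 * Worked example for the shrink arithmetic above, with illustrative numbers
 * and assuming 2 MiB chunks: shrinking oldsize == 5 MiB to usize == 3 MiB
 * gives udiff == 2 MiB of pages that stop counting as active, and cdiff ==
 * CHUNK_CEILING(5 MiB) - CHUNK_CEILING(3 MiB) == 6 MiB - 4 MiB == 2 MiB of
 * whole trailing chunks handed back to the chunk cache.
 */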

static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	pszind_t pind, i;

	pind = psz2ind(run_quantize_ceil(size));

	for (i = pind; pind2sz(i) <= large_maxclass; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    &arena->runs_avail[i]);
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}
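
/*
 * Sketch of the search above with made-up numbers: a request for, say, five
 * pages is first rounded up by run_quantize_ceil() to the nearest run size
 * class, then the loop scans runs_avail[pind], runs_avail[pind+1], ... and
 * returns the lowest-addressed run from the first non-empty heap, so the fit
 * is "best" by size class and "first" (lowest address) within that class.
 */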

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs. Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs. Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
1156 ssize_t lg_dirty_mult;
1157
Jason Evansc1e00ef2016-05-10 22:21:10 -07001158 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001159 lg_dirty_mult = arena->lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001160 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001161
1162 return (lg_dirty_mult);
1163}
1164
1165bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001166arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001167{
1168
1169 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1170 return (true);
1171
Jason Evansc1e00ef2016-05-10 22:21:10 -07001172 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001173 arena->lg_dirty_mult = lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001174 arena_maybe_purge(tsdn, arena);
1175 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001176
1177 return (false);
1178}
1179
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001180static void
Jason Evans243f7a02016-02-19 20:09:31 -08001181arena_decay_deadline_init(arena_t *arena)
1182{
1183
1184 assert(opt_purge == purge_mode_decay);
1185
1186 /*
1187 * Generate a new deadline that is uniformly random within the next
1188 * epoch after the current one.
1189 */
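	/*
	 * Editorial note: per the code below, the deadline becomes
	 * epoch + interval, plus a jitter drawn uniformly from [0, interval)
	 * nanoseconds whenever decay is enabled (decay.time > 0).
	 */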
Jason Evans94e7ffa2016-10-10 20:32:19 -07001190 nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1191 nstime_add(&arena->decay.deadline, &arena->decay.interval);
1192 if (arena->decay.time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001193 nstime_t jitter;
Jason Evans243f7a02016-02-19 20:09:31 -08001194
Jason Evans94e7ffa2016-10-10 20:32:19 -07001195 nstime_init(&jitter, prng_range(&arena->decay.jitter_state,
1196 nstime_ns(&arena->decay.interval)));
1197 nstime_add(&arena->decay.deadline, &jitter);
Jason Evans243f7a02016-02-19 20:09:31 -08001198 }
1199}
1200
1201static bool
Jason Evans9bad0792016-02-21 11:25:02 -08001202arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001203{
1204
1205 assert(opt_purge == purge_mode_decay);
1206
Jason Evans94e7ffa2016-10-10 20:32:19 -07001207 return (nstime_compare(&arena->decay.deadline, time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001208}
1209
1210static size_t
1211arena_decay_backlog_npages_limit(const arena_t *arena)
1212{
1213 static const uint64_t h_steps[] = {
1214#define STEP(step, h, x, y) \
1215 h,
1216 SMOOTHSTEP
1217#undef STEP
1218 };
1219 uint64_t sum;
1220 size_t npages_limit_backlog;
1221 unsigned i;
1222
1223 assert(opt_purge == purge_mode_decay);
1224
1225 /*
1226 * For each element of decay_backlog, multiply by the corresponding
1227 * fixed-point smoothstep decay factor. Sum the products, then divide
1228 * to round down to the nearest whole number of pages.
1229 */
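	/*
	 * Editorial sketch (hypothetical numbers): the limit computed below is
	 * effectively
	 *   floor((sum_i backlog[i] * h_steps[i]) / 2^SMOOTHSTEP_BFP),
	 * so a backlog entry of 1024 pages whose smoothstep factor is
	 * 0.75 * 2^SMOOTHSTEP_BFP contributes 768 pages to the limit.
	 */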
1230 sum = 0;
1231 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
Jason Evans94e7ffa2016-10-10 20:32:19 -07001232 sum += arena->decay.backlog[i] * h_steps[i];
rustyx00432332016-04-12 09:50:54 +02001233 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
Jason Evans243f7a02016-02-19 20:09:31 -08001234
1235 return (npages_limit_backlog);
1236}
1237
1238static void
Jason Evansd419bb02016-10-11 15:30:01 -07001239arena_decay_backlog_update_last(arena_t *arena)
1240{
1241 size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1242 arena->ndirty - arena->decay.ndirty : 0;
1243 arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1244}
1245
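/*
 * Editorial note: the backlog is a sliding window of per-epoch dirty page
 * creation.  Advancing by nadvance_u64 epochs shifts the window left by that
 * many slots and zero-fills the vacated ones, so dirty pages created more than
 * SMOOTHSTEP_NSTEPS epochs ago stop contributing to the purge limit.
 */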
1246static void
1247arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1248{
1249
1250 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1251 memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1252 sizeof(size_t));
1253 } else {
1254 size_t nadvance_z = (size_t)nadvance_u64;
1255
1256 assert((uint64_t)nadvance_z == nadvance_u64);
1257
1258 memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
1259 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1260 if (nadvance_z > 1) {
1261 memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
1262 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1263 }
1264 }
1265
1266 arena_decay_backlog_update_last(arena);
1267}
1268
1269static void
1270arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001271{
rustyx00432332016-04-12 09:50:54 +02001272 uint64_t nadvance_u64;
Jason Evans9bad0792016-02-21 11:25:02 -08001273 nstime_t delta;
Jason Evans243f7a02016-02-19 20:09:31 -08001274
1275 assert(opt_purge == purge_mode_decay);
1276 assert(arena_decay_deadline_reached(arena, time));
1277
Jason Evans9bad0792016-02-21 11:25:02 -08001278 nstime_copy(&delta, time);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001279 nstime_subtract(&delta, &arena->decay.epoch);
1280 nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001281 assert(nadvance_u64 > 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001282
rustyx00432332016-04-12 09:50:54 +02001283 /* Add nadvance_u64 decay intervals to epoch. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001284 nstime_copy(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001285 nstime_imultiply(&delta, nadvance_u64);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001286 nstime_add(&arena->decay.epoch, &delta);
Jason Evans243f7a02016-02-19 20:09:31 -08001287
1288 /* Set a new deadline. */
1289 arena_decay_deadline_init(arena);
1290
1291 /* Update the backlog. */
Jason Evansd419bb02016-10-11 15:30:01 -07001292 arena_decay_backlog_update(arena, nadvance_u64);
Jason Evans243f7a02016-02-19 20:09:31 -08001293}
1294
Jason Evansd419bb02016-10-11 15:30:01 -07001295static void
1296arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001297{
Jason Evansd419bb02016-10-11 15:30:01 -07001298 size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001299
Jason Evansd419bb02016-10-11 15:30:01 -07001300 if (arena->ndirty > ndirty_limit)
1301 arena_purge_to_limit(tsdn, arena, ndirty_limit);
1302 arena->decay.ndirty = arena->ndirty;
1303}
Jason Evans243f7a02016-02-19 20:09:31 -08001304
Jason Evansd419bb02016-10-11 15:30:01 -07001305static void
1306arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1307{
Jason Evans243f7a02016-02-19 20:09:31 -08001308
Jason Evansd419bb02016-10-11 15:30:01 -07001309 arena_decay_epoch_advance_helper(arena, time);
1310 arena_decay_epoch_advance_purge(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001311}
1312
1313static void
1314arena_decay_init(arena_t *arena, ssize_t decay_time)
1315{
1316
Jason Evans94e7ffa2016-10-10 20:32:19 -07001317 arena->decay.time = decay_time;
Jason Evans243f7a02016-02-19 20:09:31 -08001318 if (decay_time > 0) {
Jason Evans94e7ffa2016-10-10 20:32:19 -07001319 nstime_init2(&arena->decay.interval, decay_time, 0);
1320 nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
Jason Evans243f7a02016-02-19 20:09:31 -08001321 }
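	/*
	 * Editorial note: the interval computed above is decay_time divided by
	 * SMOOTHSTEP_NSTEPS, so a fully populated backlog spans roughly
	 * decay_time seconds.
	 */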
1322
Jason Evans94e7ffa2016-10-10 20:32:19 -07001323 nstime_init(&arena->decay.epoch, 0);
1324 nstime_update(&arena->decay.epoch);
1325 arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
Jason Evans243f7a02016-02-19 20:09:31 -08001326 arena_decay_deadline_init(arena);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001327 arena->decay.ndirty = arena->ndirty;
Jason Evans94e7ffa2016-10-10 20:32:19 -07001328 memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
Jason Evans243f7a02016-02-19 20:09:31 -08001329}
1330
1331static bool
1332arena_decay_time_valid(ssize_t decay_time)
1333{
1334
Jason Evans022f6892016-03-02 22:41:32 -08001335 if (decay_time < -1)
1336 return (false);
1337 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1338 return (true);
1339 return (false);
Jason Evans243f7a02016-02-19 20:09:31 -08001340}
1341
1342ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001343arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001344{
1345 ssize_t decay_time;
1346
Jason Evansc1e00ef2016-05-10 22:21:10 -07001347 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001348 decay_time = arena->decay.time;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001349 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001350
1351 return (decay_time);
1352}
1353
1354bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001355arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
Jason Evans243f7a02016-02-19 20:09:31 -08001356{
1357
1358 if (!arena_decay_time_valid(decay_time))
1359 return (true);
1360
Jason Evansc1e00ef2016-05-10 22:21:10 -07001361 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001362 /*
1363 * Restart decay backlog from scratch, which may cause many dirty pages
1364 * to be immediately purged. It would conceptually be possible to map
1365 * the old backlog onto the new backlog, but there is no justification
1366 * for such complexity since decay_time changes are intended to be
1367 * infrequent, either between the {-1, 0, >0} states, or a one-time
1368 * arbitrary change during initial arena configuration.
1369 */
1370 arena_decay_init(arena, decay_time);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001371 arena_maybe_purge(tsdn, arena);
1372 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001373
1374 return (false);
1375}
1376
1377static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001378arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
Jason Evans05b21be2010-03-14 17:36:10 -07001379{
1380
Jason Evans243f7a02016-02-19 20:09:31 -08001381 assert(opt_purge == purge_mode_ratio);
1382
Jason Evanse3d13062012-10-30 15:42:37 -07001383 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001384 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001385 return;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001386
Jason Evans0a9f9a42015-06-22 18:50:32 -07001387 /*
1388 * Iterate, since preventing recursive purging could otherwise leave too
1389 * many dirty pages.
1390 */
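	/*
	 * Editorial sketch (hypothetical numbers): the threshold below is
	 * nactive >> lg_dirty_mult, e.g. with lg_dirty_mult == 3 an arena with
	 * 64k active pages tolerates up to 8k dirty pages, subject to the
	 * chunk_npages floor, before purging.
	 */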
1391 while (true) {
1392 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1393 if (threshold < chunk_npages)
1394 threshold = chunk_npages;
1395 /*
1396 * Don't purge unless the number of purgeable pages exceeds the
1397 * threshold.
1398 */
1399 if (arena->ndirty <= threshold)
1400 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001401 arena_purge_to_limit(tsdn, arena, threshold);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001402 }
Jason Evans05b21be2010-03-14 17:36:10 -07001403}
1404
Jason Evans243f7a02016-02-19 20:09:31 -08001405static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001406arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001407{
Jason Evans9bad0792016-02-21 11:25:02 -08001408 nstime_t time;
Jason Evans243f7a02016-02-19 20:09:31 -08001409
1410 assert(opt_purge == purge_mode_decay);
1411
1412 /* Purge all or nothing if the option is disabled. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001413 if (arena->decay.time <= 0) {
1414 if (arena->decay.time == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001415 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001416 return;
1417 }
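	/*
	 * Editorial note: per the check above, decay.time == -1 disables
	 * purging entirely and decay.time == 0 purges all dirty pages
	 * immediately; only positive values use the epoch/backlog logic below.
	 */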
1418
Jason Evans45a5bf62016-10-10 22:15:10 -07001419 nstime_init(&time, 0);
1420 nstime_update(&time);
1421 if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1422 &time) > 0)) {
1423 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001424 * Time went backwards. Move the epoch back in time and
1425 * generate a new deadline, with the expectation that time
1426 * typically flows forward for long enough periods of time that
1427 * epochs complete. Unfortunately, this strategy is susceptible
1428 * to clock jitter triggering premature epoch advances, but
1429 * clock jitter estimation and compensation isn't feasible here
1430 * because calls into this code are event-driven.
Jason Evans45a5bf62016-10-10 22:15:10 -07001431 */
1432 nstime_copy(&arena->decay.epoch, &time);
Jason Evansd419bb02016-10-11 15:30:01 -07001433 arena_decay_deadline_init(arena);
Jason Evans45a5bf62016-10-10 22:15:10 -07001434 } else {
1435 /* Verify that time does not go backwards. */
1436 assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001437 }
1438
Jason Evans243f7a02016-02-19 20:09:31 -08001439 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001440 * If the deadline has been reached, advance to the current epoch and
1441 * purge to the new limit if necessary. Note that dirty pages created
1442 * during the current epoch are not subject to purge until a future
1443 * epoch, so as a result purging only happens during epoch advances.
Jason Evans243f7a02016-02-19 20:09:31 -08001444 */
Jason Evansd419bb02016-10-11 15:30:01 -07001445 if (arena_decay_deadline_reached(arena, &time))
1446 arena_decay_epoch_advance(tsdn, arena, &time);
Jason Evans243f7a02016-02-19 20:09:31 -08001447}
1448
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001449void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001450arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001451{
1452
1453 /* Don't recursively purge. */
1454 if (arena->purging)
1455 return;
1456
Jason Evans243f7a02016-02-19 20:09:31 -08001457 if (opt_purge == purge_mode_ratio)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001458 arena_maybe_purge_ratio(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001459 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001460 arena_maybe_purge_decay(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001461}
1462
Qinfan Wua244e502014-07-21 10:23:36 -07001463static size_t
1464arena_dirty_count(arena_t *arena)
1465{
1466 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001467 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001468 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001469
Jason Evans38e42d32015-03-10 18:15:40 -07001470 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001471 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001472 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001473 size_t npages;
1474
Jason Evansf5c8f372015-03-10 18:29:49 -07001475 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001476 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001477 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001478 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001479 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1480 rdelm);
1481 arena_chunk_map_misc_t *miscelm =
1482 arena_rd_to_miscelm(rdelm);
1483 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001484 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1485 0);
1486 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1487 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1488 npages = arena_mapbits_unallocated_size_get(chunk,
1489 pageind) >> LG_PAGE;
1490 }
Qinfan Wua244e502014-07-21 10:23:36 -07001491 ndirty += npages;
1492 }
1493
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001494 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001495}
1496
1497static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001498arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001499 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001500 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001501{
Jason Evans38e42d32015-03-10 18:15:40 -07001502 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001503 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001504 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001505
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001506 /* Stash runs/chunks according to ndirty_limit. */
Jason Evans38e42d32015-03-10 18:15:40 -07001507 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001508 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001509 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001510 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001511 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001512
Jason Evansf5c8f372015-03-10 18:29:49 -07001513 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001514 extent_node_t *chunkselm_next;
1515 bool zero;
Jason Evansee41ad42015-02-15 18:04:46 -08001516 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001517
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001518 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001519 if (opt_purge == purge_mode_decay && arena->ndirty -
1520 (nstashed + npages) < ndirty_limit)
1521 break;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001522
Jason Evans738e0892015-02-18 01:15:50 -08001523 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001524 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001525 * Allocate. chunkselm remains valid due to the
1526 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001527 */
Jason Evansee41ad42015-02-15 18:04:46 -08001528 zero = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001529 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001530 extent_node_addr_get(chunkselm),
1531 extent_node_size_get(chunkselm), chunksize, &zero,
1532 false);
1533 assert(chunk == extent_node_addr_get(chunkselm));
1534 assert(zero == extent_node_zeroed_get(chunkselm));
1535 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001536 purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001537 assert(npages == (extent_node_size_get(chunkselm) >>
1538 LG_PAGE));
Jason Evansee41ad42015-02-15 18:04:46 -08001539 chunkselm = chunkselm_next;
1540 } else {
1541 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001542 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1543 arena_chunk_map_misc_t *miscelm =
1544 arena_rd_to_miscelm(rdelm);
1545 size_t pageind = arena_miscelm_to_pageind(miscelm);
1546 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001547 size_t run_size =
1548 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001549
Jason Evansee41ad42015-02-15 18:04:46 -08001550 npages = run_size >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001551 if (opt_purge == purge_mode_decay && arena->ndirty -
1552 (nstashed + npages) < ndirty_limit)
1553 break;
Jason Evansee41ad42015-02-15 18:04:46 -08001554
1555 assert(pageind + npages <= chunk_npages);
1556 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1557 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1558
1559 /*
1560 * If purging the spare chunk's run, make it available
1561 * prior to allocation.
1562 */
1563 if (chunk == arena->spare)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001564 arena_chunk_alloc(tsdn, arena);
Jason Evansee41ad42015-02-15 18:04:46 -08001565
1566 /* Temporarily allocate the free dirty run. */
1567 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001568 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001569 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001570 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001571 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001572 assert(qr_next(rdelm, rd_link) == rdelm);
1573 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001574 }
Jason Evans38e42d32015-03-10 18:15:40 -07001575 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001576 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001577
Qinfan Wue9708002014-07-21 18:09:04 -07001578 nstashed += npages;
Jason Evans243f7a02016-02-19 20:09:31 -08001579 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1580 ndirty_limit)
Qinfan Wue9708002014-07-21 18:09:04 -07001581 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001582 }
Qinfan Wue9708002014-07-21 18:09:04 -07001583
1584 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001585}
1586
1587static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001588arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001589 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001590 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001591{
Qinfan Wue9708002014-07-21 18:09:04 -07001592 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001593 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001594 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001595
Jason Evansaa5113b2014-01-14 16:23:03 -08001596 if (config_stats)
1597 nmadvise = 0;
1598 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001599
Jason Evansc1e00ef2016-05-10 22:21:10 -07001600 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001601 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001602 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001603 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001604 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001605
Jason Evansf5c8f372015-03-10 18:29:49 -07001606 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001607 /*
1608 * Don't actually purge the chunk here because 1)
1609 * chunkselm is embedded in the chunk and must remain
1610 * valid, and 2) we deallocate the chunk in
1611 * arena_unstash_purged(), where it is destroyed,
1612 * decommitted, or purged, depending on chunk
1613 * deallocation policy.
1614 */
Jason Evansee41ad42015-02-15 18:04:46 -08001615 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001616 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001617 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001618 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001619 size_t pageind, run_size, flag_unzeroed, flags, i;
1620 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001621 arena_chunk_t *chunk =
1622 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001623 arena_chunk_map_misc_t *miscelm =
1624 arena_rd_to_miscelm(rdelm);
1625 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001626 run_size = arena_mapbits_large_size_get(chunk, pageind);
1627 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001628
Jason Evansee41ad42015-02-15 18:04:46 -08001629 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001630 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1631 assert(!arena_mapbits_decommitted_get(chunk,
1632 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001633 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1634 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1635 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001636 flag_unzeroed = 0;
1637 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001638 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001639 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001640 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001641 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1642 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001643 }
Jason Evans45186f02015-08-10 23:03:34 -07001644 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1645 flags);
1646 arena_mapbits_large_set(chunk, pageind, run_size,
1647 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001648
1649 /*
Jason Evans45186f02015-08-10 23:03:34 -07001650 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001651 * chunk_purge_wrapper() has returned whether the pages
1652 * were zeroed as a side effect of purging. This chunk
1653 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001654 * isn't currently owned by this thread, because the run
1655 * is marked as allocated, thus protecting it from being
1656 * modified by any other thread. As long as these
1657 * writes don't perturb the first and last elements'
1658 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1659 */
Jason Evans45186f02015-08-10 23:03:34 -07001660 for (i = 1; i < npages-1; i++) {
1661 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001662 flag_unzeroed);
1663 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001664 }
Qinfan Wue9708002014-07-21 18:09:04 -07001665
Jason Evansaa5113b2014-01-14 16:23:03 -08001666 npurged += npages;
1667 if (config_stats)
1668 nmadvise++;
1669 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001670 malloc_mutex_lock(tsdn, &arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001671
1672 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001673 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001674 arena->stats.purged += npurged;
1675 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001676
1677 return (npurged);
1678}
1679
1680static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001681arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001682 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001683 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001684{
Jason Evans38e42d32015-03-10 18:15:40 -07001685 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001686 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001687
Jason Evansb49a3342015-07-28 11:28:19 -04001688 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001689 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001690 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001691 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1692 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001693 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001694 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001695 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001696 void *addr = extent_node_addr_get(chunkselm);
1697 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001698 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001699 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001700 extent_node_dirty_remove(chunkselm);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001701 arena_node_dalloc(tsdn, arena, chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001702 chunkselm = chunkselm_next;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001703 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
Jason Evansb2c0d632016-04-13 23:36:15 -07001704 size, zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001705 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001706 arena_chunk_t *chunk =
1707 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001708 arena_chunk_map_misc_t *miscelm =
1709 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001710 size_t pageind = arena_miscelm_to_pageind(miscelm);
1711 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1712 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001713 arena_run_t *run = &miscelm->run;
1714 qr_remove(rdelm, rd_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001715 arena_run_dalloc(tsdn, arena, run, false, true,
Jason Evansb2c0d632016-04-13 23:36:15 -07001716 decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001717 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001718 }
1719}
1720
Jason Evans243f7a02016-02-19 20:09:31 -08001721/*
1722 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 1723 * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
1724 * desired state:
1725 * (arena->ndirty <= ndirty_limit)
1726 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1727 * violating the invariant:
1728 * (arena->ndirty >= ndirty_limit)
1729 */
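/*
 * Editorial sketch (hypothetical numbers): with arena->ndirty == 1000 and
 * ndirty_limit == 600, purge_mode_ratio purges until ndirty is at or below 600
 * (possibly overshooting below it, since purging happens at run/chunk
 * granularity), whereas purge_mode_decay skips any run/chunk whose purge would
 * drop ndirty below 600 (possibly stopping above it).
 */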
Jason Evans8d6a3e82015-03-18 18:55:33 -07001730static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001731arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
Jason Evanse476f8a2010-01-16 09:53:50 -08001732{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001733 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001734 size_t npurge, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001735 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001736 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001737
Jason Evans0a9f9a42015-06-22 18:50:32 -07001738 arena->purging = true;
1739
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001740 /*
1741 * Calls to arena_dirty_count() are disabled even for debug builds
1742 * because overhead grows nonlinearly as memory usage increases.
1743 */
1744 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001745 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001746 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001747 }
Jason Evans243f7a02016-02-19 20:09:31 -08001748 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1749 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001750
1751 qr_new(&purge_runs_sentinel, rd_link);
1752 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1753
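	/*
	 * Editorial note: purging proceeds in three phases -- stash dirty
	 * runs/chunks under arena->lock, purge the stashed memory with the
	 * lock temporarily dropped (see arena_purge_stashed()), then unstash
	 * and deallocate what was purged.
	 */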
Jason Evansc1e00ef2016-05-10 22:21:10 -07001754 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001755 &purge_runs_sentinel, &purge_chunks_sentinel);
1756 if (npurge == 0)
1757 goto label_return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001758 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -07001759 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001760 assert(npurged == npurge);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001761 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001762 &purge_chunks_sentinel);
Jason Evanse476f8a2010-01-16 09:53:50 -08001763
Jason Evans7372b152012-02-10 20:22:09 -08001764 if (config_stats)
1765 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001766
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001767label_return:
Jason Evans0a9f9a42015-06-22 18:50:32 -07001768 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001769}
1770
Jason Evans6005f072010-09-30 16:55:08 -07001771void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001772arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
Jason Evans6005f072010-09-30 16:55:08 -07001773{
1774
Jason Evansc1e00ef2016-05-10 22:21:10 -07001775 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001776 if (all)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001777 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001778 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001779 arena_maybe_purge(tsdn, arena);
1780 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans6005f072010-09-30 16:55:08 -07001781}
1782
Jason Evanse476f8a2010-01-16 09:53:50 -08001783static void
Jason Evans19ff2ce2016-04-22 14:37:17 -07001784arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1785{
1786 size_t pageind, npages;
1787
1788 cassert(config_prof);
1789 assert(opt_prof);
1790
1791 /*
1792 * Iterate over the allocated runs and remove profiled allocations from
1793 * the sample set.
1794 */
1795 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1796 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1797 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1798 void *ptr = (void *)((uintptr_t)chunk + (pageind
1799 << LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001800 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1801 config_prof);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001802
1803 prof_free(tsd, ptr, usize);
1804 npages = arena_mapbits_large_size_get(chunk,
1805 pageind) >> LG_PAGE;
1806 } else {
1807 /* Skip small run. */
1808 size_t binind = arena_mapbits_binind_get(chunk,
1809 pageind);
1810 arena_bin_info_t *bin_info =
1811 &arena_bin_info[binind];
1812 npages = bin_info->run_size >> LG_PAGE;
1813 }
1814 } else {
1815 /* Skip unallocated run. */
1816 npages = arena_mapbits_unallocated_size_get(chunk,
1817 pageind) >> LG_PAGE;
1818 }
1819 assert(pageind + npages <= chunk_npages);
1820 }
1821}
1822
1823void
1824arena_reset(tsd_t *tsd, arena_t *arena)
1825{
1826 unsigned i;
1827 extent_node_t *node;
1828
1829 /*
1830 * Locking in this function is unintuitive. The caller guarantees that
1831 * no concurrent operations are happening in this arena, but there are
1832 * still reasons that some locking is necessary:
1833 *
1834 * - Some of the functions in the transitive closure of calls assume
1835 * appropriate locks are held, and in some cases these locks are
1836 * temporarily dropped to avoid lock order reversal or deadlock due to
1837 * reentry.
1838 * - mallctl("epoch", ...) may concurrently refresh stats. While
1839 * strictly speaking this is a "concurrent operation", disallowing
1840 * stats refreshes would impose an inconvenient burden.
1841 */
1842
1843 /* Remove large allocations from prof sample set. */
1844 if (config_prof && opt_prof) {
1845 ql_foreach(node, &arena->achunks, ql_link) {
1846 arena_achunk_prof_reset(tsd, arena,
1847 extent_node_addr_get(node));
1848 }
1849 }
1850
Jason Evans7e674952016-04-25 13:26:54 -07001851 /* Reset curruns for large size classes. */
1852 if (config_stats) {
1853 for (i = 0; i < nlclasses; i++)
1854 arena->stats.lstats[i].curruns = 0;
1855 }
1856
Jason Evans19ff2ce2016-04-22 14:37:17 -07001857 /* Huge allocations. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001858 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001859 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1860 ql_last(&arena->huge, ql_link)) {
1861 void *ptr = extent_node_addr_get(node);
Jason Evans7e674952016-04-25 13:26:54 -07001862 size_t usize;
Jason Evans19ff2ce2016-04-22 14:37:17 -07001863
Jason Evansc1e00ef2016-05-10 22:21:10 -07001864 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001865 if (config_stats || (config_prof && opt_prof))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001866 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans7e674952016-04-25 13:26:54 -07001867 /* Remove huge allocation from prof sample set. */
1868 if (config_prof && opt_prof)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001869 prof_free(tsd, ptr, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001870 huge_dalloc(tsd_tsdn(tsd), ptr);
1871 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001872 /* Cancel out unwanted effects on stats. */
1873 if (config_stats)
1874 arena_huge_reset_stats_cancel(arena, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001875 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001876 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001877
Jason Evansc1e00ef2016-05-10 22:21:10 -07001878 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001879
1880 /* Bins. */
1881 for (i = 0; i < NBINS; i++) {
1882 arena_bin_t *bin = &arena->bins[i];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001883 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001884 bin->runcur = NULL;
1885 arena_run_heap_new(&bin->runs);
1886 if (config_stats) {
1887 bin->stats.curregs = 0;
1888 bin->stats.curruns = 0;
1889 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001890 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001891 }
1892
1893 /*
1894 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1895 * chains directly correspond.
1896 */
1897 qr_new(&arena->runs_dirty, rd_link);
1898 for (node = qr_next(&arena->chunks_cache, cc_link);
1899 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1900 qr_new(&node->rd, rd_link);
1901 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1902 }
1903
1904 /* Arena chunks. */
1905 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1906 ql_last(&arena->achunks, ql_link)) {
1907 ql_remove(&arena->achunks, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001908 arena_chunk_discard(tsd_tsdn(tsd), arena,
1909 extent_node_addr_get(node));
Jason Evans19ff2ce2016-04-22 14:37:17 -07001910 }
1911
1912 /* Spare. */
1913 if (arena->spare != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001914 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001915 arena->spare = NULL;
1916 }
1917
1918 assert(!arena->purging);
1919 arena->nactive = 0;
1920
Jason Evansf193fd82016-04-08 14:17:57 -07001921 for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
1922 i++)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001923 arena_run_heap_new(&arena->runs_avail[i]);
1924
Jason Evansc1e00ef2016-05-10 22:21:10 -07001925 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001926}
1927
1928static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001929arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001930 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1931 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001932{
Jason Evansaa5113b2014-01-14 16:23:03 -08001933 size_t size = *p_size;
1934 size_t run_ind = *p_run_ind;
1935 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001936
1937 /* Try to coalesce forward. */
1938 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001939 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001940 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1941 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1942 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001943 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1944 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001945 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001946
1947 /*
1948 * Remove successor from runs_avail; the coalesced run is
1949 * inserted later.
1950 */
Jason Evans203484e2012-05-02 00:30:36 -07001951 assert(arena_mapbits_unallocated_size_get(chunk,
1952 run_ind+run_pages+nrun_pages-1) == nrun_size);
1953 assert(arena_mapbits_dirty_get(chunk,
1954 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001955 assert(arena_mapbits_decommitted_get(chunk,
1956 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001957 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001958
Jason Evansee41ad42015-02-15 18:04:46 -08001959 /*
1960 * If the successor is dirty, remove it from the set of dirty
1961 * pages.
1962 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07001963 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08001964 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07001965 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001966 }
1967
Jason Evanse476f8a2010-01-16 09:53:50 -08001968 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001969 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001970
Jason Evans203484e2012-05-02 00:30:36 -07001971 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1972 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1973 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001974 }
1975
1976 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001977 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1978 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07001979 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1980 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001981 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1982 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001983 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001984
Jason Evans12ca9142010-10-17 19:56:09 -07001985 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001986
1987 /*
1988 * Remove predecessor from runs_avail; the coalesced run is
1989 * inserted later.
1990 */
Jason Evans203484e2012-05-02 00:30:36 -07001991 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1992 prun_size);
1993 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001994 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1995 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001996 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001997
Jason Evansee41ad42015-02-15 18:04:46 -08001998 /*
1999 * If the predecessor is dirty, remove it from the set of dirty
2000 * pages.
2001 */
2002 if (flag_dirty != 0) {
2003 arena_run_dirty_remove(arena, chunk, run_ind,
2004 prun_pages);
2005 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07002006
Jason Evanse476f8a2010-01-16 09:53:50 -08002007 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002008 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002009
Jason Evans203484e2012-05-02 00:30:36 -07002010 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2011 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2012 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002013 }
2014
Jason Evansaa5113b2014-01-14 16:23:03 -08002015 *p_size = size;
2016 *p_run_ind = run_ind;
2017 *p_run_pages = run_pages;
2018}
2019
Jason Evans8fadb1a2015-08-04 10:49:46 -07002020static size_t
2021arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2022 size_t run_ind)
2023{
2024 size_t size;
2025
2026 assert(run_ind >= map_bias);
2027 assert(run_ind < chunk_npages);
2028
2029 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2030 size = arena_mapbits_large_size_get(chunk, run_ind);
2031 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2032 run_ind+(size>>LG_PAGE)-1) == 0);
2033 } else {
2034 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2035 size = bin_info->run_size;
2036 }
2037
2038 return (size);
2039}
2040
Jason Evansaa5113b2014-01-14 16:23:03 -08002041static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002042arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
Jason Evansb2c0d632016-04-13 23:36:15 -07002043 bool cleaned, bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08002044{
2045 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002046 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002047 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08002048
2049 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002050 miscelm = arena_run_to_miscelm(run);
2051 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08002052 assert(run_ind >= map_bias);
2053 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002054 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08002055 run_pages = (size >> LG_PAGE);
Jason Evans40ee9aa2016-02-27 12:34:50 -08002056 arena_nactive_sub(arena, run_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -08002057
2058 /*
 2059 * The run is dirty if the caller claims to have dirtied it, or if it
 2060 * was already dirty before being allocated and the caller does not
 2061 * claim to have cleaned it.
2062 */
2063 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2064 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002065 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2066 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08002067 dirty = true;
2068 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002069 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08002070
2071 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07002072 if (dirty || decommitted) {
2073 size_t flags = flag_dirty | flag_decommitted;
2074 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002075 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002076 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002077 } else {
2078 arena_mapbits_unallocated_set(chunk, run_ind, size,
2079 arena_mapbits_unzeroed_get(chunk, run_ind));
2080 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2081 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2082 }
2083
Jason Evans8fadb1a2015-08-04 10:49:46 -07002084 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2085 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08002086
Jason Evanse476f8a2010-01-16 09:53:50 -08002087 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07002088 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2089 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2090 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2091 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002092 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2093 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07002094 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07002095
Jason Evans070b3c32014-08-14 14:45:58 -07002096 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08002097 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002098
Jason Evans203484e2012-05-02 00:30:36 -07002099 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07002100 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07002101 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07002102 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07002103 arena_chunk_dalloc(tsdn, arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07002104 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002105
Jason Evans4fb7f512010-01-27 18:27:09 -08002106 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07002107 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08002108 * deallocated above, since in that case it is the spare. Waiting
2109 * until after possible chunk deallocation to do dirty processing
2110 * allows for an old spare to be fully deallocated, thus decreasing the
2111 * chances of spuriously crossing the dirty page purging threshold.
2112 */
Jason Evans8d4203c2010-04-13 20:53:21 -07002113 if (dirty)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002114 arena_maybe_purge(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002115}
2116
2117static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002118arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002119 arena_run_t *run, size_t oldsize, size_t newsize)
Jason Evanse476f8a2010-01-16 09:53:50 -08002120{
Jason Evans0c5dd032014-09-29 01:31:39 -07002121 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2122 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002123 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002124 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002125 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2126 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2127 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002128
2129 assert(oldsize > newsize);
2130
2131 /*
2132 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002133 * leading run as separately allocated. Set the last element of each
2134 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002135 */
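	/*
	 * Editorial sketch: conceptually the run is split as
	 *   [oldsize] -> [head = oldsize - newsize][tail = newsize];
	 * the mapbits updates below describe the two pieces separately, the
	 * head is released via arena_run_dalloc(), and the caller keeps the
	 * trailing newsize bytes.
	 */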
Jason Evans203484e2012-05-02 00:30:36 -07002136 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002137 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2138 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2139 pageind+head_npages-1)));
2140 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2141 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002142
Jason Evans7372b152012-02-10 20:22:09 -08002143 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002144 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002145 assert(arena_mapbits_large_size_get(chunk,
2146 pageind+head_npages+tail_npages-1) == 0);
2147 assert(arena_mapbits_dirty_get(chunk,
2148 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07002149 }
Jason Evansd8ceef62012-05-10 20:59:39 -07002150 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002151 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2152 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002153
Jason Evansc1e00ef2016-05-10 22:21:10 -07002154 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
Jason Evansb2c0d632016-04-13 23:36:15 -07002155 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002156}
2157
2158static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002159arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002160 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08002161{
Jason Evans0c5dd032014-09-29 01:31:39 -07002162 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2163 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002164 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002165 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002166 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2167 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2168 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07002169 arena_chunk_map_misc_t *tail_miscelm;
2170 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002171
2172 assert(oldsize > newsize);
2173
2174 /*
2175 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002176 * trailing run as separately allocated. Set the last element of each
2177 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002178 */
Jason Evans203484e2012-05-02 00:30:36 -07002179 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002180 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2181 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2182 pageind+head_npages-1)));
2183 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2184 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002185
Jason Evans203484e2012-05-02 00:30:36 -07002186 if (config_debug) {
2187 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2188 assert(arena_mapbits_large_size_get(chunk,
2189 pageind+head_npages+tail_npages-1) == 0);
2190 assert(arena_mapbits_dirty_get(chunk,
2191 pageind+head_npages+tail_npages-1) == flag_dirty);
2192 }
2193 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002194 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2195 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002196
Jason Evans61a6dfc2016-03-23 16:04:38 -07002197 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
Jason Evans0c5dd032014-09-29 01:31:39 -07002198 tail_run = &tail_miscelm->run;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002199 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
Jason Evansb2c0d632016-04-13 23:36:15 -07002200 != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002201}
2202
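/*
 * Each bin tracks its non-full runs in a pairing heap (bin->runs) keyed by
 * the address of the run's map-misc element, so lower-addressed runs tend to
 * be reused first.  Removing a run from the heap for reuse is counted as a
 * "rerun" in the bin stats.
 */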
Jason Evanse7a10582012-02-13 17:36:52 -08002203static void
2204arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2205{
Jason Evans0c5dd032014-09-29 01:31:39 -07002206 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08002207
Jason Evansc6a2c392016-03-26 17:30:37 -07002208 arena_run_heap_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08002209}
2210
2211static arena_run_t *
2212arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2213{
Jason Evansc6a2c392016-03-26 17:30:37 -07002214 arena_chunk_map_misc_t *miscelm;
2215
2216 miscelm = arena_run_heap_remove_first(&bin->runs);
2217 if (miscelm == NULL)
2218 return (NULL);
2219 if (config_stats)
2220 bin->stats.reruns++;
2221
2222 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08002223}
2224
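/*
 * arena_bin_nonfull_run_get() may drop bin->lock in order to take arena->lock
 * and allocate a fresh run.  Because another thread can refill the bin's run
 * heap (or runcur) during that window, the function retries the heap after
 * reacquiring bin->lock, and its caller re-checks runcur as well.
 */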
2225static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002226arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002227{
Jason Evanse476f8a2010-01-16 09:53:50 -08002228 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002229 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002230 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08002231
2232 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08002233 run = arena_bin_nonfull_run_tryget(bin);
2234 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002235 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002236 /* No existing runs have any space available. */
2237
Jason Evans49f7e8f2011-03-15 13:59:15 -07002238 binind = arena_bin_index(arena, bin);
2239 bin_info = &arena_bin_info[binind];
2240
Jason Evanse476f8a2010-01-16 09:53:50 -08002241 /* Allocate a new run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002242 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002243 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002244 malloc_mutex_lock(tsdn, &arena->lock);
2245 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07002246 if (run != NULL) {
2247 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07002248 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002249 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07002250 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07002251 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002252 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002253 /********************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002254 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002255 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08002256 if (config_stats) {
2257 bin->stats.nruns++;
2258 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08002259 }
Jason Evanse00572b2010-03-14 19:43:56 -07002260 return (run);
2261 }
2262
2263 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002264 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07002265 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07002266 * so search one more time.
2267 */
Jason Evanse7a10582012-02-13 17:36:52 -08002268 run = arena_bin_nonfull_run_tryget(bin);
2269 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07002270 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07002271
2272 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002273}
2274
Jason Evans1e0a6362010-03-13 13:41:58 -08002275/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08002276static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002277arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002278{
Jason Evansd01fd192015-08-19 15:21:32 -07002279 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002280 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07002281 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002282
Jason Evans49f7e8f2011-03-15 13:59:15 -07002283 binind = arena_bin_index(arena, bin);
2284 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07002285 bin->runcur = NULL;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002286 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002287 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2288 /*
2289 * Another thread updated runcur while this one ran without the
2290 * bin lock in arena_bin_nonfull_run_get().
2291 */
Dmitry-Mea306a602015-09-04 13:15:28 +03002292 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07002293 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002294 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07002295 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07002296 arena_chunk_t *chunk;
2297
2298 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002299 * arena_run_alloc_small() may have allocated run, or
2300 * it may have pulled run from the bin's run heap.
2301 * Therefore it is unsafe to make any assumptions about
2302 * how run has previously been used, and
2303 * arena_bin_lower_run() must be called, as if a region
2304 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07002305 */
2306 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansb2c0d632016-04-13 23:36:15 -07002307 if (run->nfree == bin_info->nregs) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002308 arena_dalloc_bin_run(tsdn, arena, chunk, run,
Jason Evansb2c0d632016-04-13 23:36:15 -07002309 bin);
2310 } else
Jason Evans8de6a022010-10-17 20:57:30 -07002311 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002312 }
2313 return (ret);
2314 }
2315
2316 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002317 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07002318
2319 bin->runcur = run;
2320
Jason Evanse476f8a2010-01-16 09:53:50 -08002321 assert(bin->runcur->nfree > 0);
2322
Jason Evans49f7e8f2011-03-15 13:59:15 -07002323 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08002324}
2325
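/*
 * Fill a tcache bin directly from the arena.  The number of regions requested
 * is ncached_max >> lg_fill_div (e.g. an illustrative ncached_max of 200 with
 * lg_fill_div of 1 yields a fill of 100).  Regions are stored immediately
 * below tbin->avail, ordered so that low regions get used first.
 */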
Jason Evans86815df2010-03-13 20:32:56 -08002326void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002327arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
Jason Evans243f7a02016-02-19 20:09:31 -08002328 szind_t binind, uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08002329{
2330 unsigned i, nfill;
2331 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002332
2333 assert(tbin->ncached == 0);
2334
Jason Evansc1e00ef2016-05-10 22:21:10 -07002335 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2336 prof_idump(tsdn);
Jason Evanse69bee02010-03-15 22:25:23 -07002337 bin = &arena->bins[binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002338 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002339 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2340 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002341 arena_run_t *run;
2342 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002343 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002344 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002345 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002346 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002347 if (ptr == NULL) {
2348 /*
2349 * OOM. tbin->avail isn't yet filled down to its first
2350 * element, so the successful allocations (if any) must
Qi Wangf4a0f322015-10-27 15:12:10 -07002351 * be moved just before tbin->avail before bailing out.
Jason Evansf11a6772014-10-05 13:05:10 -07002352 */
2353 if (i > 0) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002354 memmove(tbin->avail - i, tbin->avail - nfill,
Jason Evansf11a6772014-10-05 13:05:10 -07002355 i * sizeof(void *));
2356 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002357 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002358 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002359 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002360 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2361 true);
2362 }
Jason Evans9c43c132011-03-18 10:53:15 -07002363 /* Insert such that low regions get used first. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002364 *(tbin->avail - nfill + i) = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002365 }
Jason Evans7372b152012-02-10 20:22:09 -08002366 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002367 bin->stats.nmalloc += i;
2368 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002369 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002370 bin->stats.nfills++;
2371 tbin->tstats.nrequests = 0;
2372 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002373 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002374 tbin->ncached = i;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002375 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002376}
Jason Evanse476f8a2010-01-16 09:53:50 -08002377
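/*
 * Junk-fill a small region.  With redzones enabled, each reg_interval-byte
 * slot holds [leading redzone | reg_size usable bytes | trailing redzone].
 * When the region is about to be zeroed, only the two redzones are filled
 * with JEMALLOC_ALLOC_JUNK; otherwise the whole interval starting at the
 * leading redzone is junk-filled.
 */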
Jason Evans122449b2012-04-06 00:35:09 -07002378void
2379arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2380{
2381
Chris Petersona82070e2016-03-27 23:28:39 -07002382 size_t redzone_size = bin_info->redzone_size;
2383
Jason Evans122449b2012-04-06 00:35:09 -07002384 if (zero) {
Chris Petersona82070e2016-03-27 23:28:39 -07002385 memset((void *)((uintptr_t)ptr - redzone_size),
2386 JEMALLOC_ALLOC_JUNK, redzone_size);
2387 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2388 JEMALLOC_ALLOC_JUNK, redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07002389 } else {
Chris Petersona82070e2016-03-27 23:28:39 -07002390 memset((void *)((uintptr_t)ptr - redzone_size),
2391 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
Jason Evans122449b2012-04-06 00:35:09 -07002392 }
2393}
2394
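/*
 * The JEMALLOC_JET blocks below are whitebox-testing hooks: the real
 * implementation is compiled under an n_-prefixed name and exposed through a
 * same-named function pointer, which jemalloc's test harness can replace in
 * order to intercept calls (e.g. redzone-corruption reports).
 */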
Jason Evans0d6c5d82013-12-17 15:14:36 -08002395#ifdef JEMALLOC_JET
2396#undef arena_redzone_corruption
Jason Evansab0cfe02016-04-18 15:11:20 -07002397#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
Jason Evans0d6c5d82013-12-17 15:14:36 -08002398#endif
2399static void
2400arena_redzone_corruption(void *ptr, size_t usize, bool after,
2401 size_t offset, uint8_t byte)
2402{
2403
Jason Evans5fae7dc2015-07-23 13:56:25 -07002404 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2405 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002406 after ? "after" : "before", ptr, usize, byte);
2407}
2408#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002409#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002410#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2411arena_redzone_corruption_t *arena_redzone_corruption =
Jason Evansab0cfe02016-04-18 15:11:20 -07002412 JEMALLOC_N(n_arena_redzone_corruption);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002413#endif
2414
2415static void
2416arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002417{
Jason Evans122449b2012-04-06 00:35:09 -07002418 bool error = false;
2419
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002420 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002421 size_t size = bin_info->reg_size;
2422 size_t redzone_size = bin_info->redzone_size;
2423 size_t i;
2424
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002425 for (i = 1; i <= redzone_size; i++) {
2426 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
Chris Petersona82070e2016-03-27 23:28:39 -07002427 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002428 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002429 arena_redzone_corruption(ptr, size, false, i,
2430 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002431 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002432 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002433 }
2434 }
2435 for (i = 0; i < redzone_size; i++) {
2436 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
Chris Petersona82070e2016-03-27 23:28:39 -07002437 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002438 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002439 arena_redzone_corruption(ptr, size, true, i,
2440 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002441 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002442 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002443 }
Jason Evans122449b2012-04-06 00:35:09 -07002444 }
2445 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002446
Jason Evans122449b2012-04-06 00:35:09 -07002447 if (opt_abort && error)
2448 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002449}
Jason Evans122449b2012-04-06 00:35:09 -07002450
Jason Evans6b694c42014-01-07 16:47:56 -08002451#ifdef JEMALLOC_JET
2452#undef arena_dalloc_junk_small
Jason Evansab0cfe02016-04-18 15:11:20 -07002453#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
Jason Evans6b694c42014-01-07 16:47:56 -08002454#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002455void
2456arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2457{
2458 size_t redzone_size = bin_info->redzone_size;
2459
2460 arena_redzones_validate(ptr, bin_info, false);
Chris Petersona82070e2016-03-27 23:28:39 -07002461 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
Jason Evans122449b2012-04-06 00:35:09 -07002462 bin_info->reg_interval);
2463}
Jason Evans6b694c42014-01-07 16:47:56 -08002464#ifdef JEMALLOC_JET
2465#undef arena_dalloc_junk_small
2466#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2467arena_dalloc_junk_small_t *arena_dalloc_junk_small =
Jason Evansab0cfe02016-04-18 15:11:20 -07002468 JEMALLOC_N(n_arena_dalloc_junk_small);
Jason Evans6b694c42014-01-07 16:47:56 -08002469#endif
Jason Evans122449b2012-04-06 00:35:09 -07002470
Jason Evans0d6c5d82013-12-17 15:14:36 -08002471void
2472arena_quarantine_junk_small(void *ptr, size_t usize)
2473{
Jason Evansd01fd192015-08-19 15:21:32 -07002474 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002475 arena_bin_info_t *bin_info;
2476 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002477 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002478 assert(opt_quarantine);
2479 assert(usize <= SMALL_MAXCLASS);
2480
Jason Evans155bfa72014-10-05 17:54:10 -07002481 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002482 bin_info = &arena_bin_info[binind];
2483 arena_redzones_validate(ptr, bin_info, true);
2484}
2485
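/*
 * Small allocation fast path: with bin->lock held, carve a region out of
 * bin->runcur if it has free regions; otherwise fall back to
 * arena_bin_malloc_hard(), which may have to allocate a new run.
 */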
Jason Evans578cd162016-02-19 18:40:03 -08002486static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002487arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002488{
2489 void *ret;
2490 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002491 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002492 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002493
Jason Evansb1726102012-02-28 16:50:47 -08002494 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002495 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002496 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002497
Jason Evansc1e00ef2016-05-10 22:21:10 -07002498 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002499 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002500 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002501 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002502 ret = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002503
2504 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002505 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002506 return (NULL);
2507 }
2508
Jason Evans7372b152012-02-10 20:22:09 -08002509 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002510 bin->stats.nmalloc++;
2511 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002512 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002513 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002514 malloc_mutex_unlock(tsdn, &bin->lock);
2515 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2516 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002517
Jason Evans551ebc42014-10-03 10:16:09 -07002518 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002519 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002520 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002521 arena_alloc_junk_small(ret,
2522 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002523 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002524 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002525 }
Jason Evans0c516a02016-02-25 15:29:49 -08002526 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002527 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002528 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002529 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2530 true);
2531 }
Jason Evans0c516a02016-02-25 15:29:49 -08002532 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2533 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002534 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002535
Jason Evansc1e00ef2016-05-10 22:21:10 -07002536 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002537 return (ret);
2538}
2539
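/*
 * Large allocation: the backing run is sized to usize + large_pad, and when
 * cache-oblivious behavior is enabled the returned pointer is offset from the
 * run base by a random cacheline-multiple within the first page, so that
 * large allocations do not all start at the same page-relative cacheline.
 */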
2540void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002541arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002542{
2543 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002544 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002545 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002546 arena_run_t *run;
2547 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002548 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002549
2550 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002551 usize = index2size(binind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002552 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002553 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002554 uint64_t r;
2555
Jason Evans8a03cf02015-05-04 09:58:36 -07002556 /*
2557 * Compute a uniformly distributed offset within the first page
2558 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64
2559 * for 4 KiB pages and 64-byte cachelines.
2560 */
Jason Evans34676d32016-02-09 16:28:40 -08002561 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
Jason Evans8a03cf02015-05-04 09:58:36 -07002562 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2563 } else
2564 random_offset = 0;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002565 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002566 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002567 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002568 return (NULL);
2569 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002570 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002571 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2572 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002573 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002574 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002575
Jason Evans7372b152012-02-10 20:22:09 -08002576 arena->stats.nmalloc_large++;
2577 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002578 arena->stats.allocated_large += usize;
2579 arena->stats.lstats[index].nmalloc++;
2580 arena->stats.lstats[index].nrequests++;
2581 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002582 }
Jason Evans7372b152012-02-10 20:22:09 -08002583 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002584 idump = arena_prof_accum_locked(arena, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002585 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002586 if (config_prof && idump)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002587 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002588
Jason Evans551ebc42014-10-03 10:16:09 -07002589 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002590 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002591 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002592 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002593 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002594 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002595 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002596 }
2597
Jason Evansc1e00ef2016-05-10 22:21:10 -07002598 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002599 return (ret);
2600}
2601
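/*
 * Slow-path size dispatch: requests up to SMALL_MAXCLASS use the bin-based
 * small path, requests up to large_maxclass use page runs within a chunk, and
 * anything larger is delegated to the huge allocator.
 */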
Jason Evans578cd162016-02-19 18:40:03 -08002602void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002603arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
Jason Evans66cd9532016-04-22 14:34:14 -07002604 bool zero)
Jason Evans578cd162016-02-19 18:40:03 -08002605{
2606
Jason Evansc1e00ef2016-05-10 22:21:10 -07002607 assert(!tsdn_null(tsdn) || arena != NULL);
2608
2609 if (likely(!tsdn_null(tsdn)))
2610 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans578cd162016-02-19 18:40:03 -08002611 if (unlikely(arena == NULL))
2612 return (NULL);
2613
2614 if (likely(size <= SMALL_MAXCLASS))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002615 return (arena_malloc_small(tsdn, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002616 if (likely(size <= large_maxclass))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002617 return (arena_malloc_large(tsdn, arena, ind, zero));
2618 return (huge_malloc(tsdn, arena, index2size(ind), zero));
Jason Evans578cd162016-02-19 18:40:03 -08002619}
2620
Jason Evanse476f8a2010-01-16 09:53:50 -08002621/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002622static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002623arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002624 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002625{
2626 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002627 size_t alloc_size, leadsize, trailsize;
2628 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002629 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002630 arena_chunk_map_misc_t *miscelm;
2631 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002632
Jason Evansc1e00ef2016-05-10 22:21:10 -07002633 assert(!tsdn_null(tsdn) || arena != NULL);
Jason Evans50883de2015-07-23 17:13:18 -07002634 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002635
Jason Evansc1e00ef2016-05-10 22:21:10 -07002636 if (likely(!tsdn_null(tsdn)))
2637 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002638 if (unlikely(arena == NULL))
2639 return (NULL);
2640
Jason Evans93443682010-10-20 17:39:18 -07002641 alignment = PAGE_CEILING(alignment);
Jason Evans05a9e4a2016-06-07 14:19:50 -07002642 alloc_size = usize + large_pad + alignment - PAGE;
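	/*
	 * alloc_size pads the request by (alignment - PAGE) so that, wherever
	 * the page-aligned run happens to land, it contains an
	 * alignment-aligned region of usize + large_pad bytes; the unaligned
	 * leading and trailing pages are trimmed off below.
	 */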
Jason Evanse476f8a2010-01-16 09:53:50 -08002643
Jason Evansc1e00ef2016-05-10 22:21:10 -07002644 malloc_mutex_lock(tsdn, &arena->lock);
2645 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002646 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002647 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002648 return (NULL);
2649 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002650 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002651 miscelm = arena_run_to_miscelm(run);
2652 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002653
Jason Evans0c5dd032014-09-29 01:31:39 -07002654 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2655 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002656 assert(alloc_size >= leadsize + usize);
2657 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002658 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002659 arena_chunk_map_misc_t *head_miscelm = miscelm;
2660 arena_run_t *head_run = run;
2661
Jason Evans61a6dfc2016-03-23 16:04:38 -07002662 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002663 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2664 LG_PAGE));
2665 run = &miscelm->run;
2666
Jason Evansc1e00ef2016-05-10 22:21:10 -07002667 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
Jason Evans0c5dd032014-09-29 01:31:39 -07002668 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002669 }
2670 if (trailsize != 0) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002671 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
Jason Evans50883de2015-07-23 17:13:18 -07002672 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002673 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002674 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2675 size_t run_ind =
2676 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002677 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2678 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2679 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002680
Jason Evansde249c82015-08-09 16:47:27 -07002681 assert(decommitted); /* Cause of OOM. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002682 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2683 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002684 return (NULL);
2685 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002686 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002687
Jason Evans7372b152012-02-10 20:22:09 -08002688 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002689 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002690
Jason Evans7372b152012-02-10 20:22:09 -08002691 arena->stats.nmalloc_large++;
2692 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002693 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002694 arena->stats.lstats[index].nmalloc++;
2695 arena->stats.lstats[index].nrequests++;
2696 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002697 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002698 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002699
Jason Evans551ebc42014-10-03 10:16:09 -07002700 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002701 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002702 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002703 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002704 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002705 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002706 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002707 return (ret);
2708}
2709
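/*
 * Aligned allocation dispatch: alignments of at most PAGE are satisfied by
 * the ordinary malloc paths (masking off the random offset for large
 * allocations so the result is page-aligned), while stricter alignments go
 * through arena_palloc_large() or the huge allocator.
 */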
Jason Evans88fef7c2015-02-12 14:06:37 -08002710void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002711arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002712 bool zero, tcache_t *tcache)
2713{
2714 void *ret;
2715
Jason Evans8a03cf02015-05-04 09:58:36 -07002716 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002717 && (usize & PAGE_MASK) == 0))) {
2718 /* Small; alignment doesn't require special run placement. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002719 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002720 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002721 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002722 /*
2723 * Large; alignment doesn't require special run placement.
2724 * However, the cached pointer may be at a random offset from
2725 * the base of the run, so do some bit manipulation to retrieve
2726 * the base.
2727 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002728 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002729 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002730 if (config_cache_oblivious)
2731 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2732 } else {
Jason Evans676df882015-09-11 20:50:20 -07002733 if (likely(usize <= large_maxclass)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002734 ret = arena_palloc_large(tsdn, arena, usize, alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002735 zero);
2736 } else if (likely(alignment <= chunksize))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002737 ret = huge_malloc(tsdn, arena, usize, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002738 else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002739 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002740 }
2741 }
2742 return (ret);
2743}
2744
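/*
 * When heap profiling samples a small request, the allocation is backed by a
 * LARGE_MINCLASS run.  arena_prof_promoted() records the requested small size
 * class in the chunk map's binind so that demoted size queries
 * (isalloc(..., true)) report the small size while the backing run remains
 * LARGE_MINCLASS.
 */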
Jason Evans0b270a92010-03-31 16:45:04 -07002745void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002746arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
Jason Evans0b270a92010-03-31 16:45:04 -07002747{
2748 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002749 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002750 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002751
Jason Evans78f73522012-04-18 13:38:40 -07002752 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002753 assert(ptr != NULL);
2754 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002755 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2756 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002757 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002758
2759 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002760 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002761 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002762 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002763 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002764
Jason Evansc1e00ef2016-05-10 22:21:10 -07002765 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2766 assert(isalloc(tsdn, ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002767}
Jason Evans6109fe02010-02-10 10:37:56 -08002768
Jason Evanse476f8a2010-01-16 09:53:50 -08002769static void
Jason Evans088e6a02010-10-18 00:04:44 -07002770arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002771 arena_bin_t *bin)
2772{
Jason Evanse476f8a2010-01-16 09:53:50 -08002773
Jason Evans19b3d612010-03-18 20:36:40 -07002774 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002775 if (run == bin->runcur)
2776 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002777 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002778 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002779 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002780 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2781
Jason Evansc6a2c392016-03-26 17:30:37 -07002782 /*
2783 * The following block's conditional is necessary because if the
2784 * run only contains one region, then it never gets inserted
2785 * into the non-full runs heap.
2786 */
Jason Evans49f7e8f2011-03-15 13:59:15 -07002787 if (bin_info->nregs != 1) {
Jason Evansc6a2c392016-03-26 17:30:37 -07002788 arena_chunk_map_misc_t *miscelm =
2789 arena_run_to_miscelm(run);
2790
2791 arena_run_heap_remove(&bin->runs, miscelm);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002792 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002793 }
Jason Evans088e6a02010-10-18 00:04:44 -07002794}
2795
2796static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002797arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002798 arena_run_t *run, arena_bin_t *bin)
Jason Evans088e6a02010-10-18 00:04:44 -07002799{
Jason Evans088e6a02010-10-18 00:04:44 -07002800
2801 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002802
Jason Evansc1e00ef2016-05-10 22:21:10 -07002803 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002804 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002805 malloc_mutex_lock(tsdn, &arena->lock);
2806 arena_run_dalloc(tsdn, arena, run, true, false, false);
2807 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002808 /****************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002809 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002810 if (config_stats)
2811 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002812}
2813
Jason Evans940a2e02010-10-17 17:51:37 -07002814static void
2815arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2816 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002817{
Jason Evanse476f8a2010-01-16 09:53:50 -08002818
Jason Evans8de6a022010-10-17 20:57:30 -07002819 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002820 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2821 * non-full run. It is okay to NULL runcur out rather than proactively
2822 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002823 */
Jason Evanse7a10582012-02-13 17:36:52 -08002824 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002825 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002826 if (bin->runcur->nfree > 0)
2827 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002828 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002829 if (config_stats)
2830 bin->stats.reruns++;
2831 } else
2832 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002833}
2834
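/*
 * Small deallocation with bin->lock already held: the region is returned to
 * its run, a run that becomes completely free is dissociated from the bin and
 * released back to the arena, and a run that just transitioned from full to
 * non-full is reinserted via arena_bin_lower_run().
 */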
Jason Evansfc0b3b72014-10-09 17:54:06 -07002835static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002836arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002837 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002838{
Jason Evans0c5dd032014-09-29 01:31:39 -07002839 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002840 arena_run_t *run;
2841 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002842 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002843 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002844
Jason Evansae4c7b42012-04-02 07:04:34 -07002845 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002846 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002847 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002848 binind = run->binind;
2849 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002850 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002851
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002852 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002853 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002854
2855 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002856 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002857 arena_dissociate_bin_run(chunk, run, bin);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002858 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002859 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002860 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002861
Jason Evans7372b152012-02-10 20:22:09 -08002862 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002863 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002864 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002865 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002866}
2867
Jason Evanse476f8a2010-01-16 09:53:50 -08002868void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002869arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2870 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002871{
2872
Jason Evansc1e00ef2016-05-10 22:21:10 -07002873 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002874}
2875
2876void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002877arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002878 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002879{
2880 arena_run_t *run;
2881 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002882 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002883
Jason Evans0c5dd032014-09-29 01:31:39 -07002884 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002885 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002886 bin = &arena->bins[run->binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002887 malloc_mutex_lock(tsdn, &bin->lock);
2888 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2889 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans203484e2012-05-02 00:30:36 -07002890}
2891
2892void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002893arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2894 void *ptr, size_t pageind)
Jason Evans203484e2012-05-02 00:30:36 -07002895{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002896 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002897
2898 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002899 /* arena_ptr_small_binind_get() does extra sanity checking. */
2900 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2901 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002902 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002903 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002904 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2905 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002906}
Jason Evanse476f8a2010-01-16 09:53:50 -08002907
Jason Evans6b694c42014-01-07 16:47:56 -08002908#ifdef JEMALLOC_JET
2909#undef arena_dalloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07002910#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08002911#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002912void
Jason Evans6b694c42014-01-07 16:47:56 -08002913arena_dalloc_junk_large(void *ptr, size_t usize)
2914{
2915
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002916 if (config_fill && unlikely(opt_junk_free))
Chris Petersona82070e2016-03-27 23:28:39 -07002917 memset(ptr, JEMALLOC_FREE_JUNK, usize);
Jason Evans6b694c42014-01-07 16:47:56 -08002918}
2919#ifdef JEMALLOC_JET
2920#undef arena_dalloc_junk_large
2921#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2922arena_dalloc_junk_large_t *arena_dalloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07002923 JEMALLOC_N(n_arena_dalloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08002924#endif
2925
Jason Evanse56b24e2015-09-20 09:58:10 -07002926static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002927arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
2928 arena_chunk_t *chunk, void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002929{
Jason Evans0c5dd032014-09-29 01:31:39 -07002930 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002931 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2932 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002933 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002934
Jason Evans7372b152012-02-10 20:22:09 -08002935 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002936 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2937 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002938
Jason Evansfc0b3b72014-10-09 17:54:06 -07002939 if (!junked)
2940 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002941 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002942 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002943
Jason Evans7372b152012-02-10 20:22:09 -08002944 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002945 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002946 arena->stats.lstats[index].ndalloc++;
2947 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002948 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002949 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002950
Jason Evansc1e00ef2016-05-10 22:21:10 -07002951 arena_run_dalloc(tsdn, arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002952}
2953
Jason Evans203484e2012-05-02 00:30:36 -07002954void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002955arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07002956 arena_chunk_t *chunk, void *ptr)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002957{
2958
Jason Evansc1e00ef2016-05-10 22:21:10 -07002959 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002960}
2961
2962void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002963arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2964 void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07002965{
2966
Jason Evansc1e00ef2016-05-10 22:21:10 -07002967 malloc_mutex_lock(tsdn, &arena->lock);
2968 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
2969 malloc_mutex_unlock(tsdn, &arena->lock);
2970 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002971}
2972
Jason Evanse476f8a2010-01-16 09:53:50 -08002973static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002974arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002975 void *ptr, size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002976{
Jason Evans0c5dd032014-09-29 01:31:39 -07002977 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002978 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2979 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002980 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002981
2982 assert(size < oldsize);
2983
2984 /*
2985 * Shrink the run, and make trailing pages available for other
2986 * allocations.
2987 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002988 malloc_mutex_lock(tsdn, &arena->lock);
2989 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
Jason Evans8a03cf02015-05-04 09:58:36 -07002990 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08002991 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002992 szind_t oldindex = size2index(oldsize) - NBINS;
2993 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002994
Jason Evans7372b152012-02-10 20:22:09 -08002995 arena->stats.ndalloc_large++;
2996 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07002997 arena->stats.lstats[oldindex].ndalloc++;
2998 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08002999
Jason Evans7372b152012-02-10 20:22:09 -08003000 arena->stats.nmalloc_large++;
3001 arena->stats.nrequests_large++;
3002 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003003 arena->stats.lstats[index].nmalloc++;
3004 arena->stats.lstats[index].nrequests++;
3005 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08003006 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003007 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003008}
3009
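/*
 * Try to grow a large allocation in place by splitting off the beginning of
 * the immediately following run, provided that run is unallocated and large
 * enough to reach at least usize_min.  On success the two runs are merged in
 * the chunk map and the stats are moved to the new size class.
 */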
3010static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003011arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003012 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003013{
Jason Evansae4c7b42012-04-02 07:04:34 -07003014 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07003015 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003016 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08003017
Jason Evans8a03cf02015-05-04 09:58:36 -07003018 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3019 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08003020
3021 /* Try to extend the run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003022 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07003023 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3024 pageind+npages) != 0)
3025 goto label_fail;
3026 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3027 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003028 /*
3029 * The next run is available and sufficiently large. Split the
3030 * following run, then merge the first part with the existing
3031 * allocation.
3032 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02003033 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07003034 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07003035
Jason Evans560a4e12015-09-11 16:18:53 -07003036 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07003037 while (oldsize + followsize < usize)
3038 usize = index2size(size2index(usize)-1);
3039 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07003040 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07003041 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07003042 if (splitsize == 0)
3043 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07003044
Jason Evans61a6dfc2016-03-23 16:04:38 -07003045 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07003046 if (arena_run_split_large(arena, run, splitsize, zero))
3047 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08003048
Jason Evansd260f442015-09-24 16:38:45 -07003049 if (config_cache_oblivious && zero) {
3050 /*
3051 * Zero the trailing bytes of the original allocation's
3052 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07003053 * There will always be trailing bytes, because ptr's
3054 * offset from the beginning of the run is a multiple of
3055 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07003056 */
Jason Evansa784e412015-09-24 22:21:55 -07003057 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3058 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3059 PAGE));
3060 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3061 assert(nzero > 0);
3062 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07003063 }
3064
Jason Evans088e6a02010-10-18 00:04:44 -07003065 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07003066 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07003067
3068 /*
3069 * Mark the extended run as dirty if either portion of the run
3070 * was dirty before allocation. This is rather pedantic,
3071 * because there's not actually any sequence of events that
3072 * could cause the resulting run to be passed to
3073 * arena_run_dalloc() with the dirty argument set to false
3074 * (which is when dirty flag consistency would really matter).
3075 */
Jason Evans203484e2012-05-02 00:30:36 -07003076 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3077 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07003078 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07003079 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07003080 flag_dirty | (flag_unzeroed_mask &
3081 arena_mapbits_unzeroed_get(chunk, pageind)));
3082 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3083 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3084 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08003085
Jason Evans7372b152012-02-10 20:22:09 -08003086 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003087 szind_t oldindex = size2index(oldsize) - NBINS;
3088 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003089
Jason Evans7372b152012-02-10 20:22:09 -08003090 arena->stats.ndalloc_large++;
3091 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003092 arena->stats.lstats[oldindex].ndalloc++;
3093 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003094
Jason Evans7372b152012-02-10 20:22:09 -08003095 arena->stats.nmalloc_large++;
3096 arena->stats.nrequests_large++;
3097 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003098 arena->stats.lstats[index].nmalloc++;
3099 arena->stats.lstats[index].nrequests++;
3100 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07003101 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003102 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003103 return (false);
3104 }
Jason Evans560a4e12015-09-11 16:18:53 -07003105label_fail:
Jason Evansc1e00ef2016-05-10 22:21:10 -07003106 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003107 return (true);
3108}
3109
Jason Evans6b694c42014-01-07 16:47:56 -08003110#ifdef JEMALLOC_JET
3111#undef arena_ralloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07003112#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08003113#endif
3114static void
3115arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3116{
3117
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02003118 if (config_fill && unlikely(opt_junk_free)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003119 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
Jason Evans6b694c42014-01-07 16:47:56 -08003120 old_usize - usize);
3121 }
3122}
3123#ifdef JEMALLOC_JET
3124#undef arena_ralloc_junk_large
3125#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3126arena_ralloc_junk_large_t *arena_ralloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003127 JEMALLOC_N(n_arena_ralloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003128#endif
3129
Jason Evanse476f8a2010-01-16 09:53:50 -08003130/*
3131 * Try to resize a large allocation, in order to avoid copying. This will
3132 * always fail when growing an object if the following run is already in use.
3133 */
3134static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003135arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
Jason Evans560a4e12015-09-11 16:18:53 -07003136 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003137{
Jason Evans560a4e12015-09-11 16:18:53 -07003138 arena_chunk_t *chunk;
3139 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003140
Jason Evans560a4e12015-09-11 16:18:53 -07003141 if (oldsize == usize_max) {
3142 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003143 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003144 }
Jason Evans560a4e12015-09-11 16:18:53 -07003145
3146 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3147 arena = extent_node_arena_get(&chunk->node);
3148
3149 if (oldsize < usize_max) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003150 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
Jason Evansb2c0d632016-04-13 23:36:15 -07003151 oldsize, usize_min, usize_max, zero);
Jason Evans560a4e12015-09-11 16:18:53 -07003152 if (config_fill && !ret && !zero) {
3153 if (unlikely(opt_junk_alloc)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003154 memset((void *)((uintptr_t)ptr + oldsize),
3155 JEMALLOC_ALLOC_JUNK,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003156 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003157 } else if (unlikely(opt_zero)) {
3158 memset((void *)((uintptr_t)ptr + oldsize), 0,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003159 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003160 }
3161 }
3162 return (ret);
3163 }
3164
3165 assert(oldsize > usize_max);
3166 /* Fill before shrinking in order avoid a race. */
3167 arena_ralloc_junk_large(ptr, oldsize, usize_max);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003168 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
Jason Evans560a4e12015-09-11 16:18:53 -07003169 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003170}
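
/*
 * Worked example (assuming 4 KiB pages and the default size classes): growing
 * a 16384-byte large allocation to usize_max == 20480 succeeds in place only
 * if the page run immediately following the allocation is free and can supply
 * the extra page; otherwise arena_ralloc_large_grow() fails and the caller
 * must move the object.  Shrinking (e.g. 20480 -> 16384) needs no neighboring
 * space, so it always succeeds in place.
 */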

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero)
{
	size_t usize_min, usize_max;

	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);

	if (unlikely(size > HUGE_MAXCLASS))
		return (true);

	usize_min = s2u(size);
	usize_max = s2u(size + extra);
	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
		arena_chunk_t *chunk;

		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[size2index(oldsize)].reg_size ==
			    oldsize);
			if ((usize_max > SMALL_MAXCLASS ||
			    size2index(usize_max) != size2index(oldsize)) &&
			    (size > oldsize || usize_max < oldsize))
				return (true);
		} else {
			if (usize_max <= SMALL_MAXCLASS)
				return (true);
			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
			    usize_max, zero))
				return (true);
		}

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
		return (false);
	} else {
		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
		    usize_max, zero));
	}
}
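
/*
 * Worked example for the small-class path above (assuming the default size
 * classes): with oldsize == 160 and size == 150, s2u(150) == 160, so the size
 * class is unchanged and the call returns false (the allocation stays put).
 * With oldsize == 160 and size == 176, s2u(176) == 192, which is a different
 * size class and size > oldsize, so the call returns true and the caller must
 * reallocate and copy.
 */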

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache)
{

	if (alignment == 0)
		return (arena_malloc(tsdn, arena, usize, size2index(usize),
		    zero, tcache, true));
	usize = sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}

void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t usize;

	usize = s2u(size);
	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
		return (NULL);

	if (likely(usize <= large_maxclass)) {
		size_t copysize;

		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
		    zero))
			return (ptr);

		/*
		 * size and oldsize are different enough that we need to move
		 * the object.  In that case, fall back to allocating new space
		 * and copying.
		 */
		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
		    alignment, zero, tcache);
		if (ret == NULL)
			return (NULL);

		/*
		 * Junk/zero-filling were already done by
		 * ipalloc()/arena_malloc().
		 */

		copysize = (usize < oldsize) ? usize : oldsize;
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
		memcpy(ret, ptr, copysize);
		isqalloc(tsd, ptr, oldsize, tcache, true);
	} else {
		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
		    zero, tcache);
	}
	return (ret);
}

dss_prec_t
arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(tsdn, &arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (ret);
}

bool
arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(tsdn, &arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(tsdn, &arena->lock);
	return (false);
}

ssize_t
arena_lg_dirty_mult_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
}

bool
arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
{

	if (opt_purge != purge_mode_ratio)
		return (true);
	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);
	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
	return (false);
}
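
/*
 * Example of the ratio semantics (purge_mode_ratio): lg_dirty_mult == 3
 * allows roughly one dirty page to be retained per 2^3 == 8 active pages, so
 * an arena with 64 MiB active may keep up to about 8 MiB dirty before
 * purging; -1 disables ratio-based purging.
 */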

ssize_t
arena_decay_time_default_get(void)
{

	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
}

bool
arena_decay_time_default_set(ssize_t decay_time)
{

	if (opt_purge != purge_mode_decay)
		return (true);
	if (!arena_decay_time_valid(decay_time))
		return (true);
	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
	return (false);
}
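
/*
 * Example of the decay semantics (purge_mode_decay): decay_time == 10 means
 * unused dirty pages are purged incrementally over roughly ten seconds,
 * following a smoothstep decay curve; 0 purges immediately, and -1 disables
 * time-based purging.
 */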

static void
arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena->dss_prec];
	*lg_dirty_mult = arena->lg_dirty_mult;
	*decay_time = arena->decay.time;
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty)
{

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
    malloc_huge_stats_t *hstats)
{
	unsigned i;

	cassert(config_stats);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
	    decay_time, nactive, ndirty);

	astats->mapped += arena->stats.mapped;
	astats->retained += arena->stats.retained;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->metadata_mapped += arena->stats.metadata_mapped;
	astats->metadata_allocated += arena_metadata_allocated_get(arena);
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;
	astats->allocated_huge += arena->stats.allocated_huge;
	astats->nmalloc_huge += arena->stats.nmalloc_huge;
	astats->ndalloc_huge += arena->stats.ndalloc_huge;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}

	for (i = 0; i < nhclasses; i++) {
		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
	}
	malloc_mutex_unlock(tsdn, &arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}
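
/*
 * Illustrative sketch of how a stats consumer (e.g. the ctl code) might merge
 * per-arena statistics with the function above; the accumulator variables are
 * hypothetical and would normally live in ctl-managed structures, with lstats
 * and hstats sized to nlclasses and nhclasses elements respectively.
 */
#if 0
	unsigned nthreads = 0;
	const char *dss;
	ssize_t lg_dirty_mult, decay_time;
	size_t nactive = 0, ndirty = 0;
	arena_stats_t astats;
	malloc_bin_stats_t bstats[NBINS];
	malloc_large_stats_t *lstats;	/* nlclasses elements, zeroed. */
	malloc_huge_stats_t *hstats;	/* nhclasses elements, zeroed. */

	memset(&astats, 0, sizeof(astats));
	memset(bstats, 0, sizeof(bstats));
	arena_stats_merge(tsdn, arena, &nthreads, &dss, &lg_dirty_mult,
	    &decay_time, &nactive, &ndirty, &astats, bstats, lstats, hstats);
#endif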

unsigned
arena_nthreads_get(arena_t *arena, bool internal)
{

	return (atomic_read_u(&arena->nthreads[internal]));
}

void
arena_nthreads_inc(arena_t *arena, bool internal)
{

	atomic_add_u(&arena->nthreads[internal], 1);
}

void
arena_nthreads_dec(arena_t *arena, bool internal)
{

	atomic_sub_u(&arena->nthreads[internal], 1);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;
	unsigned i;

	/*
	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
	 * because there is no way to clean up if base_alloc() OOMs.
	 */
	if (config_stats) {
		arena = (arena_t *)base_alloc(tsdn,
		    CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)) +
		    (nhclasses * sizeof(malloc_huge_stats_t))));
	} else
		arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
	if (arena == NULL)
		return (NULL);

	arena->ind = ind;
	arena->nthreads[0] = arena->nthreads[1] = 0;
	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
		return (NULL);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)));
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
		    + CACHELINE_CEILING(sizeof(arena_t)) +
		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
		memset(arena->stats.hstats, 0, nhclasses *
		    sizeof(malloc_huge_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}
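
	/*
	 * Resulting layout of the single base_alloc() block when config_stats:
	 * the arena_t itself padded to a cacheline boundary, then the
	 * nlclasses-element lstats array padded to a quantum boundary, then
	 * the nhclasses-element hstats array.
	 */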

	if (config_prof)
		arena->prof_accumbytes = 0;

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		arena->offset_state = config_debug ? ind :
		    (uint64_t)(uintptr_t)arena;
	}

	arena->dss_prec = chunk_dss_prec_get();

	ql_new(&arena->achunks);

	arena->spare = NULL;

	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
	arena->purging = false;
	arena->nactive = 0;
	arena->ndirty = 0;

	for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
	    i++)
		arena_run_heap_new(&arena->runs_avail[i]);

	qr_new(&arena->runs_dirty, rd_link);
	qr_new(&arena->chunks_cache, cc_link);

	if (opt_purge == purge_mode_decay)
		arena_decay_init(arena, arena_decay_time_default_get());

	ql_new(&arena->huge);
	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
	    WITNESS_RANK_ARENA_HUGE))
		return (NULL);

	extent_tree_szad_new(&arena->chunks_szad_cached);
	extent_tree_ad_new(&arena->chunks_ad_cached);
	extent_tree_szad_new(&arena->chunks_szad_retained);
	extent_tree_ad_new(&arena->chunks_ad_retained);
	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
	    WITNESS_RANK_ARENA_CHUNKS))
		return (NULL);
	ql_new(&arena->node_cache);
	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
	    WITNESS_RANK_ARENA_NODE_CACHE))
		return (NULL);

	arena->chunk_hooks = chunk_hooks_default;

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN))
			return (NULL);
		bin->runcur = NULL;
		arena_run_heap_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (arena);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size <= arena_maxrun
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
 * these settings are all interdependent.
 */
static void
bin_info_run_size_calc(arena_bin_info_t *bin_info)
{
	size_t pad_size;
	size_t try_run_size, perfect_run_size, actual_run_size;
	uint32_t try_nregs, perfect_nregs, actual_nregs;

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && unlikely(opt_redzone)) {
		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);

	/*
	 * Compute run size under ideal conditions (no redzones, no limit on
	 * run size).
	 */
	try_run_size = PAGE;
	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	do {
		perfect_run_size = try_run_size;
		perfect_nregs = try_nregs;

		try_run_size += PAGE;
		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
	assert(perfect_nregs <= RUN_MAXREGS);

	actual_run_size = perfect_run_size;
	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
	    bin_info->reg_interval);

	/*
	 * Redzones can require enough padding that not even a single region
	 * can fit within the number of pages that would normally be dedicated
	 * to a run for this size class.  Increase the run size until at least
	 * one region fits.
	 */
	while (actual_nregs == 0) {
		assert(config_fill && unlikely(opt_redzone));

		actual_run_size += PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}

	/*
	 * Make sure that the run will fit within an arena chunk.
	 */
	while (actual_run_size > arena_maxrun) {
		actual_run_size -= PAGE;
		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
		    bin_info->reg_interval);
	}
	assert(actual_nregs > 0);
	assert(actual_run_size == s2u(actual_run_size));

	/* Copy final settings. */
	bin_info->run_size = actual_run_size;
	bin_info->nregs = actual_nregs;
	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
}
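
/*
 * Worked example (assuming 4 KiB pages and redzones disabled): for
 * reg_size == 96, reg_interval == 96 and pad_size == 0.  The do/while loop
 * above stops at the least common multiple of PAGE and reg_size, giving
 * perfect_run_size == 12288 and perfect_nregs == 128, so the bin ends up with
 * run_size == 12288, nregs == 128, and reg0_offset == 0.
 */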

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}
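
/*
 * Example of the expansion driven by SIZE_CLASSES above: a small-class row
 * such as SC(0, 3, 3, 0, ..., yes, ...) contributes reg_size == (1 << 3) +
 * (0 << 3) == 8, so arena_bin_info[0] describes the 8-byte bin; rows whose
 * bin field is "no" (the large and huge classes) expand to nothing here.
 */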

void
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);
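
	/*
	 * Hypothetical worked example of the iteration above: if the per-page
	 * map cost (sizeof(arena_chunk_map_bits_t) +
	 * sizeof(arena_chunk_map_misc_t)) were 112 bytes, chunk_npages were
	 * 512, and the fixed header were negligible, pass 1 would compute
	 * 112 * 512 == 57344 bytes -> map_bias == 14 pages, and passes 2 and 3
	 * would recompute 112 * (512 - 14) == 55776 bytes -> map_bias == 14
	 * again, so the loop converges.
	 */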

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}
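
/*
 * Illustrative sketch of how the fork hooks above are intended to pair up
 * around fork(2): the allocator registers process-wide wrappers roughly as
 * follows, so every arena mutex is acquired before the fork and released in
 * both the parent and the child afterwards.
 */
#if 0
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
#endif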