blob: d737ec9a8c4974004cd3efb791283517e99e3590 [file] [log] [blame]
Jason Evanse476f8a2010-01-16 09:53:50 -08001#define JEMALLOC_ARENA_C_
Jason Evans376b1522010-02-11 14:45:59 -08002#include "jemalloc/internal/jemalloc_internal.h"
Jason Evanse476f8a2010-01-16 09:53:50 -08003
4/******************************************************************************/
5/* Data. */
6
Jason Evans243f7a02016-02-19 20:09:31 -08007purge_mode_t opt_purge = PURGE_DEFAULT;
8const char *purge_mode_names[] = {
9 "ratio",
10 "decay",
11 "N/A"
12};
Jason Evanse476f8a2010-01-16 09:53:50 -080013ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
Jason Evans8d6a3e82015-03-18 18:55:33 -070014static ssize_t lg_dirty_mult_default;
Jason Evans243f7a02016-02-19 20:09:31 -080015ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
16static ssize_t decay_time_default;
17
Jason Evansb1726102012-02-28 16:50:47 -080018arena_bin_info_t arena_bin_info[NBINS];
Jason Evanse476f8a2010-01-16 09:53:50 -080019
Jason Evans155bfa72014-10-05 17:54:10 -070020size_t map_bias;
21size_t map_misc_offset;
22size_t arena_maxrun; /* Max run size for arenas. */
Jason Evans676df882015-09-11 20:50:20 -070023size_t large_maxclass; /* Max large size class. */
Jason Evans3c4d92e2014-10-12 22:53:59 -070024unsigned nlclasses; /* Number of large size classes. */
25unsigned nhclasses; /* Number of huge size classes. */
Jason Evanse476f8a2010-01-16 09:53:50 -080026
27/******************************************************************************/
Jason Evansaa5113b2014-01-14 16:23:03 -080028/*
29 * Function prototypes for static functions that are referenced prior to
30 * definition.
31 */
Jason Evanse476f8a2010-01-16 09:53:50 -080032
Jason Evanse9012632016-11-03 17:11:01 -070033static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
34 arena_chunk_t *chunk);
Jason Evansc1e00ef2016-05-10 22:21:10 -070035static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -070036 size_t ndirty_limit);
Jason Evansc1e00ef2016-05-10 22:21:10 -070037static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
Jason Evansb2c0d632016-04-13 23:36:15 -070038 bool dirty, bool cleaned, bool decommitted);
Jason Evansc1e00ef2016-05-10 22:21:10 -070039static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -070040 arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
Jason Evans940a2e02010-10-17 17:51:37 -070041static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
42 arena_run_t *run, arena_bin_t *bin);
Jason Evanse476f8a2010-01-16 09:53:50 -080043
44/******************************************************************************/
45
Jason Evans8fadb1a2015-08-04 10:49:46 -070046JEMALLOC_INLINE_C size_t
Joshua Kahn13b40152015-09-18 16:58:17 -040047arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
Jason Evans8fadb1a2015-08-04 10:49:46 -070048{
49 arena_chunk_t *chunk;
50 size_t pageind, mapbits;
51
Jason Evans8fadb1a2015-08-04 10:49:46 -070052 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
53 pageind = arena_miscelm_to_pageind(miscelm);
54 mapbits = arena_mapbits_get(chunk, pageind);
Jason Evans5ef33a92015-08-19 14:12:05 -070055 return (arena_mapbits_size_decode(mapbits));
Ben Maurerf9ff6032014-04-06 13:24:16 -070056}
57
Jason Evansc6a2c392016-03-26 17:30:37 -070058JEMALLOC_INLINE_C int
59arena_run_addr_comp(const arena_chunk_map_misc_t *a,
60 const arena_chunk_map_misc_t *b)
61{
62 uintptr_t a_miscelm = (uintptr_t)a;
63 uintptr_t b_miscelm = (uintptr_t)b;
64
65 assert(a != NULL);
66 assert(b != NULL);
67
68 return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
69}
70
71/* Generate pairing heap functions. */
72ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
73 ph_link, arena_run_addr_comp)
74
Jason Evans0da8ce12016-02-22 16:20:56 -080075#ifdef JEMALLOC_JET
76#undef run_quantize_floor
Jason Evansab0cfe02016-04-18 15:11:20 -070077#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
Jason Evans0da8ce12016-02-22 16:20:56 -080078#endif
79static size_t
80run_quantize_floor(size_t size)
81{
82 size_t ret;
Jason Evans5d8db152016-04-08 14:16:19 -070083 pszind_t pind;
Jason Evans0da8ce12016-02-22 16:20:56 -080084
85 assert(size > 0);
Jason Evansf193fd82016-04-08 14:17:57 -070086 assert(size <= HUGE_MAXCLASS);
Jason Evans0da8ce12016-02-22 16:20:56 -080087 assert((size & PAGE_MASK) == 0);
88
Jason Evans5d8db152016-04-08 14:16:19 -070089 assert(size != 0);
90 assert(size == PAGE_CEILING(size));
91
92 pind = psz2ind(size - large_pad + 1);
93 if (pind == 0) {
94 /*
95 * Avoid underflow. This short-circuit would also do the right
96 * thing for all sizes in the range for which there are
97 * PAGE-spaced size classes, but it's simplest to just handle
98 * the one case that would cause erroneous results.
99 */
100 return (size);
101 }
102 ret = pind2sz(pind - 1) + large_pad;
103 assert(ret <= size);
Jason Evans0da8ce12016-02-22 16:20:56 -0800104 return (ret);
105}
106#ifdef JEMALLOC_JET
107#undef run_quantize_floor
108#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
Jason Evansab0cfe02016-04-18 15:11:20 -0700109run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
Jason Evans0da8ce12016-02-22 16:20:56 -0800110#endif
111
112#ifdef JEMALLOC_JET
113#undef run_quantize_ceil
Jason Evansab0cfe02016-04-18 15:11:20 -0700114#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
Jason Evans0da8ce12016-02-22 16:20:56 -0800115#endif
116static size_t
117run_quantize_ceil(size_t size)
118{
119 size_t ret;
120
121 assert(size > 0);
Jason Evansf193fd82016-04-08 14:17:57 -0700122 assert(size <= HUGE_MAXCLASS);
Jason Evans0da8ce12016-02-22 16:20:56 -0800123 assert((size & PAGE_MASK) == 0);
124
Jason Evans5d8db152016-04-08 14:16:19 -0700125 ret = run_quantize_floor(size);
126 if (ret < size) {
127 /*
128 * Skip a quantization that may have an adequately large run,
129 * because under-sized runs may be mixed in. This only happens
130 * when an unusual size is requested, i.e. for aligned
131 * allocation, and is just one of several places where linear
132 * search would potentially find sufficiently aligned available
133 * memory somewhere lower.
134 */
135 ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
136 }
Jason Evans0da8ce12016-02-22 16:20:56 -0800137 return (ret);
138}
Jason Evansa9a46842016-02-22 14:58:05 -0800139#ifdef JEMALLOC_JET
140#undef run_quantize_ceil
141#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
Jason Evansab0cfe02016-04-18 15:11:20 -0700142run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
Jason Evansa9a46842016-02-22 14:58:05 -0800143#endif
Jason Evans8a03cf02015-05-04 09:58:36 -0700144
Jason Evanse3d13062012-10-30 15:42:37 -0700145static void
146arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Qinfan Wu90737fc2014-07-21 19:39:20 -0700147 size_t npages)
Jason Evanse3d13062012-10-30 15:42:37 -0700148{
Jason Evansf193fd82016-04-08 14:17:57 -0700149 pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700150 arena_miscelm_get_const(chunk, pageind))));
Jason Evanse3d13062012-10-30 15:42:37 -0700151 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
152 LG_PAGE));
Jason Evansf193fd82016-04-08 14:17:57 -0700153 arena_run_heap_insert(&arena->runs_avail[pind],
Jason Evansc6a2c392016-03-26 17:30:37 -0700154 arena_miscelm_get_mutable(chunk, pageind));
Jason Evanse3d13062012-10-30 15:42:37 -0700155}
156
157static void
158arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Qinfan Wu90737fc2014-07-21 19:39:20 -0700159 size_t npages)
Jason Evanse3d13062012-10-30 15:42:37 -0700160{
Jason Evansf193fd82016-04-08 14:17:57 -0700161 pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700162 arena_miscelm_get_const(chunk, pageind))));
Jason Evanse3d13062012-10-30 15:42:37 -0700163 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
164 LG_PAGE));
Jason Evansf193fd82016-04-08 14:17:57 -0700165 arena_run_heap_remove(&arena->runs_avail[pind],
Jason Evansc6a2c392016-03-26 17:30:37 -0700166 arena_miscelm_get_mutable(chunk, pageind));
Jason Evanse3d13062012-10-30 15:42:37 -0700167}
168
Jason Evans070b3c32014-08-14 14:45:58 -0700169static void
Jason Evansee41ad42015-02-15 18:04:46 -0800170arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Jason Evans070b3c32014-08-14 14:45:58 -0700171 size_t npages)
172{
Jason Evans61a6dfc2016-03-23 16:04:38 -0700173 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
174 pageind);
Jason Evansee41ad42015-02-15 18:04:46 -0800175
Jason Evans070b3c32014-08-14 14:45:58 -0700176 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
177 LG_PAGE));
178 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
179 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
180 CHUNK_MAP_DIRTY);
Jason Evansee41ad42015-02-15 18:04:46 -0800181
Jason Evans613cdc82016-03-08 01:04:48 -0800182 qr_new(&miscelm->rd, rd_link);
183 qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
Jason Evans070b3c32014-08-14 14:45:58 -0700184 arena->ndirty += npages;
185}
186
187static void
Jason Evansee41ad42015-02-15 18:04:46 -0800188arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
Jason Evans070b3c32014-08-14 14:45:58 -0700189 size_t npages)
190{
Jason Evans61a6dfc2016-03-23 16:04:38 -0700191 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
192 pageind);
Jason Evansee41ad42015-02-15 18:04:46 -0800193
Jason Evans070b3c32014-08-14 14:45:58 -0700194 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
195 LG_PAGE));
196 assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
197 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
198 CHUNK_MAP_DIRTY);
Jason Evansee41ad42015-02-15 18:04:46 -0800199
Jason Evans613cdc82016-03-08 01:04:48 -0800200 qr_remove(&miscelm->rd, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -0800201 assert(arena->ndirty >= npages);
Jason Evans070b3c32014-08-14 14:45:58 -0700202 arena->ndirty -= npages;
203}
204
Jason Evansee41ad42015-02-15 18:04:46 -0800205static size_t
206arena_chunk_dirty_npages(const extent_node_t *node)
207{
208
209 return (extent_node_size_get(node) >> LG_PAGE);
210}
211
Jason Evansee41ad42015-02-15 18:04:46 -0800212void
Jason Evans738e0892015-02-18 01:15:50 -0800213arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
Jason Evansee41ad42015-02-15 18:04:46 -0800214{
215
Jason Evans738e0892015-02-18 01:15:50 -0800216 if (cache) {
Jason Evans47701b22015-02-17 22:23:10 -0800217 extent_node_dirty_linkage_init(node);
Jason Evans738e0892015-02-18 01:15:50 -0800218 extent_node_dirty_insert(node, &arena->runs_dirty,
219 &arena->chunks_cache);
Jason Evansee41ad42015-02-15 18:04:46 -0800220 arena->ndirty += arena_chunk_dirty_npages(node);
221 }
222}
223
224void
Jason Evans738e0892015-02-18 01:15:50 -0800225arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
Jason Evansee41ad42015-02-15 18:04:46 -0800226{
227
228 if (dirty) {
Jason Evans738e0892015-02-18 01:15:50 -0800229 extent_node_dirty_remove(node);
Jason Evansee41ad42015-02-15 18:04:46 -0800230 assert(arena->ndirty >= arena_chunk_dirty_npages(node));
231 arena->ndirty -= arena_chunk_dirty_npages(node);
232 }
233}
234
Jason Evansaf1f5922014-10-30 16:38:08 -0700235JEMALLOC_INLINE_C void *
Jason Evans49f7e8f2011-03-15 13:59:15 -0700236arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
Jason Evanse476f8a2010-01-16 09:53:50 -0800237{
238 void *ret;
Jason Evans42ce80e2016-02-25 20:51:00 -0800239 size_t regind;
Jason Evans0c5dd032014-09-29 01:31:39 -0700240 arena_chunk_map_misc_t *miscelm;
241 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -0800242
Jason Evans1e0a6362010-03-13 13:41:58 -0800243 assert(run->nfree > 0);
Jason Evans551ebc42014-10-03 10:16:09 -0700244 assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
Jason Evanse476f8a2010-01-16 09:53:50 -0800245
Jason Evans9e1810c2016-02-24 12:42:23 -0800246 regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
Jason Evans0c5dd032014-09-29 01:31:39 -0700247 miscelm = arena_run_to_miscelm(run);
248 rpages = arena_miscelm_to_rpages(miscelm);
249 ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
Jason Evans122449b2012-04-06 00:35:09 -0700250 (uintptr_t)(bin_info->reg_interval * regind));
Jason Evans1e0a6362010-03-13 13:41:58 -0800251 run->nfree--;
Jason Evans1e0a6362010-03-13 13:41:58 -0800252 return (ret);
Jason Evans6109fe02010-02-10 10:37:56 -0800253}
254
Jason Evansaf1f5922014-10-30 16:38:08 -0700255JEMALLOC_INLINE_C void
Jason Evans1e0a6362010-03-13 13:41:58 -0800256arena_run_reg_dalloc(arena_run_t *run, void *ptr)
Jason Evans6109fe02010-02-10 10:37:56 -0800257{
Jason Evans49f7e8f2011-03-15 13:59:15 -0700258 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans203484e2012-05-02 00:30:36 -0700259 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
260 size_t mapbits = arena_mapbits_get(chunk, pageind);
Jason Evansd01fd192015-08-19 15:21:32 -0700261 szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
Jason Evans49f7e8f2011-03-15 13:59:15 -0700262 arena_bin_info_t *bin_info = &arena_bin_info[binind];
Jason Evans42ce80e2016-02-25 20:51:00 -0800263 size_t regind = arena_run_regind(run, bin_info, ptr);
Jason Evans84c8eef2011-03-16 10:30:13 -0700264
Jason Evans49f7e8f2011-03-15 13:59:15 -0700265 assert(run->nfree < bin_info->nregs);
Jason Evans1e0a6362010-03-13 13:41:58 -0800266 /* Freeing an interior pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700267 assert(((uintptr_t)ptr -
268 ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans122449b2012-04-06 00:35:09 -0700269 (uintptr_t)bin_info->reg0_offset)) %
270 (uintptr_t)bin_info->reg_interval == 0);
Jason Evans0c5dd032014-09-29 01:31:39 -0700271 assert((uintptr_t)ptr >=
272 (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
Jason Evans49f7e8f2011-03-15 13:59:15 -0700273 (uintptr_t)bin_info->reg0_offset);
Jason Evans84c8eef2011-03-16 10:30:13 -0700274 /* Freeing an unallocated pointer can cause assertion failure. */
Jason Evans0c5dd032014-09-29 01:31:39 -0700275 assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
Jason Evanse476f8a2010-01-16 09:53:50 -0800276
Jason Evans0c5dd032014-09-29 01:31:39 -0700277 bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
Jason Evans1e0a6362010-03-13 13:41:58 -0800278 run->nfree++;
Jason Evanse476f8a2010-01-16 09:53:50 -0800279}
280
Jason Evansaf1f5922014-10-30 16:38:08 -0700281JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800282arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
283{
284
Jason Evansbd87b012014-04-15 16:35:08 -0700285 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
286 (run_ind << LG_PAGE)), (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800287 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
288 (npages << LG_PAGE));
Jason Evans38067482013-01-21 20:04:42 -0800289}
290
Jason Evansaf1f5922014-10-30 16:38:08 -0700291JEMALLOC_INLINE_C void
Jason Evansdda90f52013-10-19 23:48:40 -0700292arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
293{
294
Jason Evansbd87b012014-04-15 16:35:08 -0700295 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
296 << LG_PAGE)), PAGE);
Jason Evansdda90f52013-10-19 23:48:40 -0700297}
298
Jason Evansaf1f5922014-10-30 16:38:08 -0700299JEMALLOC_INLINE_C void
Jason Evans38067482013-01-21 20:04:42 -0800300arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
Jason Evans21fb95b2010-10-18 17:45:40 -0700301{
Jason Evansd4bab212010-10-24 20:08:37 -0700302 size_t i;
Jason Evansae4c7b42012-04-02 07:04:34 -0700303 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
Jason Evansd4bab212010-10-24 20:08:37 -0700304
Jason Evansdda90f52013-10-19 23:48:40 -0700305 arena_run_page_mark_zeroed(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700306 for (i = 0; i < PAGE / sizeof(size_t); i++)
Jason Evans21fb95b2010-10-18 17:45:40 -0700307 assert(p[i] == 0);
308}
Jason Evans21fb95b2010-10-18 17:45:40 -0700309
Jason Evanse476f8a2010-01-16 09:53:50 -0800310static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800311arena_nactive_add(arena_t *arena, size_t add_pages)
Jason Evansaa5113b2014-01-14 16:23:03 -0800312{
313
314 if (config_stats) {
Jason Evans3763d3b2016-02-26 17:29:35 -0800315 size_t cactive_add = CHUNK_CEILING((arena->nactive +
316 add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
Jason Evans15229372014-08-06 23:38:39 -0700317 LG_PAGE);
Jason Evans3763d3b2016-02-26 17:29:35 -0800318 if (cactive_add != 0)
319 stats_cactive_add(cactive_add);
320 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800321 arena->nactive += add_pages;
Jason Evans3763d3b2016-02-26 17:29:35 -0800322}
323
324static void
Jason Evans40ee9aa2016-02-27 12:34:50 -0800325arena_nactive_sub(arena_t *arena, size_t sub_pages)
Jason Evans3763d3b2016-02-26 17:29:35 -0800326{
327
328 if (config_stats) {
329 size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
330 CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
331 if (cactive_sub != 0)
332 stats_cactive_sub(cactive_sub);
Jason Evansaa5113b2014-01-14 16:23:03 -0800333 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800334 arena->nactive -= sub_pages;
Jason Evansaa5113b2014-01-14 16:23:03 -0800335}
336
337static void
338arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
Jason Evans8fadb1a2015-08-04 10:49:46 -0700339 size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
Jason Evansaa5113b2014-01-14 16:23:03 -0800340{
341 size_t total_pages, rem_pages;
342
Jason Evans8fadb1a2015-08-04 10:49:46 -0700343 assert(flag_dirty == 0 || flag_decommitted == 0);
344
Jason Evansaa5113b2014-01-14 16:23:03 -0800345 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
346 LG_PAGE;
347 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
348 flag_dirty);
349 assert(need_pages <= total_pages);
350 rem_pages = total_pages - need_pages;
351
Qinfan Wu90737fc2014-07-21 19:39:20 -0700352 arena_avail_remove(arena, chunk, run_ind, total_pages);
Jason Evans070b3c32014-08-14 14:45:58 -0700353 if (flag_dirty != 0)
Jason Evansee41ad42015-02-15 18:04:46 -0800354 arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
Jason Evans40ee9aa2016-02-27 12:34:50 -0800355 arena_nactive_add(arena, need_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800356
357 /* Keep track of trailing unused pages for later use. */
358 if (rem_pages > 0) {
Jason Evans8fadb1a2015-08-04 10:49:46 -0700359 size_t flags = flag_dirty | flag_decommitted;
Jason Evans1f27abc2015-08-11 12:42:33 -0700360 size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
361 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700362
Jason Evans1f27abc2015-08-11 12:42:33 -0700363 arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
364 (rem_pages << LG_PAGE), flags |
365 (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
366 flag_unzeroed_mask));
367 arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
368 (rem_pages << LG_PAGE), flags |
369 (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
370 flag_unzeroed_mask));
371 if (flag_dirty != 0) {
372 arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
373 rem_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800374 }
Qinfan Wu90737fc2014-07-21 19:39:20 -0700375 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800376 }
377}
378
Jason Evans8fadb1a2015-08-04 10:49:46 -0700379static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800380arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
381 bool remove, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -0800382{
383 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -0700384 arena_chunk_map_misc_t *miscelm;
Dmitry-Mea306a602015-09-04 13:15:28 +0300385 size_t flag_dirty, flag_decommitted, run_ind, need_pages;
Jason Evans1f27abc2015-08-11 12:42:33 -0700386 size_t flag_unzeroed_mask;
Jason Evans203484e2012-05-02 00:30:36 -0700387
Jason Evanse476f8a2010-01-16 09:53:50 -0800388 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -0700389 miscelm = arena_run_to_miscelm(run);
390 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evans203484e2012-05-02 00:30:36 -0700391 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700392 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
Jason Evansae4c7b42012-04-02 07:04:34 -0700393 need_pages = (size >> LG_PAGE);
Jason Evanse476f8a2010-01-16 09:53:50 -0800394 assert(need_pages > 0);
Jason Evanse476f8a2010-01-16 09:53:50 -0800395
Jason Evansde249c82015-08-09 16:47:27 -0700396 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
397 run_ind << LG_PAGE, size, arena->ind))
Jason Evans8fadb1a2015-08-04 10:49:46 -0700398 return (true);
399
Jason Evansc368f8c2013-10-29 18:17:42 -0700400 if (remove) {
Jason Evansaa5113b2014-01-14 16:23:03 -0800401 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
Jason Evans8fadb1a2015-08-04 10:49:46 -0700402 flag_decommitted, need_pages);
Jason Evans19b3d612010-03-18 20:36:40 -0700403 }
404
Jason Evansaa5113b2014-01-14 16:23:03 -0800405 if (zero) {
Jason Evans1f27abc2015-08-11 12:42:33 -0700406 if (flag_decommitted != 0) {
407 /* The run is untouched, and therefore zeroed. */
408 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
409 *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
410 (need_pages << LG_PAGE));
411 } else if (flag_dirty != 0) {
412 /* The run is dirty, so all pages must be zeroed. */
413 arena_run_zero(chunk, run_ind, need_pages);
414 } else {
Jason Evansaa5113b2014-01-14 16:23:03 -0800415 /*
416 * The run is clean, so some pages may be zeroed (i.e.
417 * never before touched).
418 */
Dmitry-Mea306a602015-09-04 13:15:28 +0300419 size_t i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800420 for (i = 0; i < need_pages; i++) {
421 if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
422 != 0)
423 arena_run_zero(chunk, run_ind+i, 1);
424 else if (config_debug) {
425 arena_run_page_validate_zeroed(chunk,
426 run_ind+i);
427 } else {
428 arena_run_page_mark_zeroed(chunk,
429 run_ind+i);
Jason Evans19b3d612010-03-18 20:36:40 -0700430 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800431 }
432 }
Jason Evans19b3d612010-03-18 20:36:40 -0700433 } else {
Jason Evansbd87b012014-04-15 16:35:08 -0700434 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
Jason Evansdda90f52013-10-19 23:48:40 -0700435 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
Jason Evanse476f8a2010-01-16 09:53:50 -0800436 }
Jason Evansaa5113b2014-01-14 16:23:03 -0800437
438 /*
439 * Set the last element first, in case the run only contains one page
440 * (i.e. both statements set the same element).
441 */
Jason Evans1f27abc2015-08-11 12:42:33 -0700442 flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
443 CHUNK_MAP_UNZEROED : 0;
444 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
445 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
446 run_ind+need_pages-1)));
447 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
448 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
Jason Evans8fadb1a2015-08-04 10:49:46 -0700449 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -0800450}
451
Jason Evans8fadb1a2015-08-04 10:49:46 -0700452static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800453arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
Jason Evansc368f8c2013-10-29 18:17:42 -0700454{
455
Jason Evans8fadb1a2015-08-04 10:49:46 -0700456 return (arena_run_split_large_helper(arena, run, size, true, zero));
Jason Evansc368f8c2013-10-29 18:17:42 -0700457}
458
Jason Evans8fadb1a2015-08-04 10:49:46 -0700459static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800460arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
Jason Evansc368f8c2013-10-29 18:17:42 -0700461{
462
Jason Evans8fadb1a2015-08-04 10:49:46 -0700463 return (arena_run_split_large_helper(arena, run, size, false, zero));
Jason Evansaa5113b2014-01-14 16:23:03 -0800464}
465
Jason Evans8fadb1a2015-08-04 10:49:46 -0700466static bool
Jason Evansaa5113b2014-01-14 16:23:03 -0800467arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
Jason Evansd01fd192015-08-19 15:21:32 -0700468 szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -0800469{
470 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -0700471 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700472 size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800473
474 assert(binind != BININD_INVALID);
475
476 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -0700477 miscelm = arena_run_to_miscelm(run);
478 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -0800479 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700480 flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -0800481 need_pages = (size >> LG_PAGE);
482 assert(need_pages > 0);
483
Jason Evans8fadb1a2015-08-04 10:49:46 -0700484 if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
485 run_ind << LG_PAGE, size, arena->ind))
486 return (true);
487
488 arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
489 flag_decommitted, need_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -0800490
Jason Evans381c23d2014-10-10 23:01:03 -0700491 for (i = 0; i < need_pages; i++) {
Jason Evans1f27abc2015-08-11 12:42:33 -0700492 size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
493 run_ind+i);
494 arena_mapbits_small_set(chunk, run_ind+i, i, binind,
495 flag_unzeroed);
496 if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
Jason Evansaa5113b2014-01-14 16:23:03 -0800497 arena_run_page_validate_zeroed(chunk, run_ind+i);
498 }
Jason Evansbd87b012014-04-15 16:35:08 -0700499 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
Jason Evansaa5113b2014-01-14 16:23:03 -0800500 (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
Jason Evans8fadb1a2015-08-04 10:49:46 -0700501 return (false);
Jason Evansaa5113b2014-01-14 16:23:03 -0800502}
503
504static arena_chunk_t *
505arena_chunk_init_spare(arena_t *arena)
506{
507 arena_chunk_t *chunk;
508
509 assert(arena->spare != NULL);
510
511 chunk = arena->spare;
512 arena->spare = NULL;
513
514 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
515 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
516 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700517 arena_maxrun);
Jason Evansaa5113b2014-01-14 16:23:03 -0800518 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700519 arena_maxrun);
Jason Evansaa5113b2014-01-14 16:23:03 -0800520 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
521 arena_mapbits_dirty_get(chunk, chunk_npages-1));
522
523 return (chunk);
524}
525
Jason Evans99bd94f2015-02-18 16:40:53 -0800526static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -0700527arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -0700528 bool zero)
Jason Evans99bd94f2015-02-18 16:40:53 -0800529{
530
Jason Evans8fadb1a2015-08-04 10:49:46 -0700531 /*
532 * The extent node notion of "committed" doesn't directly apply to
Jason Evansde249c82015-08-09 16:47:27 -0700533 * arena chunks. Arbitrarily mark them as committed. The commit state
534 * of runs is tracked individually, and upon chunk deallocation the
535 * entire chunk is in a consistent commit state.
Jason Evans8fadb1a2015-08-04 10:49:46 -0700536 */
537 extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
Jason Evans99bd94f2015-02-18 16:40:53 -0800538 extent_node_achunk_set(&chunk->node, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700539 return (chunk_register(tsdn, chunk, &chunk->node));
Jason Evans99bd94f2015-02-18 16:40:53 -0800540}
541
542static arena_chunk_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700543arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -0700544 chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
Jason Evans99bd94f2015-02-18 16:40:53 -0800545{
546 arena_chunk_t *chunk;
Jason Evans99bd94f2015-02-18 16:40:53 -0800547
Jason Evansc1e00ef2016-05-10 22:21:10 -0700548 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -0400549
Jason Evansc1e00ef2016-05-10 22:21:10 -0700550 chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -0700551 NULL, chunksize, chunksize, zero, commit);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700552 if (chunk != NULL && !*commit) {
553 /* Commit header. */
554 if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
555 LG_PAGE, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700556 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -0700557 (void *)chunk, chunksize, *zero, *commit);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700558 chunk = NULL;
559 }
560 }
Jason Evansc1e00ef2016-05-10 22:21:10 -0700561 if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
Jason Evans8fadb1a2015-08-04 10:49:46 -0700562 if (!*commit) {
563 /* Undo commit of header. */
564 chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
565 LG_PAGE, arena->ind);
566 }
Jason Evansc1e00ef2016-05-10 22:21:10 -0700567 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
Jason Evansce7c0f92016-03-30 18:36:04 -0700568 chunksize, *zero, *commit);
Jason Evans99bd94f2015-02-18 16:40:53 -0800569 chunk = NULL;
570 }
Jason Evans99bd94f2015-02-18 16:40:53 -0800571
Jason Evansc1e00ef2016-05-10 22:21:10 -0700572 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800573 return (chunk);
574}
575
Jason Evansaa5113b2014-01-14 16:23:03 -0800576static arena_chunk_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700577arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
578 bool *commit)
Jason Evanse2deab72014-05-15 22:22:27 -0700579{
580 arena_chunk_t *chunk;
Jason Evansb49a3342015-07-28 11:28:19 -0400581 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evanse2deab72014-05-15 22:22:27 -0700582
Jason Evansc1e00ef2016-05-10 22:21:10 -0700583 chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
Jason Evanse9012632016-11-03 17:11:01 -0700584 chunksize, zero, commit, true);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700585 if (chunk != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700586 if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
587 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
Jason Evansde249c82015-08-09 16:47:27 -0700588 chunksize, true);
Jason Evans8fadb1a2015-08-04 10:49:46 -0700589 return (NULL);
590 }
Jason Evansb49a3342015-07-28 11:28:19 -0400591 }
592 if (chunk == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700593 chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
Jason Evansb2c0d632016-04-13 23:36:15 -0700594 &chunk_hooks, zero, commit);
Jason Evansb49a3342015-07-28 11:28:19 -0400595 }
Jason Evans99bd94f2015-02-18 16:40:53 -0800596
Jason Evans4581b972014-11-27 17:22:36 -0200597 if (config_stats && chunk != NULL) {
Jason Evanse2deab72014-05-15 22:22:27 -0700598 arena->stats.mapped += chunksize;
Jason Evans4581b972014-11-27 17:22:36 -0200599 arena->stats.metadata_mapped += (map_bias << LG_PAGE);
600 }
Jason Evanse2deab72014-05-15 22:22:27 -0700601
602 return (chunk);
603}
604
Jason Evanse2deab72014-05-15 22:22:27 -0700605static arena_chunk_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700606arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
Jason Evansaa5113b2014-01-14 16:23:03 -0800607{
608 arena_chunk_t *chunk;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700609 bool zero, commit;
Jason Evans45186f02015-08-10 23:03:34 -0700610 size_t flag_unzeroed, flag_decommitted, i;
Jason Evansaa5113b2014-01-14 16:23:03 -0800611
612 assert(arena->spare == NULL);
613
614 zero = false;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700615 commit = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -0700616 chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
Jason Evansaa5113b2014-01-14 16:23:03 -0800617 if (chunk == NULL)
618 return (NULL);
Jason Evansaa5113b2014-01-14 16:23:03 -0800619
Jason Evansaa5113b2014-01-14 16:23:03 -0800620 /*
Jason Evansaa5113b2014-01-14 16:23:03 -0800621 * Initialize the map to contain one maximal free untouched run. Mark
Jason Evansf86bc082016-03-31 11:19:46 -0700622 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
623 * or decommitted chunk.
Jason Evansaa5113b2014-01-14 16:23:03 -0800624 */
Jason Evans45186f02015-08-10 23:03:34 -0700625 flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
626 flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
627 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
628 flag_unzeroed | flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -0800629 /*
630 * There is no need to initialize the internal page map entries unless
631 * the chunk is not zeroed.
632 */
Jason Evans551ebc42014-10-03 10:16:09 -0700633 if (!zero) {
Jason Evansbd87b012014-04-15 16:35:08 -0700634 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
Jason Evans61a6dfc2016-03-23 16:04:38 -0700635 (void *)arena_bitselm_get_const(chunk, map_bias+1),
636 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
637 chunk_npages-1) -
638 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
Jason Evansaa5113b2014-01-14 16:23:03 -0800639 for (i = map_bias+1; i < chunk_npages-1; i++)
Jason Evans45186f02015-08-10 23:03:34 -0700640 arena_mapbits_internal_set(chunk, i, flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800641 } else {
Qinfan Wuff6a31d2014-08-29 13:34:40 -0700642 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
Jason Evans61a6dfc2016-03-23 16:04:38 -0700643 *)arena_bitselm_get_const(chunk, map_bias+1),
644 (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
645 chunk_npages-1) -
646 (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
Jason Evansaa5113b2014-01-14 16:23:03 -0800647 if (config_debug) {
648 for (i = map_bias+1; i < chunk_npages-1; i++) {
649 assert(arena_mapbits_unzeroed_get(chunk, i) ==
Jason Evans45186f02015-08-10 23:03:34 -0700650 flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800651 }
652 }
653 }
Jason Evans155bfa72014-10-05 17:54:10 -0700654 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
Jason Evans45186f02015-08-10 23:03:34 -0700655 flag_unzeroed);
Jason Evansaa5113b2014-01-14 16:23:03 -0800656
657 return (chunk);
Jason Evansc368f8c2013-10-29 18:17:42 -0700658}
659
Jason Evanse476f8a2010-01-16 09:53:50 -0800660static arena_chunk_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700661arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
Jason Evanse476f8a2010-01-16 09:53:50 -0800662{
663 arena_chunk_t *chunk;
Jason Evanse476f8a2010-01-16 09:53:50 -0800664
Jason Evansaa5113b2014-01-14 16:23:03 -0800665 if (arena->spare != NULL)
666 chunk = arena_chunk_init_spare(arena);
Chris Pride20a8c782014-03-25 22:36:05 -0700667 else {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700668 chunk = arena_chunk_init_hard(tsdn, arena);
Chris Pride20a8c782014-03-25 22:36:05 -0700669 if (chunk == NULL)
670 return (NULL);
671 }
Jason Evanse476f8a2010-01-16 09:53:50 -0800672
Jason Evans19ff2ce2016-04-22 14:37:17 -0700673 ql_elm_new(&chunk->node, ql_link);
674 ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
Qinfan Wu90737fc2014-07-21 19:39:20 -0700675 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
Jason Evanse3d13062012-10-30 15:42:37 -0700676
Jason Evanse476f8a2010-01-16 09:53:50 -0800677 return (chunk);
678}
679
680static void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700681arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
Jason Evans19ff2ce2016-04-22 14:37:17 -0700682{
683 bool committed;
684 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
685
686 chunk_deregister(chunk, &chunk->node);
687
688 committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
689 if (!committed) {
690 /*
691 * Decommit the header. Mark the chunk as decommitted even if
692 * header decommit fails, since treating a partially committed
693 * chunk as committed has a high potential for causing later
694 * access of decommitted memory.
695 */
Jason Evansc1e00ef2016-05-10 22:21:10 -0700696 chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans19ff2ce2016-04-22 14:37:17 -0700697 chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
698 arena->ind);
699 }
700
Jason Evansc1e00ef2016-05-10 22:21:10 -0700701 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
Jason Evans19ff2ce2016-04-22 14:37:17 -0700702 committed);
703
704 if (config_stats) {
705 arena->stats.mapped -= chunksize;
706 arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
707 }
708}
709
710static void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700711arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
Jason Evans19ff2ce2016-04-22 14:37:17 -0700712{
713
714 assert(arena->spare != spare);
715
716 if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
717 arena_run_dirty_remove(arena, spare, map_bias,
718 chunk_npages-map_bias);
719 }
720
Jason Evansc1e00ef2016-05-10 22:21:10 -0700721 arena_chunk_discard(tsdn, arena, spare);
Jason Evans19ff2ce2016-04-22 14:37:17 -0700722}
723
724static void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700725arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
Jason Evanse476f8a2010-01-16 09:53:50 -0800726{
Jason Evans19ff2ce2016-04-22 14:37:17 -0700727 arena_chunk_t *spare;
Qinfan Wu04d60a12014-07-18 14:21:17 -0700728
Jason Evans30fe12b2012-05-10 17:09:17 -0700729 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
730 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
731 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700732 arena_maxrun);
Jason Evans30fe12b2012-05-10 17:09:17 -0700733 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
Jason Evans155bfa72014-10-05 17:54:10 -0700734 arena_maxrun);
Jason Evans30fe12b2012-05-10 17:09:17 -0700735 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
736 arena_mapbits_dirty_get(chunk, chunk_npages-1));
Jason Evansde249c82015-08-09 16:47:27 -0700737 assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
738 arena_mapbits_decommitted_get(chunk, chunk_npages-1));
Jason Evans30fe12b2012-05-10 17:09:17 -0700739
Dave Watson3417a302016-02-23 12:06:21 -0800740 /* Remove run from runs_avail, so that the arena does not use it. */
Qinfan Wu90737fc2014-07-21 19:39:20 -0700741 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
Jason Evanse476f8a2010-01-16 09:53:50 -0800742
Jason Evans19ff2ce2016-04-22 14:37:17 -0700743 ql_remove(&arena->achunks, &chunk->node, ql_link);
744 spare = arena->spare;
745 arena->spare = chunk;
746 if (spare != NULL)
Jason Evansc1e00ef2016-05-10 22:21:10 -0700747 arena_spare_discard(tsdn, arena, spare);
Jason Evanse476f8a2010-01-16 09:53:50 -0800748}
749
Jason Evans9b41ac92014-10-14 22:20:00 -0700750static void
751arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
752{
Jason Evansd01fd192015-08-19 15:21:32 -0700753 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700754
755 cassert(config_stats);
756
757 arena->stats.nmalloc_huge++;
758 arena->stats.allocated_huge += usize;
759 arena->stats.hstats[index].nmalloc++;
760 arena->stats.hstats[index].curhchunks++;
761}
762
763static void
764arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
765{
Jason Evansd01fd192015-08-19 15:21:32 -0700766 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700767
768 cassert(config_stats);
769
770 arena->stats.nmalloc_huge--;
771 arena->stats.allocated_huge -= usize;
772 arena->stats.hstats[index].nmalloc--;
773 arena->stats.hstats[index].curhchunks--;
774}
775
776static void
777arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
778{
Jason Evansd01fd192015-08-19 15:21:32 -0700779 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700780
781 cassert(config_stats);
782
783 arena->stats.ndalloc_huge++;
784 arena->stats.allocated_huge -= usize;
785 arena->stats.hstats[index].ndalloc++;
786 arena->stats.hstats[index].curhchunks--;
787}
788
789static void
Jason Evans7e674952016-04-25 13:26:54 -0700790arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
791{
792 szind_t index = size2index(usize) - nlclasses - NBINS;
793
794 cassert(config_stats);
795
796 arena->stats.ndalloc_huge++;
797 arena->stats.hstats[index].ndalloc--;
798}
799
800static void
Jason Evans9b41ac92014-10-14 22:20:00 -0700801arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
802{
Jason Evansd01fd192015-08-19 15:21:32 -0700803 szind_t index = size2index(usize) - nlclasses - NBINS;
Jason Evans9b41ac92014-10-14 22:20:00 -0700804
805 cassert(config_stats);
806
807 arena->stats.ndalloc_huge--;
808 arena->stats.allocated_huge += usize;
809 arena->stats.hstats[index].ndalloc--;
810 arena->stats.hstats[index].curhchunks++;
811}
812
813static void
814arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
815{
816
817 arena_huge_dalloc_stats_update(arena, oldsize);
818 arena_huge_malloc_stats_update(arena, usize);
819}
820
821static void
822arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
823 size_t usize)
824{
825
826 arena_huge_dalloc_stats_update_undo(arena, oldsize);
827 arena_huge_malloc_stats_update_undo(arena, usize);
828}
829
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800830extent_node_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700831arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800832{
833 extent_node_t *node;
834
Jason Evansc1e00ef2016-05-10 22:21:10 -0700835 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800836 node = ql_last(&arena->node_cache, ql_link);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800837 if (node == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700838 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
839 return (base_alloc(tsdn, sizeof(extent_node_t)));
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800840 }
Jason Evans2195ba42015-02-15 16:43:52 -0800841 ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700842 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800843 return (node);
844}
845
846void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700847arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800848{
849
Jason Evansc1e00ef2016-05-10 22:21:10 -0700850 malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
Jason Evans2195ba42015-02-15 16:43:52 -0800851 ql_elm_new(node, ql_link);
852 ql_tail_insert(&arena->node_cache, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700853 malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
Jason Evanscbf3a6d2015-02-11 12:24:27 -0800854}
855
Jason Evans99bd94f2015-02-18 16:40:53 -0800856static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700857arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -0700858 chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
859 size_t csize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700860{
861 void *ret;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700862 bool commit = true;
Jason Evans9b41ac92014-10-14 22:20:00 -0700863
Jason Evansc1e00ef2016-05-10 22:21:10 -0700864 ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
Jason Evansb2c0d632016-04-13 23:36:15 -0700865 alignment, zero, &commit);
Jason Evans9b41ac92014-10-14 22:20:00 -0700866 if (ret == NULL) {
867 /* Revert optimistic stats updates. */
Jason Evansc1e00ef2016-05-10 22:21:10 -0700868 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700869 if (config_stats) {
870 arena_huge_malloc_stats_update_undo(arena, usize);
871 arena->stats.mapped -= usize;
872 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800873 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700874 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700875 }
876
Jason Evans99bd94f2015-02-18 16:40:53 -0800877 return (ret);
878}
Jason Evans9b41ac92014-10-14 22:20:00 -0700879
Jason Evans99bd94f2015-02-18 16:40:53 -0800880void *
Jason Evansc1e00ef2016-05-10 22:21:10 -0700881arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
Jason Evansb2c0d632016-04-13 23:36:15 -0700882 size_t alignment, bool *zero)
Jason Evans99bd94f2015-02-18 16:40:53 -0800883{
884 void *ret;
Jason Evansb49a3342015-07-28 11:28:19 -0400885 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800886 size_t csize = CHUNK_CEILING(usize);
Jason Evanse9012632016-11-03 17:11:01 -0700887 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -0800888
Jason Evansc1e00ef2016-05-10 22:21:10 -0700889 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800890
891 /* Optimistically update stats. */
892 if (config_stats) {
893 arena_huge_malloc_stats_update(arena, usize);
894 arena->stats.mapped += usize;
895 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800896 arena_nactive_add(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800897
Jason Evansc1e00ef2016-05-10 22:21:10 -0700898 ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
Jason Evanse9012632016-11-03 17:11:01 -0700899 alignment, zero, &commit, true);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700900 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800901 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700902 ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -0700903 usize, alignment, zero, csize);
Jason Evans99bd94f2015-02-18 16:40:53 -0800904 }
905
Jason Evans9b41ac92014-10-14 22:20:00 -0700906 return (ret);
907}
908
909void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700910arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700911{
Jason Evansb49a3342015-07-28 11:28:19 -0400912 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800913 size_t csize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700914
Jason Evans99bd94f2015-02-18 16:40:53 -0800915 csize = CHUNK_CEILING(usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700916 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700917 if (config_stats) {
918 arena_huge_dalloc_stats_update(arena, usize);
919 arena->stats.mapped -= usize;
Jason Evans9b41ac92014-10-14 22:20:00 -0700920 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800921 arena_nactive_sub(arena, usize >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800922
Jason Evansc1e00ef2016-05-10 22:21:10 -0700923 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
924 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700925}
926
927void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700928arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -0700929 size_t oldsize, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700930{
931
932 assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
933 assert(oldsize != usize);
934
Jason Evansc1e00ef2016-05-10 22:21:10 -0700935 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700936 if (config_stats)
937 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -0800938 if (oldsize < usize)
939 arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
940 else
941 arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700942 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700943}
944
945void
Jason Evansc1e00ef2016-05-10 22:21:10 -0700946arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -0700947 size_t oldsize, size_t usize)
Jason Evans9b41ac92014-10-14 22:20:00 -0700948{
Jason Evans9b41ac92014-10-14 22:20:00 -0700949 size_t udiff = oldsize - usize;
950 size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
951
Jason Evansc1e00ef2016-05-10 22:21:10 -0700952 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans9b41ac92014-10-14 22:20:00 -0700953 if (config_stats) {
954 arena_huge_ralloc_stats_update(arena, oldsize, usize);
Jason Evans40ee9aa2016-02-27 12:34:50 -0800955 if (cdiff != 0)
Jason Evans9b41ac92014-10-14 22:20:00 -0700956 arena->stats.mapped -= cdiff;
Jason Evans9b41ac92014-10-14 22:20:00 -0700957 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800958 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evans99bd94f2015-02-18 16:40:53 -0800959
Jason Evans2012d5a2014-11-17 09:54:49 -0800960 if (cdiff != 0) {
Jason Evansb49a3342015-07-28 11:28:19 -0400961 chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
Jason Evans99bd94f2015-02-18 16:40:53 -0800962 void *nchunk = (void *)((uintptr_t)chunk +
963 CHUNK_CEILING(usize));
964
Jason Evansc1e00ef2016-05-10 22:21:10 -0700965 chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -0700966 true);
Jason Evansb49a3342015-07-28 11:28:19 -0400967 }
Jason Evansc1e00ef2016-05-10 22:21:10 -0700968 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800969}
970
Jason Evansb49a3342015-07-28 11:28:19 -0400971static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -0700972arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -0700973 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
974 bool *zero, void *nchunk, size_t udiff, size_t cdiff)
Jason Evans99bd94f2015-02-18 16:40:53 -0800975{
976 bool err;
Jason Evans8fadb1a2015-08-04 10:49:46 -0700977 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -0800978
Jason Evansc1e00ef2016-05-10 22:21:10 -0700979 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -0700980 chunksize, zero, &commit) == NULL);
Jason Evans99bd94f2015-02-18 16:40:53 -0800981 if (err) {
982 /* Revert optimistic stats updates. */
Jason Evansc1e00ef2016-05-10 22:21:10 -0700983 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -0800984 if (config_stats) {
985 arena_huge_ralloc_stats_update_undo(arena, oldsize,
986 usize);
987 arena->stats.mapped -= cdiff;
988 }
Jason Evans40ee9aa2016-02-27 12:34:50 -0800989 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -0700990 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -0400991 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
992 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -0700993 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -0700994 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -0400995 err = true;
Jason Evans2012d5a2014-11-17 09:54:49 -0800996 }
Jason Evans99bd94f2015-02-18 16:40:53 -0800997 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -0700998}
999
1000bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001001arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07001002 size_t oldsize, size_t usize, bool *zero)
Jason Evans9b41ac92014-10-14 22:20:00 -07001003{
Jason Evans99bd94f2015-02-18 16:40:53 -08001004 bool err;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001005 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans99bd94f2015-02-18 16:40:53 -08001006 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
Jason Evans9b41ac92014-10-14 22:20:00 -07001007 size_t udiff = usize - oldsize;
1008 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
Jason Evanse9012632016-11-03 17:11:01 -07001009 bool commit = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001010
Jason Evansc1e00ef2016-05-10 22:21:10 -07001011 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001012
1013 /* Optimistically update stats. */
Jason Evans9b41ac92014-10-14 22:20:00 -07001014 if (config_stats) {
Jason Evans9b41ac92014-10-14 22:20:00 -07001015 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1016 arena->stats.mapped += cdiff;
1017 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001018 arena_nactive_add(arena, udiff >> LG_PAGE);
Jason Evans9b41ac92014-10-14 22:20:00 -07001019
Jason Evansc1e00ef2016-05-10 22:21:10 -07001020 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evanse9012632016-11-03 17:11:01 -07001021 chunksize, zero, &commit, true) == NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001022 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001023 if (err) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001024 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07001025 &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
Jason Evansb49a3342015-07-28 11:28:19 -04001026 cdiff);
1027 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1028 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001029 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -07001030 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001031 err = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001032 }
1033
Jason Evans99bd94f2015-02-18 16:40:53 -08001034 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001035}
1036
Jason Evansaa282662015-07-15 16:02:21 -07001037/*
1038 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
Dave Watson3417a302016-02-23 12:06:21 -08001039 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1040 * same size.
Jason Evansaa282662015-07-15 16:02:21 -07001041 */
Jason Evans97c04a92015-03-06 19:57:36 -08001042static arena_run_t *
Jason Evansaa282662015-07-15 16:02:21 -07001043arena_run_first_best_fit(arena_t *arena, size_t size)
Jason Evans97c04a92015-03-06 19:57:36 -08001044{
Jason Evansf193fd82016-04-08 14:17:57 -07001045 pszind_t pind, i;
Dave Watson3417a302016-02-23 12:06:21 -08001046
Jason Evansf193fd82016-04-08 14:17:57 -07001047 pind = psz2ind(run_quantize_ceil(size));
1048
1049 for (i = pind; pind2sz(i) <= large_maxclass; i++) {
Jason Evansc6a2c392016-03-26 17:30:37 -07001050 arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
Jason Evansf193fd82016-04-08 14:17:57 -07001051 &arena->runs_avail[i]);
Jason Evansc6a2c392016-03-26 17:30:37 -07001052 if (miscelm != NULL)
Dave Watson3417a302016-02-23 12:06:21 -08001053 return (&miscelm->run);
1054 }
1055
1056 return (NULL);
Jason Evans97c04a92015-03-06 19:57:36 -08001057}
1058
Jason Evanse476f8a2010-01-16 09:53:50 -08001059static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001060arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001061{
Jason Evans32896a92016-11-03 22:21:34 -07001062 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001063 if (run != NULL) {
1064 if (arena_run_split_large(arena, run, size, zero))
1065 run = NULL;
1066 }
Jason Evans97c04a92015-03-06 19:57:36 -08001067 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001068}
1069
1070static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001071arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001072{
1073 arena_chunk_t *chunk;
1074 arena_run_t *run;
1075
Jason Evansfc0b3b72014-10-09 17:54:06 -07001076 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001077 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001078
1079 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001080 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001081 if (run != NULL)
1082 return (run);
1083
Jason Evanse476f8a2010-01-16 09:53:50 -08001084 /*
1085 * No usable runs. Create a new chunk from which to allocate the run.
1086 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001087 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001088 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001089 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001090 if (arena_run_split_large(arena, run, size, zero))
1091 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001092 return (run);
1093 }
1094
1095 /*
1096 * arena_chunk_alloc() failed, but another thread may have made
1097 * sufficient memory available while this one dropped arena->lock in
1098 * arena_chunk_alloc(), so search one more time.
1099 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001100 return (arena_run_alloc_large_helper(arena, size, zero));
1101}
1102
1103static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001104arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001105{
Jason Evansaa282662015-07-15 16:02:21 -07001106 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001107 if (run != NULL) {
1108 if (arena_run_split_small(arena, run, size, binind))
1109 run = NULL;
1110 }
Jason Evans97c04a92015-03-06 19:57:36 -08001111 return (run);
Jason Evansaa5113b2014-01-14 16:23:03 -08001112}
1113
1114static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001115arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001116{
1117 arena_chunk_t *chunk;
1118 arena_run_t *run;
1119
Jason Evansfc0b3b72014-10-09 17:54:06 -07001120 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001121 assert(size == PAGE_CEILING(size));
Jason Evansaa5113b2014-01-14 16:23:03 -08001122 assert(binind != BININD_INVALID);
1123
1124 /* Search the arena's chunks for the lowest best fit. */
1125 run = arena_run_alloc_small_helper(arena, size, binind);
1126 if (run != NULL)
1127 return (run);
1128
1129 /*
1130 * No usable runs. Create a new chunk from which to allocate the run.
1131 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001132 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evansaa5113b2014-01-14 16:23:03 -08001133 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001134 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001135 if (arena_run_split_small(arena, run, size, binind))
1136 run = NULL;
Jason Evansaa5113b2014-01-14 16:23:03 -08001137 return (run);
1138 }
1139
1140 /*
1141 * arena_chunk_alloc() failed, but another thread may have made
1142 * sufficient memory available while this one dropped arena->lock in
1143 * arena_chunk_alloc(), so search one more time.
1144 */
1145 return (arena_run_alloc_small_helper(arena, size, binind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001146}
1147
Jason Evans8d6a3e82015-03-18 18:55:33 -07001148static bool
1149arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1150{
1151
Jason Evansbd16ea42015-03-24 15:59:28 -07001152 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1153 << 3));
Jason Evans8d6a3e82015-03-18 18:55:33 -07001154}
1155
1156ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001157arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001158{
1159 ssize_t lg_dirty_mult;
1160
Jason Evansc1e00ef2016-05-10 22:21:10 -07001161 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001162 lg_dirty_mult = arena->lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001163 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001164
1165 return (lg_dirty_mult);
1166}
1167
1168bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001169arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001170{
1171
1172 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1173 return (true);
1174
Jason Evansc1e00ef2016-05-10 22:21:10 -07001175 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001176 arena->lg_dirty_mult = lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001177 arena_maybe_purge(tsdn, arena);
1178 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001179
1180 return (false);
1181}
1182
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001183static void
Jason Evans243f7a02016-02-19 20:09:31 -08001184arena_decay_deadline_init(arena_t *arena)
1185{
1186
1187 assert(opt_purge == purge_mode_decay);
1188
1189 /*
1190 * Generate a new deadline that is uniformly random within the next
1191 * epoch after the current one.
1192 */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001193 nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1194 nstime_add(&arena->decay.deadline, &arena->decay.interval);
1195 if (arena->decay.time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001196 nstime_t jitter;
Jason Evans243f7a02016-02-19 20:09:31 -08001197
Jason Evans94e7ffa2016-10-10 20:32:19 -07001198 nstime_init(&jitter, prng_range(&arena->decay.jitter_state,
1199 nstime_ns(&arena->decay.interval)));
1200 nstime_add(&arena->decay.deadline, &jitter);
Jason Evans243f7a02016-02-19 20:09:31 -08001201 }
1202}
1203
1204static bool
Jason Evans9bad0792016-02-21 11:25:02 -08001205arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001206{
1207
1208 assert(opt_purge == purge_mode_decay);
1209
Jason Evans94e7ffa2016-10-10 20:32:19 -07001210 return (nstime_compare(&arena->decay.deadline, time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001211}
1212
1213static size_t
1214arena_decay_backlog_npages_limit(const arena_t *arena)
1215{
1216 static const uint64_t h_steps[] = {
1217#define STEP(step, h, x, y) \
1218 h,
1219 SMOOTHSTEP
1220#undef STEP
1221 };
1222 uint64_t sum;
1223 size_t npages_limit_backlog;
1224 unsigned i;
1225
1226 assert(opt_purge == purge_mode_decay);
1227
1228 /*
1229 * For each element of decay_backlog, multiply by the corresponding
1230 * fixed-point smoothstep decay factor. Sum the products, then divide
1231 * to round down to the nearest whole number of pages.
1232 */
1233 sum = 0;
1234 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
Jason Evans94e7ffa2016-10-10 20:32:19 -07001235 sum += arena->decay.backlog[i] * h_steps[i];
rustyx00432332016-04-12 09:50:54 +02001236 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
Jason Evans243f7a02016-02-19 20:09:31 -08001237
1238 return (npages_limit_backlog);
1239}
1240
1241static void
Jason Evansd419bb02016-10-11 15:30:01 -07001242arena_decay_backlog_update_last(arena_t *arena)
1243{
1244 size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1245 arena->ndirty - arena->decay.ndirty : 0;
1246 arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1247}
1248
1249static void
1250arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1251{
1252
1253 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1254 memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1255 sizeof(size_t));
1256 } else {
1257 size_t nadvance_z = (size_t)nadvance_u64;
1258
1259 assert((uint64_t)nadvance_z == nadvance_u64);
1260
1261 memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
1262 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1263 if (nadvance_z > 1) {
1264 memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
1265 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1266 }
1267 }
1268
1269 arena_decay_backlog_update_last(arena);
1270}
1271
1272static void
1273arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001274{
rustyx00432332016-04-12 09:50:54 +02001275 uint64_t nadvance_u64;
Jason Evans9bad0792016-02-21 11:25:02 -08001276 nstime_t delta;
Jason Evans243f7a02016-02-19 20:09:31 -08001277
1278 assert(opt_purge == purge_mode_decay);
1279 assert(arena_decay_deadline_reached(arena, time));
1280
Jason Evans9bad0792016-02-21 11:25:02 -08001281 nstime_copy(&delta, time);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001282 nstime_subtract(&delta, &arena->decay.epoch);
1283 nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001284 assert(nadvance_u64 > 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001285
rustyx00432332016-04-12 09:50:54 +02001286 /* Add nadvance_u64 decay intervals to epoch. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001287 nstime_copy(&delta, &arena->decay.interval);
rustyx00432332016-04-12 09:50:54 +02001288 nstime_imultiply(&delta, nadvance_u64);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001289 nstime_add(&arena->decay.epoch, &delta);
Jason Evans243f7a02016-02-19 20:09:31 -08001290
1291 /* Set a new deadline. */
1292 arena_decay_deadline_init(arena);
1293
1294 /* Update the backlog. */
Jason Evansd419bb02016-10-11 15:30:01 -07001295 arena_decay_backlog_update(arena, nadvance_u64);
Jason Evans243f7a02016-02-19 20:09:31 -08001296}
1297
Jason Evansd419bb02016-10-11 15:30:01 -07001298static void
1299arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001300{
Jason Evansd419bb02016-10-11 15:30:01 -07001301 size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001302
Jason Evansd419bb02016-10-11 15:30:01 -07001303 if (arena->ndirty > ndirty_limit)
1304 arena_purge_to_limit(tsdn, arena, ndirty_limit);
1305 arena->decay.ndirty = arena->ndirty;
1306}
Jason Evans243f7a02016-02-19 20:09:31 -08001307
Jason Evansd419bb02016-10-11 15:30:01 -07001308static void
1309arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1310{
Jason Evans243f7a02016-02-19 20:09:31 -08001311
Jason Evansd419bb02016-10-11 15:30:01 -07001312 arena_decay_epoch_advance_helper(arena, time);
1313 arena_decay_epoch_advance_purge(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001314}
1315
1316static void
1317arena_decay_init(arena_t *arena, ssize_t decay_time)
1318{
1319
Jason Evans94e7ffa2016-10-10 20:32:19 -07001320 arena->decay.time = decay_time;
Jason Evans243f7a02016-02-19 20:09:31 -08001321 if (decay_time > 0) {
Jason Evans94e7ffa2016-10-10 20:32:19 -07001322 nstime_init2(&arena->decay.interval, decay_time, 0);
1323 nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
Jason Evans243f7a02016-02-19 20:09:31 -08001324 }
1325
Jason Evans94e7ffa2016-10-10 20:32:19 -07001326 nstime_init(&arena->decay.epoch, 0);
1327 nstime_update(&arena->decay.epoch);
1328 arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
Jason Evans243f7a02016-02-19 20:09:31 -08001329 arena_decay_deadline_init(arena);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001330 arena->decay.ndirty = arena->ndirty;
Jason Evans94e7ffa2016-10-10 20:32:19 -07001331 memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
Jason Evans243f7a02016-02-19 20:09:31 -08001332}
1333
1334static bool
1335arena_decay_time_valid(ssize_t decay_time)
1336{
1337
Jason Evans022f6892016-03-02 22:41:32 -08001338 if (decay_time < -1)
1339 return (false);
1340 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1341 return (true);
1342 return (false);
Jason Evans243f7a02016-02-19 20:09:31 -08001343}
1344
1345ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001346arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001347{
1348 ssize_t decay_time;
1349
Jason Evansc1e00ef2016-05-10 22:21:10 -07001350 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans94e7ffa2016-10-10 20:32:19 -07001351 decay_time = arena->decay.time;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001352 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001353
1354 return (decay_time);
1355}
1356
1357bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001358arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
Jason Evans243f7a02016-02-19 20:09:31 -08001359{
1360
1361 if (!arena_decay_time_valid(decay_time))
1362 return (true);
1363
Jason Evansc1e00ef2016-05-10 22:21:10 -07001364 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001365 /*
1366 * Restart decay backlog from scratch, which may cause many dirty pages
1367 * to be immediately purged. It would conceptually be possible to map
1368 * the old backlog onto the new backlog, but there is no justification
1369 * for such complexity since decay_time changes are intended to be
1370 * infrequent, either between the {-1, 0, >0} states, or a one-time
1371 * arbitrary change during initial arena configuration.
1372 */
1373 arena_decay_init(arena, decay_time);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001374 arena_maybe_purge(tsdn, arena);
1375 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001376
1377 return (false);
1378}
1379
1380static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001381arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
Jason Evans05b21be2010-03-14 17:36:10 -07001382{
1383
Jason Evans243f7a02016-02-19 20:09:31 -08001384 assert(opt_purge == purge_mode_ratio);
1385
Jason Evanse3d13062012-10-30 15:42:37 -07001386 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001387 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001388 return;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001389
Jason Evans0a9f9a42015-06-22 18:50:32 -07001390 /*
1391 * Iterate, since preventing recursive purging could otherwise leave too
1392 * many dirty pages.
1393 */
1394 while (true) {
1395 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1396 if (threshold < chunk_npages)
1397 threshold = chunk_npages;
1398 /*
1399 * Don't purge unless the number of purgeable pages exceeds the
1400 * threshold.
1401 */
1402 if (arena->ndirty <= threshold)
1403 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001404 arena_purge_to_limit(tsdn, arena, threshold);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001405 }
Jason Evans05b21be2010-03-14 17:36:10 -07001406}
1407
Jason Evans243f7a02016-02-19 20:09:31 -08001408static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001409arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001410{
Jason Evans9bad0792016-02-21 11:25:02 -08001411 nstime_t time;
Jason Evans243f7a02016-02-19 20:09:31 -08001412
1413 assert(opt_purge == purge_mode_decay);
1414
1415 /* Purge all or nothing if the option is disabled. */
Jason Evans94e7ffa2016-10-10 20:32:19 -07001416 if (arena->decay.time <= 0) {
1417 if (arena->decay.time == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001418 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001419 return;
1420 }
1421
Jason Evans45a5bf62016-10-10 22:15:10 -07001422 nstime_init(&time, 0);
1423 nstime_update(&time);
1424 if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1425 &time) > 0)) {
1426 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001427 * Time went backwards. Move the epoch back in time and
1428 * generate a new deadline, with the expectation that time
1429 * typically flows forward for long enough periods of time that
1430 * epochs complete. Unfortunately, this strategy is susceptible
1431 * to clock jitter triggering premature epoch advances, but
1432 * clock jitter estimation and compensation isn't feasible here
1433 * because calls into this code are event-driven.
Jason Evans45a5bf62016-10-10 22:15:10 -07001434 */
1435 nstime_copy(&arena->decay.epoch, &time);
Jason Evansd419bb02016-10-11 15:30:01 -07001436 arena_decay_deadline_init(arena);
Jason Evans45a5bf62016-10-10 22:15:10 -07001437 } else {
1438 /* Verify that time does not go backwards. */
1439 assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001440 }
1441
Jason Evans243f7a02016-02-19 20:09:31 -08001442 /*
Jason Evansd419bb02016-10-11 15:30:01 -07001443 * If the deadline has been reached, advance to the current epoch and
1444 * purge to the new limit if necessary. Note that dirty pages created
1445 * during the current epoch are not subject to purge until a future
1446 * epoch, so as a result purging only happens during epoch advances.
Jason Evans243f7a02016-02-19 20:09:31 -08001447 */
Jason Evansd419bb02016-10-11 15:30:01 -07001448 if (arena_decay_deadline_reached(arena, &time))
1449 arena_decay_epoch_advance(tsdn, arena, &time);
Jason Evans243f7a02016-02-19 20:09:31 -08001450}
1451
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001452void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001453arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001454{
1455
1456 /* Don't recursively purge. */
1457 if (arena->purging)
1458 return;
1459
Jason Evans243f7a02016-02-19 20:09:31 -08001460 if (opt_purge == purge_mode_ratio)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001461 arena_maybe_purge_ratio(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001462 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001463 arena_maybe_purge_decay(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001464}
1465
Qinfan Wua244e502014-07-21 10:23:36 -07001466static size_t
1467arena_dirty_count(arena_t *arena)
1468{
1469 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001470 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001471 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001472
Jason Evans38e42d32015-03-10 18:15:40 -07001473 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001474 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001475 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001476 size_t npages;
1477
Jason Evansf5c8f372015-03-10 18:29:49 -07001478 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001479 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001480 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001481 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001482 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1483 rdelm);
1484 arena_chunk_map_misc_t *miscelm =
1485 arena_rd_to_miscelm(rdelm);
1486 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001487 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1488 0);
1489 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1490 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1491 npages = arena_mapbits_unallocated_size_get(chunk,
1492 pageind) >> LG_PAGE;
1493 }
Qinfan Wua244e502014-07-21 10:23:36 -07001494 ndirty += npages;
1495 }
1496
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001497 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001498}
1499
1500static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001501arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001502 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001503 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001504{
Jason Evans38e42d32015-03-10 18:15:40 -07001505 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001506 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001507 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001508
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001509 /* Stash runs/chunks according to ndirty_limit. */
Jason Evans38e42d32015-03-10 18:15:40 -07001510 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001511 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001512 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001513 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001514 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001515
Jason Evansf5c8f372015-03-10 18:29:49 -07001516 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001517 extent_node_t *chunkselm_next;
Jason Evanse9012632016-11-03 17:11:01 -07001518 bool zero, commit;
Jason Evansee41ad42015-02-15 18:04:46 -08001519 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001520
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001521 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001522 if (opt_purge == purge_mode_decay && arena->ndirty -
1523 (nstashed + npages) < ndirty_limit)
1524 break;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001525
Jason Evans738e0892015-02-18 01:15:50 -08001526 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001527 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001528 * Allocate. chunkselm remains valid due to the
1529 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001530 */
Jason Evansee41ad42015-02-15 18:04:46 -08001531 zero = false;
Jason Evanse9012632016-11-03 17:11:01 -07001532 commit = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001533 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001534 extent_node_addr_get(chunkselm),
1535 extent_node_size_get(chunkselm), chunksize, &zero,
Jason Evanse9012632016-11-03 17:11:01 -07001536 &commit, false);
Jason Evans99bd94f2015-02-18 16:40:53 -08001537 assert(chunk == extent_node_addr_get(chunkselm));
1538 assert(zero == extent_node_zeroed_get(chunkselm));
1539 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001540 purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001541 assert(npages == (extent_node_size_get(chunkselm) >>
1542 LG_PAGE));
Jason Evansee41ad42015-02-15 18:04:46 -08001543 chunkselm = chunkselm_next;
1544 } else {
1545 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001546 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1547 arena_chunk_map_misc_t *miscelm =
1548 arena_rd_to_miscelm(rdelm);
1549 size_t pageind = arena_miscelm_to_pageind(miscelm);
1550 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001551 size_t run_size =
1552 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001553
Jason Evansee41ad42015-02-15 18:04:46 -08001554 npages = run_size >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001555 if (opt_purge == purge_mode_decay && arena->ndirty -
1556 (nstashed + npages) < ndirty_limit)
1557 break;
Jason Evansee41ad42015-02-15 18:04:46 -08001558
1559 assert(pageind + npages <= chunk_npages);
1560 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1561 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1562
1563 /*
1564 * If purging the spare chunk's run, make it available
1565 * prior to allocation.
1566 */
1567 if (chunk == arena->spare)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001568 arena_chunk_alloc(tsdn, arena);
Jason Evansee41ad42015-02-15 18:04:46 -08001569
1570 /* Temporarily allocate the free dirty run. */
1571 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001572 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001573 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001574 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001575 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001576 assert(qr_next(rdelm, rd_link) == rdelm);
1577 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001578 }
Jason Evans38e42d32015-03-10 18:15:40 -07001579 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001580 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001581
Qinfan Wue9708002014-07-21 18:09:04 -07001582 nstashed += npages;
Jason Evans243f7a02016-02-19 20:09:31 -08001583 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1584 ndirty_limit)
Qinfan Wue9708002014-07-21 18:09:04 -07001585 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001586 }
Qinfan Wue9708002014-07-21 18:09:04 -07001587
1588 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001589}
1590
1591static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001592arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001593 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001594 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001595{
Qinfan Wue9708002014-07-21 18:09:04 -07001596 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001597 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001598 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001599
Jason Evansaa5113b2014-01-14 16:23:03 -08001600 if (config_stats)
1601 nmadvise = 0;
1602 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001603
Jason Evansc1e00ef2016-05-10 22:21:10 -07001604 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001605 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001606 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001607 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001608 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001609
Jason Evansf5c8f372015-03-10 18:29:49 -07001610 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001611 /*
1612 * Don't actually purge the chunk here because 1)
1613 * chunkselm is embedded in the chunk and must remain
1614 * valid, and 2) we deallocate the chunk in
1615 * arena_unstash_purged(), where it is destroyed,
1616 * decommitted, or purged, depending on chunk
1617 * deallocation policy.
1618 */
Jason Evansee41ad42015-02-15 18:04:46 -08001619 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001620 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001621 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001622 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001623 size_t pageind, run_size, flag_unzeroed, flags, i;
1624 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001625 arena_chunk_t *chunk =
1626 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001627 arena_chunk_map_misc_t *miscelm =
1628 arena_rd_to_miscelm(rdelm);
1629 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001630 run_size = arena_mapbits_large_size_get(chunk, pageind);
1631 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001632
Jason Evansee41ad42015-02-15 18:04:46 -08001633 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001634 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1635 assert(!arena_mapbits_decommitted_get(chunk,
1636 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001637 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1638 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1639 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001640 flag_unzeroed = 0;
1641 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001642 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001643 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001644 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001645 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1646 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001647 }
Jason Evans45186f02015-08-10 23:03:34 -07001648 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1649 flags);
1650 arena_mapbits_large_set(chunk, pageind, run_size,
1651 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001652
1653 /*
Jason Evans45186f02015-08-10 23:03:34 -07001654 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001655 * chunk_purge_wrapper() has returned whether the pages
1656 * were zeroed as a side effect of purging. This chunk
1657 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001658 * isn't currently owned by this thread, because the run
1659 * is marked as allocated, thus protecting it from being
1660 * modified by any other thread. As long as these
1661 * writes don't perturb the first and last elements'
1662 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1663 */
Jason Evans45186f02015-08-10 23:03:34 -07001664 for (i = 1; i < npages-1; i++) {
1665 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001666 flag_unzeroed);
1667 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001668 }
Qinfan Wue9708002014-07-21 18:09:04 -07001669
Jason Evansaa5113b2014-01-14 16:23:03 -08001670 npurged += npages;
1671 if (config_stats)
1672 nmadvise++;
1673 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001674 malloc_mutex_lock(tsdn, &arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001675
1676 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001677 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001678 arena->stats.purged += npurged;
1679 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001680
1681 return (npurged);
1682}
1683
1684static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001685arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001686 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001687 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001688{
Jason Evans38e42d32015-03-10 18:15:40 -07001689 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001690 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001691
Jason Evansb49a3342015-07-28 11:28:19 -04001692 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001693 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001694 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001695 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1696 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001697 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001698 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001699 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001700 void *addr = extent_node_addr_get(chunkselm);
1701 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001702 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001703 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001704 extent_node_dirty_remove(chunkselm);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001705 arena_node_dalloc(tsdn, arena, chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001706 chunkselm = chunkselm_next;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001707 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
Jason Evansb2c0d632016-04-13 23:36:15 -07001708 size, zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001709 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001710 arena_chunk_t *chunk =
1711 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001712 arena_chunk_map_misc_t *miscelm =
1713 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001714 size_t pageind = arena_miscelm_to_pageind(miscelm);
1715 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1716 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001717 arena_run_t *run = &miscelm->run;
1718 qr_remove(rdelm, rd_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001719 arena_run_dalloc(tsdn, arena, run, false, true,
Jason Evansb2c0d632016-04-13 23:36:15 -07001720 decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001721 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001722 }
1723}
1724
Jason Evans243f7a02016-02-19 20:09:31 -08001725/*
1726 * NB: ndirty_limit is interpreted differently depending on opt_purge:
1727 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
1728 * desired state:
1729 * (arena->ndirty <= ndirty_limit)
1730 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1731 * violating the invariant:
1732 * (arena->ndirty >= ndirty_limit)
1733 */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001734static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001735arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
Jason Evanse476f8a2010-01-16 09:53:50 -08001736{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001737 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001738 size_t npurge, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001739 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001740 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001741
Jason Evans0a9f9a42015-06-22 18:50:32 -07001742 arena->purging = true;
1743
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001744 /*
1745 * Calls to arena_dirty_count() are disabled even for debug builds
1746 * because overhead grows nonlinearly as memory usage increases.
1747 */
1748 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001749 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001750 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001751 }
Jason Evans243f7a02016-02-19 20:09:31 -08001752 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1753 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001754
1755 qr_new(&purge_runs_sentinel, rd_link);
1756 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1757
Jason Evansc1e00ef2016-05-10 22:21:10 -07001758 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001759 &purge_runs_sentinel, &purge_chunks_sentinel);
1760 if (npurge == 0)
1761 goto label_return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001762 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -07001763 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001764 assert(npurged == npurge);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001765 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001766 &purge_chunks_sentinel);
Jason Evanse476f8a2010-01-16 09:53:50 -08001767
Jason Evans7372b152012-02-10 20:22:09 -08001768 if (config_stats)
1769 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001770
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001771label_return:
Jason Evans0a9f9a42015-06-22 18:50:32 -07001772 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001773}
1774
Jason Evans6005f072010-09-30 16:55:08 -07001775void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001776arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
Jason Evans6005f072010-09-30 16:55:08 -07001777{
1778
Jason Evansc1e00ef2016-05-10 22:21:10 -07001779 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001780 if (all)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001781 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001782 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001783 arena_maybe_purge(tsdn, arena);
1784 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans6005f072010-09-30 16:55:08 -07001785}
1786
Jason Evanse476f8a2010-01-16 09:53:50 -08001787static void
Jason Evans19ff2ce2016-04-22 14:37:17 -07001788arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1789{
1790 size_t pageind, npages;
1791
1792 cassert(config_prof);
1793 assert(opt_prof);
1794
1795 /*
1796 * Iterate over the allocated runs and remove profiled allocations from
1797 * the sample set.
1798 */
1799 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1800 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1801 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1802 void *ptr = (void *)((uintptr_t)chunk + (pageind
1803 << LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001804 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1805 config_prof);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001806
1807 prof_free(tsd, ptr, usize);
1808 npages = arena_mapbits_large_size_get(chunk,
1809 pageind) >> LG_PAGE;
1810 } else {
1811 /* Skip small run. */
1812 size_t binind = arena_mapbits_binind_get(chunk,
1813 pageind);
1814 arena_bin_info_t *bin_info =
1815 &arena_bin_info[binind];
1816 npages = bin_info->run_size >> LG_PAGE;
1817 }
1818 } else {
1819 /* Skip unallocated run. */
1820 npages = arena_mapbits_unallocated_size_get(chunk,
1821 pageind) >> LG_PAGE;
1822 }
1823 assert(pageind + npages <= chunk_npages);
1824 }
1825}
1826
1827void
1828arena_reset(tsd_t *tsd, arena_t *arena)
1829{
1830 unsigned i;
1831 extent_node_t *node;
1832
1833 /*
1834 * Locking in this function is unintuitive. The caller guarantees that
1835 * no concurrent operations are happening in this arena, but there are
1836 * still reasons that some locking is necessary:
1837 *
1838 * - Some of the functions in the transitive closure of calls assume
1839 * appropriate locks are held, and in some cases these locks are
1840 * temporarily dropped to avoid lock order reversal or deadlock due to
1841 * reentry.
1842 * - mallctl("epoch", ...) may concurrently refresh stats. While
1843 * strictly speaking this is a "concurrent operation", disallowing
1844 * stats refreshes would impose an inconvenient burden.
1845 */
1846
1847 /* Remove large allocations from prof sample set. */
1848 if (config_prof && opt_prof) {
1849 ql_foreach(node, &arena->achunks, ql_link) {
1850 arena_achunk_prof_reset(tsd, arena,
1851 extent_node_addr_get(node));
1852 }
1853 }
1854
Jason Evans7e674952016-04-25 13:26:54 -07001855 /* Reset curruns for large size classes. */
1856 if (config_stats) {
1857 for (i = 0; i < nlclasses; i++)
1858 arena->stats.lstats[i].curruns = 0;
1859 }
1860
Jason Evans19ff2ce2016-04-22 14:37:17 -07001861 /* Huge allocations. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001862 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001863 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1864 ql_last(&arena->huge, ql_link)) {
1865 void *ptr = extent_node_addr_get(node);
Jason Evans7e674952016-04-25 13:26:54 -07001866 size_t usize;
Jason Evans19ff2ce2016-04-22 14:37:17 -07001867
Jason Evansc1e00ef2016-05-10 22:21:10 -07001868 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001869 if (config_stats || (config_prof && opt_prof))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001870 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans7e674952016-04-25 13:26:54 -07001871 /* Remove huge allocation from prof sample set. */
1872 if (config_prof && opt_prof)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001873 prof_free(tsd, ptr, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001874 huge_dalloc(tsd_tsdn(tsd), ptr);
1875 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001876 /* Cancel out unwanted effects on stats. */
1877 if (config_stats)
1878 arena_huge_reset_stats_cancel(arena, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001879 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001880 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001881
Jason Evansc1e00ef2016-05-10 22:21:10 -07001882 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001883
1884 /* Bins. */
1885 for (i = 0; i < NBINS; i++) {
1886 arena_bin_t *bin = &arena->bins[i];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001887 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001888 bin->runcur = NULL;
1889 arena_run_heap_new(&bin->runs);
1890 if (config_stats) {
1891 bin->stats.curregs = 0;
1892 bin->stats.curruns = 0;
1893 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001894 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001895 }
1896
1897 /*
1898 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1899 * chains directly correspond.
1900 */
1901 qr_new(&arena->runs_dirty, rd_link);
1902 for (node = qr_next(&arena->chunks_cache, cc_link);
1903 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1904 qr_new(&node->rd, rd_link);
1905 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1906 }
1907
1908 /* Arena chunks. */
1909 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1910 ql_last(&arena->achunks, ql_link)) {
1911 ql_remove(&arena->achunks, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001912 arena_chunk_discard(tsd_tsdn(tsd), arena,
1913 extent_node_addr_get(node));
Jason Evans19ff2ce2016-04-22 14:37:17 -07001914 }
1915
1916 /* Spare. */
1917 if (arena->spare != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001918 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001919 arena->spare = NULL;
1920 }
1921
1922 assert(!arena->purging);
1923 arena->nactive = 0;
1924
Jason Evansf193fd82016-04-08 14:17:57 -07001925 for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
1926 i++)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001927 arena_run_heap_new(&arena->runs_avail[i]);
1928
Jason Evansc1e00ef2016-05-10 22:21:10 -07001929 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001930}
1931
1932static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001933arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001934 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1935 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001936{
Jason Evansaa5113b2014-01-14 16:23:03 -08001937 size_t size = *p_size;
1938 size_t run_ind = *p_run_ind;
1939 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001940
1941 /* Try to coalesce forward. */
1942 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001943 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001944 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1945 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1946 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001947 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1948 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001949 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001950
1951 /*
1952 * Remove successor from runs_avail; the coalesced run is
1953 * inserted later.
1954 */
Jason Evans203484e2012-05-02 00:30:36 -07001955 assert(arena_mapbits_unallocated_size_get(chunk,
1956 run_ind+run_pages+nrun_pages-1) == nrun_size);
1957 assert(arena_mapbits_dirty_get(chunk,
1958 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001959 assert(arena_mapbits_decommitted_get(chunk,
1960 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07001961 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08001962
Jason Evansee41ad42015-02-15 18:04:46 -08001963 /*
1964 * If the successor is dirty, remove it from the set of dirty
1965 * pages.
1966 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07001967 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08001968 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07001969 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07001970 }
1971
Jason Evanse476f8a2010-01-16 09:53:50 -08001972 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07001973 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001974
Jason Evans203484e2012-05-02 00:30:36 -07001975 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1976 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1977 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08001978 }
1979
1980 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001981 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1982 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07001983 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1984 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001985 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1986 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07001987 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001988
Jason Evans12ca9142010-10-17 19:56:09 -07001989 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001990
1991 /*
1992 * Remove predecessor from runs_avail; the coalesced run is
1993 * inserted later.
1994 */
Jason Evans203484e2012-05-02 00:30:36 -07001995 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1996 prun_size);
1997 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001998 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
1999 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002000 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002001
Jason Evansee41ad42015-02-15 18:04:46 -08002002 /*
2003 * If the predecessor is dirty, remove it from the set of dirty
2004 * pages.
2005 */
2006 if (flag_dirty != 0) {
2007 arena_run_dirty_remove(arena, chunk, run_ind,
2008 prun_pages);
2009 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07002010
Jason Evanse476f8a2010-01-16 09:53:50 -08002011 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002012 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002013
Jason Evans203484e2012-05-02 00:30:36 -07002014 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2015 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2016 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002017 }
2018
Jason Evansaa5113b2014-01-14 16:23:03 -08002019 *p_size = size;
2020 *p_run_ind = run_ind;
2021 *p_run_pages = run_pages;
2022}
2023
Jason Evans8fadb1a2015-08-04 10:49:46 -07002024static size_t
2025arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2026 size_t run_ind)
2027{
2028 size_t size;
2029
2030 assert(run_ind >= map_bias);
2031 assert(run_ind < chunk_npages);
2032
2033 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2034 size = arena_mapbits_large_size_get(chunk, run_ind);
2035 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2036 run_ind+(size>>LG_PAGE)-1) == 0);
2037 } else {
2038 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2039 size = bin_info->run_size;
2040 }
2041
2042 return (size);
2043}
2044
Jason Evansaa5113b2014-01-14 16:23:03 -08002045static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002046arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
Jason Evansb2c0d632016-04-13 23:36:15 -07002047 bool cleaned, bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08002048{
2049 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002050 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002051 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08002052
2053 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002054 miscelm = arena_run_to_miscelm(run);
2055 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08002056 assert(run_ind >= map_bias);
2057 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002058 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08002059 run_pages = (size >> LG_PAGE);
Jason Evans40ee9aa2016-02-27 12:34:50 -08002060 arena_nactive_sub(arena, run_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -08002061
2062 /*
2063 * The run is dirty if the caller claims to have dirtied it, as well as
2064 * if it was already dirty before being allocated and the caller
2065 * doesn't claim to have cleaned it.
2066 */
2067 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2068 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002069 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2070 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08002071 dirty = true;
2072 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002073 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08002074
2075 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07002076 if (dirty || decommitted) {
2077 size_t flags = flag_dirty | flag_decommitted;
2078 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002079 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002080 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002081 } else {
2082 arena_mapbits_unallocated_set(chunk, run_ind, size,
2083 arena_mapbits_unzeroed_get(chunk, run_ind));
2084 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2085 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2086 }
2087
Jason Evans8fadb1a2015-08-04 10:49:46 -07002088 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2089 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08002090
Jason Evanse476f8a2010-01-16 09:53:50 -08002091 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07002092 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2093 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2094 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2095 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002096 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2097 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07002098 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07002099
Jason Evans070b3c32014-08-14 14:45:58 -07002100 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08002101 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002102
Jason Evans203484e2012-05-02 00:30:36 -07002103 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07002104 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07002105 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07002106 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07002107 arena_chunk_dalloc(tsdn, arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07002108 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002109
Jason Evans4fb7f512010-01-27 18:27:09 -08002110 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07002111 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08002112 * deallocated above, since in that case it is the spare. Waiting
2113 * until after possible chunk deallocation to do dirty processing
2114 * allows for an old spare to be fully deallocated, thus decreasing the
2115 * chances of spuriously crossing the dirty page purging threshold.
2116 */
Jason Evans8d4203c2010-04-13 20:53:21 -07002117 if (dirty)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002118 arena_maybe_purge(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002119}
2120
2121static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002122arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002123 arena_run_t *run, size_t oldsize, size_t newsize)
Jason Evanse476f8a2010-01-16 09:53:50 -08002124{
Jason Evans0c5dd032014-09-29 01:31:39 -07002125 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2126 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002127 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002128 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002129 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2130 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2131 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002132
2133 assert(oldsize > newsize);
2134
2135 /*
2136 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002137 * leading run as separately allocated. Set the last element of each
2138 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002139 */
Jason Evans203484e2012-05-02 00:30:36 -07002140 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002141 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2142 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2143 pageind+head_npages-1)));
2144 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2145 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002146
Jason Evans7372b152012-02-10 20:22:09 -08002147 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002148 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002149 assert(arena_mapbits_large_size_get(chunk,
2150 pageind+head_npages+tail_npages-1) == 0);
2151 assert(arena_mapbits_dirty_get(chunk,
2152 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07002153 }
Jason Evansd8ceef62012-05-10 20:59:39 -07002154 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002155 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2156 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002157
Jason Evansc1e00ef2016-05-10 22:21:10 -07002158 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
Jason Evansb2c0d632016-04-13 23:36:15 -07002159 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002160}
2161
2162static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002163arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002164 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08002165{
Jason Evans0c5dd032014-09-29 01:31:39 -07002166 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2167 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002168 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002169 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002170 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2171 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2172 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07002173 arena_chunk_map_misc_t *tail_miscelm;
2174 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002175
2176 assert(oldsize > newsize);
2177
2178 /*
2179 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002180 * trailing run as separately allocated. Set the last element of each
2181 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002182 */
Jason Evans203484e2012-05-02 00:30:36 -07002183 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002184 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2185 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2186 pageind+head_npages-1)));
2187 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2188 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002189
Jason Evans203484e2012-05-02 00:30:36 -07002190 if (config_debug) {
2191 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2192 assert(arena_mapbits_large_size_get(chunk,
2193 pageind+head_npages+tail_npages-1) == 0);
2194 assert(arena_mapbits_dirty_get(chunk,
2195 pageind+head_npages+tail_npages-1) == flag_dirty);
2196 }
2197 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002198 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2199 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002200
Jason Evans61a6dfc2016-03-23 16:04:38 -07002201 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
Jason Evans0c5dd032014-09-29 01:31:39 -07002202 tail_run = &tail_miscelm->run;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002203 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
Jason Evansb2c0d632016-04-13 23:36:15 -07002204 != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002205}
2206
Jason Evanse7a10582012-02-13 17:36:52 -08002207static void
2208arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2209{
Jason Evans0c5dd032014-09-29 01:31:39 -07002210 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08002211
Jason Evansc6a2c392016-03-26 17:30:37 -07002212 arena_run_heap_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08002213}
2214
2215static arena_run_t *
2216arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2217{
Jason Evansc6a2c392016-03-26 17:30:37 -07002218 arena_chunk_map_misc_t *miscelm;
2219
2220 miscelm = arena_run_heap_remove_first(&bin->runs);
2221 if (miscelm == NULL)
2222 return (NULL);
2223 if (config_stats)
2224 bin->stats.reruns++;
2225
2226 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08002227}
2228
2229static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002230arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002231{
Jason Evanse476f8a2010-01-16 09:53:50 -08002232 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002233 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002234 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08002235
2236 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08002237 run = arena_bin_nonfull_run_tryget(bin);
2238 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002239 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002240 /* No existing runs have any space available. */
2241
Jason Evans49f7e8f2011-03-15 13:59:15 -07002242 binind = arena_bin_index(arena, bin);
2243 bin_info = &arena_bin_info[binind];
2244
Jason Evanse476f8a2010-01-16 09:53:50 -08002245 /* Allocate a new run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002246 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002247 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002248 malloc_mutex_lock(tsdn, &arena->lock);
2249 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07002250 if (run != NULL) {
2251 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07002252 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002253 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07002254 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07002255 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002256 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002257 /********************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002258 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002259 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08002260 if (config_stats) {
2261 bin->stats.nruns++;
2262 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08002263 }
Jason Evanse00572b2010-03-14 19:43:56 -07002264 return (run);
2265 }
2266
2267 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002268 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07002269 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07002270 * so search one more time.
2271 */
Jason Evanse7a10582012-02-13 17:36:52 -08002272 run = arena_bin_nonfull_run_tryget(bin);
2273 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07002274 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07002275
2276 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002277}
2278
Jason Evans1e0a6362010-03-13 13:41:58 -08002279/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08002280static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002281arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002282{
Jason Evansd01fd192015-08-19 15:21:32 -07002283 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002284 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07002285 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002286
Jason Evans49f7e8f2011-03-15 13:59:15 -07002287 binind = arena_bin_index(arena, bin);
2288 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07002289 bin->runcur = NULL;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002290 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002291 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2292 /*
2293 * Another thread updated runcur while this one ran without the
2294 * bin lock in arena_bin_nonfull_run_get().
2295 */
Dmitry-Mea306a602015-09-04 13:15:28 +03002296 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07002297 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002298 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07002299 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07002300 arena_chunk_t *chunk;
2301
2302 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002303 * arena_run_alloc_small() may have allocated run, or
2304 * it may have pulled run from the bin's run tree.
2305 * Therefore it is unsafe to make any assumptions about
2306 * how run has previously been used, and
2307 * arena_bin_lower_run() must be called, as if a region
2308 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07002309 */
2310 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansb2c0d632016-04-13 23:36:15 -07002311 if (run->nfree == bin_info->nregs) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002312 arena_dalloc_bin_run(tsdn, arena, chunk, run,
Jason Evansb2c0d632016-04-13 23:36:15 -07002313 bin);
2314 } else
Jason Evans8de6a022010-10-17 20:57:30 -07002315 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002316 }
2317 return (ret);
2318 }
2319
2320 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002321 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07002322
2323 bin->runcur = run;
2324
Jason Evanse476f8a2010-01-16 09:53:50 -08002325 assert(bin->runcur->nfree > 0);
2326
Jason Evans49f7e8f2011-03-15 13:59:15 -07002327 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08002328}
2329
Jason Evans86815df2010-03-13 20:32:56 -08002330void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002331arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
Jason Evans243f7a02016-02-19 20:09:31 -08002332 szind_t binind, uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08002333{
2334 unsigned i, nfill;
2335 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002336
2337 assert(tbin->ncached == 0);
2338
Jason Evansc1e00ef2016-05-10 22:21:10 -07002339 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2340 prof_idump(tsdn);
Jason Evanse69bee02010-03-15 22:25:23 -07002341 bin = &arena->bins[binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002342 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002343 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2344 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002345 arena_run_t *run;
2346 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002347 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002348 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002349 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002350 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002351 if (ptr == NULL) {
2352 /*
2353 * OOM. tbin->avail isn't yet filled down to its first
2354 * element, so the successful allocations (if any) must
Qi Wangf4a0f322015-10-27 15:12:10 -07002355 * be moved just before tbin->avail before bailing out.
Jason Evansf11a6772014-10-05 13:05:10 -07002356 */
2357 if (i > 0) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002358 memmove(tbin->avail - i, tbin->avail - nfill,
Jason Evansf11a6772014-10-05 13:05:10 -07002359 i * sizeof(void *));
2360 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002361 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002362 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002363 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002364 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2365 true);
2366 }
Jason Evans9c43c132011-03-18 10:53:15 -07002367 /* Insert such that low regions get used first. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002368 *(tbin->avail - nfill + i) = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002369 }
Jason Evans7372b152012-02-10 20:22:09 -08002370 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002371 bin->stats.nmalloc += i;
2372 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002373 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002374 bin->stats.nfills++;
2375 tbin->tstats.nrequests = 0;
2376 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002377 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002378 tbin->ncached = i;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002379 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002380}
Jason Evanse476f8a2010-01-16 09:53:50 -08002381
Jason Evans122449b2012-04-06 00:35:09 -07002382void
2383arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2384{
2385
Chris Petersona82070e2016-03-27 23:28:39 -07002386 size_t redzone_size = bin_info->redzone_size;
2387
Jason Evans122449b2012-04-06 00:35:09 -07002388 if (zero) {
Chris Petersona82070e2016-03-27 23:28:39 -07002389 memset((void *)((uintptr_t)ptr - redzone_size),
2390 JEMALLOC_ALLOC_JUNK, redzone_size);
2391 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2392 JEMALLOC_ALLOC_JUNK, redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07002393 } else {
Chris Petersona82070e2016-03-27 23:28:39 -07002394 memset((void *)((uintptr_t)ptr - redzone_size),
2395 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
Jason Evans122449b2012-04-06 00:35:09 -07002396 }
2397}
2398
Jason Evans0d6c5d82013-12-17 15:14:36 -08002399#ifdef JEMALLOC_JET
2400#undef arena_redzone_corruption
Jason Evansab0cfe02016-04-18 15:11:20 -07002401#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
Jason Evans0d6c5d82013-12-17 15:14:36 -08002402#endif
2403static void
2404arena_redzone_corruption(void *ptr, size_t usize, bool after,
2405 size_t offset, uint8_t byte)
2406{
2407
Jason Evans5fae7dc2015-07-23 13:56:25 -07002408 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2409 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002410 after ? "after" : "before", ptr, usize, byte);
2411}
2412#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002413#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002414#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2415arena_redzone_corruption_t *arena_redzone_corruption =
Jason Evansab0cfe02016-04-18 15:11:20 -07002416 JEMALLOC_N(n_arena_redzone_corruption);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002417#endif
2418
2419static void
2420arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002421{
Jason Evans122449b2012-04-06 00:35:09 -07002422 bool error = false;
2423
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002424 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002425 size_t size = bin_info->reg_size;
2426 size_t redzone_size = bin_info->redzone_size;
2427 size_t i;
2428
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002429 for (i = 1; i <= redzone_size; i++) {
2430 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
Chris Petersona82070e2016-03-27 23:28:39 -07002431 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002432 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002433 arena_redzone_corruption(ptr, size, false, i,
2434 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002435 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002436 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002437 }
2438 }
2439 for (i = 0; i < redzone_size; i++) {
2440 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
Chris Petersona82070e2016-03-27 23:28:39 -07002441 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002442 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002443 arena_redzone_corruption(ptr, size, true, i,
2444 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002445 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002446 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002447 }
Jason Evans122449b2012-04-06 00:35:09 -07002448 }
2449 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002450
Jason Evans122449b2012-04-06 00:35:09 -07002451 if (opt_abort && error)
2452 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002453}
Jason Evans122449b2012-04-06 00:35:09 -07002454
Jason Evans6b694c42014-01-07 16:47:56 -08002455#ifdef JEMALLOC_JET
2456#undef arena_dalloc_junk_small
Jason Evansab0cfe02016-04-18 15:11:20 -07002457#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
Jason Evans6b694c42014-01-07 16:47:56 -08002458#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002459void
2460arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2461{
2462 size_t redzone_size = bin_info->redzone_size;
2463
2464 arena_redzones_validate(ptr, bin_info, false);
Chris Petersona82070e2016-03-27 23:28:39 -07002465 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
Jason Evans122449b2012-04-06 00:35:09 -07002466 bin_info->reg_interval);
2467}
Jason Evans6b694c42014-01-07 16:47:56 -08002468#ifdef JEMALLOC_JET
2469#undef arena_dalloc_junk_small
2470#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2471arena_dalloc_junk_small_t *arena_dalloc_junk_small =
Jason Evansab0cfe02016-04-18 15:11:20 -07002472 JEMALLOC_N(n_arena_dalloc_junk_small);
Jason Evans6b694c42014-01-07 16:47:56 -08002473#endif
Jason Evans122449b2012-04-06 00:35:09 -07002474
Jason Evans0d6c5d82013-12-17 15:14:36 -08002475void
2476arena_quarantine_junk_small(void *ptr, size_t usize)
2477{
Jason Evansd01fd192015-08-19 15:21:32 -07002478 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002479 arena_bin_info_t *bin_info;
2480 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002481 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002482 assert(opt_quarantine);
2483 assert(usize <= SMALL_MAXCLASS);
2484
Jason Evans155bfa72014-10-05 17:54:10 -07002485 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002486 bin_info = &arena_bin_info[binind];
2487 arena_redzones_validate(ptr, bin_info, true);
2488}
2489
Jason Evans578cd162016-02-19 18:40:03 -08002490static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002491arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002492{
2493 void *ret;
2494 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002495 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002496 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002497
Jason Evansb1726102012-02-28 16:50:47 -08002498 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002499 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002500 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002501
Jason Evansc1e00ef2016-05-10 22:21:10 -07002502 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002503 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002504 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002505 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002506 ret = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002507
2508 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002509 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002510 return (NULL);
2511 }
2512
Jason Evans7372b152012-02-10 20:22:09 -08002513 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002514 bin->stats.nmalloc++;
2515 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002516 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002517 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002518 malloc_mutex_unlock(tsdn, &bin->lock);
2519 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2520 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002521
Jason Evans551ebc42014-10-03 10:16:09 -07002522 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002523 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002524 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002525 arena_alloc_junk_small(ret,
2526 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002527 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002528 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002529 }
Jason Evans0c516a02016-02-25 15:29:49 -08002530 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002531 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002532 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002533 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2534 true);
2535 }
Jason Evans0c516a02016-02-25 15:29:49 -08002536 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2537 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002538 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002539
Jason Evansc1e00ef2016-05-10 22:21:10 -07002540 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002541 return (ret);
2542}
2543
2544void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002545arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002546{
2547 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002548 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002549 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002550 arena_run_t *run;
2551 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002552 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002553
2554 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002555 usize = index2size(binind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002556 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002557 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002558 uint64_t r;
2559
Jason Evans8a03cf02015-05-04 09:58:36 -07002560 /*
2561 * Compute a uniformly distributed offset within the first page
2562 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2563 * for 4 KiB pages and 64-byte cachelines.
2564 */
Jason Evans34676d32016-02-09 16:28:40 -08002565 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
Jason Evans8a03cf02015-05-04 09:58:36 -07002566 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2567 } else
2568 random_offset = 0;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002569 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002570 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002571 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002572 return (NULL);
2573 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002574 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002575 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2576 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002577 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002578 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002579
Jason Evans7372b152012-02-10 20:22:09 -08002580 arena->stats.nmalloc_large++;
2581 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002582 arena->stats.allocated_large += usize;
2583 arena->stats.lstats[index].nmalloc++;
2584 arena->stats.lstats[index].nrequests++;
2585 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002586 }
Jason Evans7372b152012-02-10 20:22:09 -08002587 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002588 idump = arena_prof_accum_locked(arena, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002589 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002590 if (config_prof && idump)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002591 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002592
Jason Evans551ebc42014-10-03 10:16:09 -07002593 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002594 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002595 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002596 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002597 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002598 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002599 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002600 }
2601
Jason Evansc1e00ef2016-05-10 22:21:10 -07002602 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002603 return (ret);
2604}
2605
Jason Evans578cd162016-02-19 18:40:03 -08002606void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002607arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
Jason Evans66cd9532016-04-22 14:34:14 -07002608 bool zero)
Jason Evans578cd162016-02-19 18:40:03 -08002609{
2610
Jason Evansc1e00ef2016-05-10 22:21:10 -07002611 assert(!tsdn_null(tsdn) || arena != NULL);
2612
2613 if (likely(!tsdn_null(tsdn)))
2614 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans578cd162016-02-19 18:40:03 -08002615 if (unlikely(arena == NULL))
2616 return (NULL);
2617
2618 if (likely(size <= SMALL_MAXCLASS))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002619 return (arena_malloc_small(tsdn, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002620 if (likely(size <= large_maxclass))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002621 return (arena_malloc_large(tsdn, arena, ind, zero));
2622 return (huge_malloc(tsdn, arena, index2size(ind), zero));
Jason Evans578cd162016-02-19 18:40:03 -08002623}
2624
Jason Evanse476f8a2010-01-16 09:53:50 -08002625/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002626static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002627arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002628 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002629{
2630 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002631 size_t alloc_size, leadsize, trailsize;
2632 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002633 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002634 arena_chunk_map_misc_t *miscelm;
2635 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002636
Jason Evansc1e00ef2016-05-10 22:21:10 -07002637 assert(!tsdn_null(tsdn) || arena != NULL);
Jason Evans50883de2015-07-23 17:13:18 -07002638 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002639
Jason Evansc1e00ef2016-05-10 22:21:10 -07002640 if (likely(!tsdn_null(tsdn)))
2641 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002642 if (unlikely(arena == NULL))
2643 return (NULL);
2644
Jason Evans93443682010-10-20 17:39:18 -07002645 alignment = PAGE_CEILING(alignment);
Jason Evans05a9e4a2016-06-07 14:19:50 -07002646 alloc_size = usize + large_pad + alignment - PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002647
Jason Evansc1e00ef2016-05-10 22:21:10 -07002648 malloc_mutex_lock(tsdn, &arena->lock);
2649 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002650 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002651 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002652 return (NULL);
2653 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002654 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002655 miscelm = arena_run_to_miscelm(run);
2656 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002657
Jason Evans0c5dd032014-09-29 01:31:39 -07002658 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2659 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002660 assert(alloc_size >= leadsize + usize);
2661 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002662 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002663 arena_chunk_map_misc_t *head_miscelm = miscelm;
2664 arena_run_t *head_run = run;
2665
Jason Evans61a6dfc2016-03-23 16:04:38 -07002666 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002667 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2668 LG_PAGE));
2669 run = &miscelm->run;
2670
Jason Evansc1e00ef2016-05-10 22:21:10 -07002671 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
Jason Evans0c5dd032014-09-29 01:31:39 -07002672 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002673 }
2674 if (trailsize != 0) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002675 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
Jason Evans50883de2015-07-23 17:13:18 -07002676 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002677 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002678 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2679 size_t run_ind =
2680 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002681 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2682 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2683 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002684
Jason Evansde249c82015-08-09 16:47:27 -07002685 assert(decommitted); /* Cause of OOM. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002686 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2687 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002688 return (NULL);
2689 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002690 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002691
Jason Evans7372b152012-02-10 20:22:09 -08002692 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002693 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002694
Jason Evans7372b152012-02-10 20:22:09 -08002695 arena->stats.nmalloc_large++;
2696 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002697 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002698 arena->stats.lstats[index].nmalloc++;
2699 arena->stats.lstats[index].nrequests++;
2700 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002701 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002702 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002703
Jason Evans551ebc42014-10-03 10:16:09 -07002704 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002705 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002706 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002707 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002708 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002709 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002710 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002711 return (ret);
2712}
2713
Jason Evans88fef7c2015-02-12 14:06:37 -08002714void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002715arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002716 bool zero, tcache_t *tcache)
2717{
2718 void *ret;
2719
Jason Evans8a03cf02015-05-04 09:58:36 -07002720 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002721 && (usize & PAGE_MASK) == 0))) {
2722 /* Small; alignment doesn't require special run placement. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002723 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002724 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002725 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002726 /*
2727 * Large; alignment doesn't require special run placement.
2728 * However, the cached pointer may be at a random offset from
2729 * the base of the run, so do some bit manipulation to retrieve
2730 * the base.
2731 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002732 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002733 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002734 if (config_cache_oblivious)
2735 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2736 } else {
Jason Evans676df882015-09-11 20:50:20 -07002737 if (likely(usize <= large_maxclass)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002738 ret = arena_palloc_large(tsdn, arena, usize, alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002739 zero);
2740 } else if (likely(alignment <= chunksize))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002741 ret = huge_malloc(tsdn, arena, usize, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002742 else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002743 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002744 }
2745 }
2746 return (ret);
2747}
2748
Jason Evans0b270a92010-03-31 16:45:04 -07002749void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002750arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
Jason Evans0b270a92010-03-31 16:45:04 -07002751{
2752 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002753 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002754 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002755
Jason Evans78f73522012-04-18 13:38:40 -07002756 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002757 assert(ptr != NULL);
2758 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002759 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2760 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002761 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002762
2763 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002764 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002765 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002766 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002767 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002768
Jason Evansc1e00ef2016-05-10 22:21:10 -07002769 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2770 assert(isalloc(tsdn, ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002771}
Jason Evans6109fe02010-02-10 10:37:56 -08002772
Jason Evanse476f8a2010-01-16 09:53:50 -08002773static void
Jason Evans088e6a02010-10-18 00:04:44 -07002774arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002775 arena_bin_t *bin)
2776{
Jason Evanse476f8a2010-01-16 09:53:50 -08002777
Jason Evans19b3d612010-03-18 20:36:40 -07002778 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002779 if (run == bin->runcur)
2780 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002781 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002782 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002783 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002784 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2785
Jason Evansc6a2c392016-03-26 17:30:37 -07002786 /*
2787 * The following block's conditional is necessary because if the
2788 * run only contains one region, then it never gets inserted
2789 * into the non-full runs tree.
2790 */
Jason Evans49f7e8f2011-03-15 13:59:15 -07002791 if (bin_info->nregs != 1) {
Jason Evansc6a2c392016-03-26 17:30:37 -07002792 arena_chunk_map_misc_t *miscelm =
2793 arena_run_to_miscelm(run);
2794
2795 arena_run_heap_remove(&bin->runs, miscelm);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002796 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002797 }
Jason Evans088e6a02010-10-18 00:04:44 -07002798}
2799
2800static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002801arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002802 arena_run_t *run, arena_bin_t *bin)
Jason Evans088e6a02010-10-18 00:04:44 -07002803{
Jason Evans088e6a02010-10-18 00:04:44 -07002804
2805 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002806
Jason Evansc1e00ef2016-05-10 22:21:10 -07002807 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002808 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002809 malloc_mutex_lock(tsdn, &arena->lock);
2810 arena_run_dalloc(tsdn, arena, run, true, false, false);
2811 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002812 /****************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002813 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002814 if (config_stats)
2815 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002816}
2817
Jason Evans940a2e02010-10-17 17:51:37 -07002818static void
2819arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2820 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002821{
Jason Evanse476f8a2010-01-16 09:53:50 -08002822
Jason Evans8de6a022010-10-17 20:57:30 -07002823 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002824 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2825 * non-full run. It is okay to NULL runcur out rather than proactively
2826 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002827 */
Jason Evanse7a10582012-02-13 17:36:52 -08002828 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002829 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002830 if (bin->runcur->nfree > 0)
2831 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002832 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002833 if (config_stats)
2834 bin->stats.reruns++;
2835 } else
2836 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002837}
2838
Jason Evansfc0b3b72014-10-09 17:54:06 -07002839static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002840arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002841 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002842{
Jason Evans0c5dd032014-09-29 01:31:39 -07002843 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002844 arena_run_t *run;
2845 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002846 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002847 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002848
Jason Evansae4c7b42012-04-02 07:04:34 -07002849 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002850 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002851 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002852 binind = run->binind;
2853 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002854 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002855
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002856 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002857 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002858
2859 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002860 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002861 arena_dissociate_bin_run(chunk, run, bin);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002862 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002863 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002864 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002865
Jason Evans7372b152012-02-10 20:22:09 -08002866 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002867 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002868 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002869 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002870}
2871
Jason Evanse476f8a2010-01-16 09:53:50 -08002872void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002873arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2874 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002875{
2876
Jason Evansc1e00ef2016-05-10 22:21:10 -07002877 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002878}
2879
2880void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002881arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002882 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002883{
2884 arena_run_t *run;
2885 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002886 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002887
Jason Evans0c5dd032014-09-29 01:31:39 -07002888 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002889 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002890 bin = &arena->bins[run->binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002891 malloc_mutex_lock(tsdn, &bin->lock);
2892 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2893 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans203484e2012-05-02 00:30:36 -07002894}
2895
2896void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002897arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2898 void *ptr, size_t pageind)
Jason Evans203484e2012-05-02 00:30:36 -07002899{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002900 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002901
2902 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002903 /* arena_ptr_small_binind_get() does extra sanity checking. */
2904 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2905 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002906 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002907 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002908 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2909 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002910}
Jason Evanse476f8a2010-01-16 09:53:50 -08002911
Jason Evans6b694c42014-01-07 16:47:56 -08002912#ifdef JEMALLOC_JET
2913#undef arena_dalloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07002914#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08002915#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002916void
Jason Evans6b694c42014-01-07 16:47:56 -08002917arena_dalloc_junk_large(void *ptr, size_t usize)
2918{
2919
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002920 if (config_fill && unlikely(opt_junk_free))
Chris Petersona82070e2016-03-27 23:28:39 -07002921 memset(ptr, JEMALLOC_FREE_JUNK, usize);
Jason Evans6b694c42014-01-07 16:47:56 -08002922}
2923#ifdef JEMALLOC_JET
2924#undef arena_dalloc_junk_large
2925#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2926arena_dalloc_junk_large_t *arena_dalloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07002927 JEMALLOC_N(n_arena_dalloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08002928#endif
2929
Jason Evanse56b24e2015-09-20 09:58:10 -07002930static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002931arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
2932 arena_chunk_t *chunk, void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002933{
Jason Evans0c5dd032014-09-29 01:31:39 -07002934 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002935 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2936 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002937 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002938
Jason Evans7372b152012-02-10 20:22:09 -08002939 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002940 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2941 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002942
Jason Evansfc0b3b72014-10-09 17:54:06 -07002943 if (!junked)
2944 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002945 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002946 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002947
Jason Evans7372b152012-02-10 20:22:09 -08002948 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002949 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002950 arena->stats.lstats[index].ndalloc++;
2951 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002952 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002953 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002954
Jason Evansc1e00ef2016-05-10 22:21:10 -07002955 arena_run_dalloc(tsdn, arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002956}
2957
Jason Evans203484e2012-05-02 00:30:36 -07002958void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002959arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07002960 arena_chunk_t *chunk, void *ptr)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002961{
2962
Jason Evansc1e00ef2016-05-10 22:21:10 -07002963 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002964}
2965
2966void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002967arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2968 void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07002969{
2970
Jason Evansc1e00ef2016-05-10 22:21:10 -07002971 malloc_mutex_lock(tsdn, &arena->lock);
2972 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
2973 malloc_mutex_unlock(tsdn, &arena->lock);
2974 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002975}
2976
Jason Evanse476f8a2010-01-16 09:53:50 -08002977static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002978arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002979 void *ptr, size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08002980{
Jason Evans0c5dd032014-09-29 01:31:39 -07002981 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002982 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2983 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002984 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002985
2986 assert(size < oldsize);
2987
2988 /*
2989 * Shrink the run, and make trailing pages available for other
2990 * allocations.
2991 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002992 malloc_mutex_lock(tsdn, &arena->lock);
2993 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
Jason Evans8a03cf02015-05-04 09:58:36 -07002994 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08002995 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002996 szind_t oldindex = size2index(oldsize) - NBINS;
2997 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002998
Jason Evans7372b152012-02-10 20:22:09 -08002999 arena->stats.ndalloc_large++;
3000 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003001 arena->stats.lstats[oldindex].ndalloc++;
3002 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003003
Jason Evans7372b152012-02-10 20:22:09 -08003004 arena->stats.nmalloc_large++;
3005 arena->stats.nrequests_large++;
3006 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003007 arena->stats.lstats[index].nmalloc++;
3008 arena->stats.lstats[index].nrequests++;
3009 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08003010 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003011 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003012}
3013
3014static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003015arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003016 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003017{
Jason Evansae4c7b42012-04-02 07:04:34 -07003018 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07003019 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003020 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08003021
Jason Evans8a03cf02015-05-04 09:58:36 -07003022 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3023 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08003024
3025 /* Try to extend the run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003026 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07003027 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3028 pageind+npages) != 0)
3029 goto label_fail;
3030 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3031 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003032 /*
3033 * The next run is available and sufficiently large. Split the
3034 * following run, then merge the first part with the existing
3035 * allocation.
3036 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02003037 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07003038 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07003039
Jason Evans560a4e12015-09-11 16:18:53 -07003040 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07003041 while (oldsize + followsize < usize)
3042 usize = index2size(size2index(usize)-1);
3043 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07003044 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07003045 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07003046 if (splitsize == 0)
3047 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07003048
Jason Evans61a6dfc2016-03-23 16:04:38 -07003049 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07003050 if (arena_run_split_large(arena, run, splitsize, zero))
3051 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08003052
Jason Evansd260f442015-09-24 16:38:45 -07003053 if (config_cache_oblivious && zero) {
3054 /*
3055 * Zero the trailing bytes of the original allocation's
3056 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07003057 * There will always be trailing bytes, because ptr's
3058 * offset from the beginning of the run is a multiple of
3059 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07003060 */
Jason Evansa784e412015-09-24 22:21:55 -07003061 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3062 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3063 PAGE));
3064 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3065 assert(nzero > 0);
3066 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07003067 }
3068
Jason Evans088e6a02010-10-18 00:04:44 -07003069 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07003070 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07003071
3072 /*
3073 * Mark the extended run as dirty if either portion of the run
3074 * was dirty before allocation. This is rather pedantic,
3075 * because there's not actually any sequence of events that
3076 * could cause the resulting run to be passed to
3077 * arena_run_dalloc() with the dirty argument set to false
3078 * (which is when dirty flag consistency would really matter).
3079 */
Jason Evans203484e2012-05-02 00:30:36 -07003080 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3081 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07003082 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07003083 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07003084 flag_dirty | (flag_unzeroed_mask &
3085 arena_mapbits_unzeroed_get(chunk, pageind)));
3086 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3087 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3088 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08003089
Jason Evans7372b152012-02-10 20:22:09 -08003090 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003091 szind_t oldindex = size2index(oldsize) - NBINS;
3092 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003093
Jason Evans7372b152012-02-10 20:22:09 -08003094 arena->stats.ndalloc_large++;
3095 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003096 arena->stats.lstats[oldindex].ndalloc++;
3097 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003098
Jason Evans7372b152012-02-10 20:22:09 -08003099 arena->stats.nmalloc_large++;
3100 arena->stats.nrequests_large++;
3101 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003102 arena->stats.lstats[index].nmalloc++;
3103 arena->stats.lstats[index].nrequests++;
3104 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07003105 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003106 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003107 return (false);
3108 }
Jason Evans560a4e12015-09-11 16:18:53 -07003109label_fail:
Jason Evansc1e00ef2016-05-10 22:21:10 -07003110 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003111 return (true);
3112}
3113
Jason Evans6b694c42014-01-07 16:47:56 -08003114#ifdef JEMALLOC_JET
3115#undef arena_ralloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07003116#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08003117#endif
3118static void
3119arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3120{
3121
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02003122 if (config_fill && unlikely(opt_junk_free)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003123 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
Jason Evans6b694c42014-01-07 16:47:56 -08003124 old_usize - usize);
3125 }
3126}
3127#ifdef JEMALLOC_JET
3128#undef arena_ralloc_junk_large
3129#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3130arena_ralloc_junk_large_t *arena_ralloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003131 JEMALLOC_N(n_arena_ralloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003132#endif
3133
Jason Evanse476f8a2010-01-16 09:53:50 -08003134/*
3135 * Try to resize a large allocation, in order to avoid copying. This will
3136 * always fail if growing an object, and the following run is already in use.
3137 */
3138static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003139arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
Jason Evans560a4e12015-09-11 16:18:53 -07003140 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003141{
Jason Evans560a4e12015-09-11 16:18:53 -07003142 arena_chunk_t *chunk;
3143 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003144
Jason Evans560a4e12015-09-11 16:18:53 -07003145 if (oldsize == usize_max) {
3146 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003147 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003148 }
Jason Evans560a4e12015-09-11 16:18:53 -07003149
3150 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3151 arena = extent_node_arena_get(&chunk->node);
3152
3153 if (oldsize < usize_max) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003154 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
Jason Evansb2c0d632016-04-13 23:36:15 -07003155 oldsize, usize_min, usize_max, zero);
Jason Evans560a4e12015-09-11 16:18:53 -07003156 if (config_fill && !ret && !zero) {
3157 if (unlikely(opt_junk_alloc)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003158 memset((void *)((uintptr_t)ptr + oldsize),
3159 JEMALLOC_ALLOC_JUNK,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003160 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003161 } else if (unlikely(opt_zero)) {
3162 memset((void *)((uintptr_t)ptr + oldsize), 0,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003163 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003164 }
3165 }
3166 return (ret);
3167 }
3168
3169 assert(oldsize > usize_max);
3170 /* Fill before shrinking in order avoid a race. */
3171 arena_ralloc_junk_large(ptr, oldsize, usize_max);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003172 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
Jason Evans560a4e12015-09-11 16:18:53 -07003173 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003174}
3175
Jason Evansb2c31662014-01-12 15:05:44 -08003176bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003177arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
Jason Evans243f7a02016-02-19 20:09:31 -08003178 size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003179{
Jason Evans560a4e12015-09-11 16:18:53 -07003180 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08003181
Jason Evans0c516a02016-02-25 15:29:49 -08003182 /* Calls with non-zero extra had to clamp extra. */
3183 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3184
Jason Evans0c516a02016-02-25 15:29:49 -08003185 if (unlikely(size > HUGE_MAXCLASS))
3186 return (true);
3187
Jason Evans560a4e12015-09-11 16:18:53 -07003188 usize_min = s2u(size);
Jason Evans560a4e12015-09-11 16:18:53 -07003189 usize_max = s2u(size + extra);
Jason Evans676df882015-09-11 20:50:20 -07003190 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans243f7a02016-02-19 20:09:31 -08003191 arena_chunk_t *chunk;
3192
Jason Evans88fef7c2015-02-12 14:06:37 -08003193 /*
3194 * Avoid moving the allocation if the size class can be left the
3195 * same.
3196 */
Jason Evans560a4e12015-09-11 16:18:53 -07003197 if (oldsize <= SMALL_MAXCLASS) {
3198 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3199 oldsize);
Jason Evans4985dc62016-02-19 19:24:58 -08003200 if ((usize_max > SMALL_MAXCLASS ||
3201 size2index(usize_max) != size2index(oldsize)) &&
3202 (size > oldsize || usize_max < oldsize))
3203 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07003204 } else {
Jason Evans4985dc62016-02-19 19:24:58 -08003205 if (usize_max <= SMALL_MAXCLASS)
3206 return (true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003207 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
Jason Evans4985dc62016-02-19 19:24:58 -08003208 usize_max, zero))
3209 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08003210 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003211
Jason Evans243f7a02016-02-19 20:09:31 -08003212 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003213 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
Jason Evans4985dc62016-02-19 19:24:58 -08003214 return (false);
Jason Evans560a4e12015-09-11 16:18:53 -07003215 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003216 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
Jason Evans243f7a02016-02-19 20:09:31 -08003217 usize_max, zero));
Jason Evans560a4e12015-09-11 16:18:53 -07003218 }
3219}
3220
3221static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003222arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
Jason Evans560a4e12015-09-11 16:18:53 -07003223 size_t alignment, bool zero, tcache_t *tcache)
3224{
3225
3226 if (alignment == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003227 return (arena_malloc(tsdn, arena, usize, size2index(usize),
3228 zero, tcache, true));
Jason Evans560a4e12015-09-11 16:18:53 -07003229 usize = sa2u(usize, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08003230 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003231 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003232 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07003233}
Jason Evanse476f8a2010-01-16 09:53:50 -08003234
Jason Evans8e3c3c62010-09-17 15:46:18 -07003235void *
Jason Evans5460aa62014-09-22 21:09:23 -07003236arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07003237 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07003238{
3239 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07003240 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003241
Jason Evans560a4e12015-09-11 16:18:53 -07003242 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08003243 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003244 return (NULL);
3245
Jason Evans676df882015-09-11 20:50:20 -07003246 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08003247 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003248
Jason Evans88fef7c2015-02-12 14:06:37 -08003249 /* Try to avoid moving the allocation. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003250 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
3251 zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08003252 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003253
Jason Evans88fef7c2015-02-12 14:06:37 -08003254 /*
3255 * size and oldsize are different enough that we need to move
3256 * the object. In that case, fall back to allocating new space
3257 * and copying.
3258 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003259 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3260 alignment, zero, tcache);
Jason Evans560a4e12015-09-11 16:18:53 -07003261 if (ret == NULL)
3262 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08003263
3264 /*
3265 * Junk/zero-filling were already done by
3266 * ipalloc()/arena_malloc().
3267 */
3268
Jason Evans560a4e12015-09-11 16:18:53 -07003269 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08003270 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3271 memcpy(ret, ptr, copysize);
Jason Evans3ef51d72016-05-06 12:16:00 -07003272 isqalloc(tsd, ptr, oldsize, tcache, true);
Jason Evans88fef7c2015-02-12 14:06:37 -08003273 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07003274 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3275 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003276 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003277 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08003278}
3279
Jason Evans609ae592012-10-11 13:53:15 -07003280dss_prec_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07003281arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans609ae592012-10-11 13:53:15 -07003282{
3283 dss_prec_t ret;
3284
Jason Evansc1e00ef2016-05-10 22:21:10 -07003285 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003286 ret = arena->dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003287 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003288 return (ret);
3289}
3290
Jason Evans4d434ad2014-04-15 12:09:48 -07003291bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003292arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
Jason Evans609ae592012-10-11 13:53:15 -07003293{
3294
Jason Evans551ebc42014-10-03 10:16:09 -07003295 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07003296 return (dss_prec != dss_prec_disabled);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003297 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003298 arena->dss_prec = dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003299 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07003300 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07003301}
3302
Jason Evans8d6a3e82015-03-18 18:55:33 -07003303ssize_t
3304arena_lg_dirty_mult_default_get(void)
3305{
3306
3307 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3308}
3309
3310bool
3311arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3312{
3313
Jason Evans243f7a02016-02-19 20:09:31 -08003314 if (opt_purge != purge_mode_ratio)
3315 return (true);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003316 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3317 return (true);
3318 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3319 return (false);
3320}
3321
Jason Evans243f7a02016-02-19 20:09:31 -08003322ssize_t
3323arena_decay_time_default_get(void)
3324{
3325
3326 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3327}
3328
3329bool
3330arena_decay_time_default_set(ssize_t decay_time)
3331{
3332
3333 if (opt_purge != purge_mode_decay)
3334 return (true);
3335 if (!arena_decay_time_valid(decay_time))
3336 return (true);
3337 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3338 return (false);
3339}
3340
Jason Evans3c07f802016-02-27 20:40:13 -08003341static void
3342arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3343 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3344 size_t *nactive, size_t *ndirty)
Jason Evans609ae592012-10-11 13:53:15 -07003345{
Jason Evans609ae592012-10-11 13:53:15 -07003346
Jason Evans66cd9532016-04-22 14:34:14 -07003347 *nthreads += arena_nthreads_get(arena, false);
Jason Evans609ae592012-10-11 13:53:15 -07003348 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07003349 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans94e7ffa2016-10-10 20:32:19 -07003350 *decay_time = arena->decay.time;
Jason Evans609ae592012-10-11 13:53:15 -07003351 *nactive += arena->nactive;
3352 *ndirty += arena->ndirty;
Jason Evans3c07f802016-02-27 20:40:13 -08003353}
3354
3355void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003356arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003357 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3358 size_t *nactive, size_t *ndirty)
Jason Evans3c07f802016-02-27 20:40:13 -08003359{
3360
Jason Evansc1e00ef2016-05-10 22:21:10 -07003361 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003362 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3363 decay_time, nactive, ndirty);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003364 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003365}
3366
3367void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003368arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003369 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3370 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3371 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3372 malloc_huge_stats_t *hstats)
Jason Evans3c07f802016-02-27 20:40:13 -08003373{
3374 unsigned i;
3375
3376 cassert(config_stats);
3377
Jason Evansc1e00ef2016-05-10 22:21:10 -07003378 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003379 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3380 decay_time, nactive, ndirty);
Jason Evans609ae592012-10-11 13:53:15 -07003381
3382 astats->mapped += arena->stats.mapped;
Jason Evans04c3c0f2016-05-03 22:11:35 -07003383 astats->retained += arena->stats.retained;
Jason Evans609ae592012-10-11 13:53:15 -07003384 astats->npurge += arena->stats.npurge;
3385 astats->nmadvise += arena->stats.nmadvise;
3386 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02003387 astats->metadata_mapped += arena->stats.metadata_mapped;
3388 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003389 astats->allocated_large += arena->stats.allocated_large;
3390 astats->nmalloc_large += arena->stats.nmalloc_large;
3391 astats->ndalloc_large += arena->stats.ndalloc_large;
3392 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07003393 astats->allocated_huge += arena->stats.allocated_huge;
3394 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3395 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07003396
3397 for (i = 0; i < nlclasses; i++) {
3398 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3399 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3400 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3401 lstats[i].curruns += arena->stats.lstats[i].curruns;
3402 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07003403
3404 for (i = 0; i < nhclasses; i++) {
3405 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3406 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3407 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3408 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003409 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003410
3411 for (i = 0; i < NBINS; i++) {
3412 arena_bin_t *bin = &arena->bins[i];
3413
Jason Evansc1e00ef2016-05-10 22:21:10 -07003414 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003415 bstats[i].nmalloc += bin->stats.nmalloc;
3416 bstats[i].ndalloc += bin->stats.ndalloc;
3417 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07003418 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07003419 if (config_tcache) {
3420 bstats[i].nfills += bin->stats.nfills;
3421 bstats[i].nflushes += bin->stats.nflushes;
3422 }
3423 bstats[i].nruns += bin->stats.nruns;
3424 bstats[i].reruns += bin->stats.reruns;
3425 bstats[i].curruns += bin->stats.curruns;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003426 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003427 }
3428}
3429
Jason Evans767d8502016-02-24 23:58:10 -08003430unsigned
Jason Evans66cd9532016-04-22 14:34:14 -07003431arena_nthreads_get(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003432{
3433
Jason Evans66cd9532016-04-22 14:34:14 -07003434 return (atomic_read_u(&arena->nthreads[internal]));
Jason Evans767d8502016-02-24 23:58:10 -08003435}
3436
3437void
Jason Evans66cd9532016-04-22 14:34:14 -07003438arena_nthreads_inc(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003439{
3440
Jason Evans66cd9532016-04-22 14:34:14 -07003441 atomic_add_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003442}
3443
3444void
Jason Evans66cd9532016-04-22 14:34:14 -07003445arena_nthreads_dec(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003446{
3447
Jason Evans66cd9532016-04-22 14:34:14 -07003448 atomic_sub_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003449}
3450
Jason Evans8bb31982014-10-07 23:14:57 -07003451arena_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003452arena_new(tsdn_t *tsdn, unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08003453{
Jason Evans8bb31982014-10-07 23:14:57 -07003454 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003455 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003456
Jason Evans8bb31982014-10-07 23:14:57 -07003457 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07003458 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3459 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07003460 */
3461 if (config_stats) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003462 arena = (arena_t *)base_alloc(tsdn,
Jason Evansf193fd82016-04-08 14:17:57 -07003463 CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans28b7e422016-11-04 15:00:08 -07003464 QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
3465 + (nhclasses * sizeof(malloc_huge_stats_t)));
Jason Evans8bb31982014-10-07 23:14:57 -07003466 } else
Jason Evansf193fd82016-04-08 14:17:57 -07003467 arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003468 if (arena == NULL)
3469 return (NULL);
3470
Jason Evans6109fe02010-02-10 10:37:56 -08003471 arena->ind = ind;
Jason Evans66cd9532016-04-22 14:34:14 -07003472 arena->nthreads[0] = arena->nthreads[1] = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07003473 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003474 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003475
Jason Evans7372b152012-02-10 20:22:09 -08003476 if (config_stats) {
3477 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003478 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
Jason Evansf193fd82016-04-08 14:17:57 -07003479 + CACHELINE_CEILING(sizeof(arena_t)));
Jason Evans7372b152012-02-10 20:22:09 -08003480 memset(arena->stats.lstats, 0, nlclasses *
3481 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003482 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
Jason Evansf193fd82016-04-08 14:17:57 -07003483 + CACHELINE_CEILING(sizeof(arena_t)) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003484 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3485 memset(arena->stats.hstats, 0, nhclasses *
3486 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003487 if (config_tcache)
3488 ql_new(&arena->tcache_ql);
3489 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003490
Jason Evans7372b152012-02-10 20:22:09 -08003491 if (config_prof)
3492 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003493
Jason Evans8a03cf02015-05-04 09:58:36 -07003494 if (config_cache_oblivious) {
3495 /*
3496 * A nondeterministic seed based on the address of arena reduces
3497 * the likelihood of lockstep non-uniform cache index
3498 * utilization among identical concurrent processes, but at the
3499 * cost of test repeatability. For debug builds, instead use a
3500 * deterministic seed.
3501 */
3502 arena->offset_state = config_debug ? ind :
3503 (uint64_t)(uintptr_t)arena;
3504 }
3505
Jason Evanse2bcf032016-10-13 12:18:38 -07003506 arena->dss_prec = chunk_dss_prec_get();
Jason Evans609ae592012-10-11 13:53:15 -07003507
Jason Evans19ff2ce2016-04-22 14:37:17 -07003508 ql_new(&arena->achunks);
3509
Jason Evanse476f8a2010-01-16 09:53:50 -08003510 arena->spare = NULL;
3511
Jason Evans8d6a3e82015-03-18 18:55:33 -07003512 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003513 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003514 arena->nactive = 0;
3515 arena->ndirty = 0;
3516
Jason Evansf193fd82016-04-08 14:17:57 -07003517 for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
3518 i++)
Jason Evansc6a2c392016-03-26 17:30:37 -07003519 arena_run_heap_new(&arena->runs_avail[i]);
Jason Evansf193fd82016-04-08 14:17:57 -07003520
Jason Evansee41ad42015-02-15 18:04:46 -08003521 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003522 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003523
Jason Evans243f7a02016-02-19 20:09:31 -08003524 if (opt_purge == purge_mode_decay)
3525 arena_decay_init(arena, arena_decay_time_default_get());
3526
Jason Evansee41ad42015-02-15 18:04:46 -08003527 ql_new(&arena->huge);
Jason Evansb2c0d632016-04-13 23:36:15 -07003528 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3529 WITNESS_RANK_ARENA_HUGE))
Jason Evansee41ad42015-02-15 18:04:46 -08003530 return (NULL);
3531
Jason Evansb49a3342015-07-28 11:28:19 -04003532 extent_tree_szad_new(&arena->chunks_szad_cached);
3533 extent_tree_ad_new(&arena->chunks_ad_cached);
3534 extent_tree_szad_new(&arena->chunks_szad_retained);
3535 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansb2c0d632016-04-13 23:36:15 -07003536 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3537 WITNESS_RANK_ARENA_CHUNKS))
Jason Evansee41ad42015-02-15 18:04:46 -08003538 return (NULL);
3539 ql_new(&arena->node_cache);
Jason Evansb2c0d632016-04-13 23:36:15 -07003540 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3541 WITNESS_RANK_ARENA_NODE_CACHE))
Jason Evansee41ad42015-02-15 18:04:46 -08003542 return (NULL);
3543
Jason Evansb49a3342015-07-28 11:28:19 -04003544 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003545
3546 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003547 for (i = 0; i < NBINS; i++) {
Jason Evansc9a4bf92016-04-22 14:36:48 -07003548 arena_bin_t *bin = &arena->bins[i];
Jason Evansb2c0d632016-04-13 23:36:15 -07003549 if (malloc_mutex_init(&bin->lock, "arena_bin",
3550 WITNESS_RANK_ARENA_BIN))
Jason Evans8bb31982014-10-07 23:14:57 -07003551 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003552 bin->runcur = NULL;
Jason Evansc6a2c392016-03-26 17:30:37 -07003553 arena_run_heap_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003554 if (config_stats)
3555 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003556 }
3557
Jason Evans8bb31982014-10-07 23:14:57 -07003558 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003559}
3560
Jason Evans49f7e8f2011-03-15 13:59:15 -07003561/*
3562 * Calculate bin_info->run_size such that it meets the following constraints:
3563 *
Jason Evans155bfa72014-10-05 17:54:10 -07003564 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003565 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003566 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003567 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3568 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003569 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003570static void
3571bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003572{
Jason Evans122449b2012-04-06 00:35:09 -07003573 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003574 size_t try_run_size, perfect_run_size, actual_run_size;
3575 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003576
3577 /*
Jason Evans122449b2012-04-06 00:35:09 -07003578 * Determine redzone size based on minimum alignment and minimum
3579 * redzone size. Add padding to the end of the run if it is needed to
3580 * align the regions. The padding allows each redzone to be half the
3581 * minimum alignment; without the padding, each redzone would have to
3582 * be twice as large in order to maintain alignment.
3583 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003584 if (config_fill && unlikely(opt_redzone)) {
Jason Evans9f4ee602016-02-24 10:32:45 -08003585 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07003586 if (align_min <= REDZONE_MINSIZE) {
3587 bin_info->redzone_size = REDZONE_MINSIZE;
3588 pad_size = 0;
3589 } else {
3590 bin_info->redzone_size = align_min >> 1;
3591 pad_size = bin_info->redzone_size;
3592 }
3593 } else {
3594 bin_info->redzone_size = 0;
3595 pad_size = 0;
3596 }
3597 bin_info->reg_interval = bin_info->reg_size +
3598 (bin_info->redzone_size << 1);
3599
3600 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003601 * Compute run size under ideal conditions (no redzones, no limit on run
3602 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003603 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003604 try_run_size = PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003605 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003606 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003607 perfect_run_size = try_run_size;
3608 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003609
Jason Evansae4c7b42012-04-02 07:04:34 -07003610 try_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003611 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans0c5dd032014-09-29 01:31:39 -07003612 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3613 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003614
Jason Evans0c5dd032014-09-29 01:31:39 -07003615 actual_run_size = perfect_run_size;
Jason Evans9e1810c2016-02-24 12:42:23 -08003616 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3617 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003618
3619 /*
3620 * Redzones can require enough padding that not even a single region can
3621 * fit within the number of pages that would normally be dedicated to a
3622 * run for this size class. Increase the run size until at least one
3623 * region fits.
3624 */
3625 while (actual_nregs == 0) {
3626 assert(config_fill && unlikely(opt_redzone));
3627
3628 actual_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003629 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3630 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003631 }
3632
3633 /*
3634 * Make sure that the run will fit within an arena chunk.
3635 */
Jason Evans155bfa72014-10-05 17:54:10 -07003636 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003637 actual_run_size -= PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003638 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3639 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003640 }
3641 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003642 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003643
3644 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003645 bin_info->run_size = actual_run_size;
3646 bin_info->nregs = actual_nregs;
Jason Evans9e1810c2016-02-24 12:42:23 -08003647 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3648 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07003649
3650 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3651 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003652}
3653
Jason Evansb1726102012-02-28 16:50:47 -08003654static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003655bin_info_init(void)
3656{
3657 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003658
Jason Evans8a03cf02015-05-04 09:58:36 -07003659#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003660 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003661 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003662 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003663 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003664#define BIN_INFO_INIT_bin_no(index, size)
Jason Evans1abb49f2016-04-17 16:16:11 -07003665#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
Jason Evansd04047c2014-05-28 16:11:55 -07003666 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003667 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003668#undef BIN_INFO_INIT_bin_yes
3669#undef BIN_INFO_INIT_bin_no
3670#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003671}
3672
Jason Evans5d8db152016-04-08 14:16:19 -07003673void
Jason Evansa0bf2422010-01-29 14:30:41 -08003674arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003675{
Jason Evans7393f442010-10-01 17:35:43 -07003676 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003677
Jason Evans8d6a3e82015-03-18 18:55:33 -07003678 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
Jason Evans243f7a02016-02-19 20:09:31 -08003679 arena_decay_time_default_set(opt_decay_time);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003680
Jason Evanse476f8a2010-01-16 09:53:50 -08003681 /*
3682 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003683 * page map. The page map is biased to omit entries for the header
3684 * itself, so some iteration is necessary to compute the map bias.
3685 *
3686 * 1) Compute safe header_size and map_bias values that include enough
3687 * space for an unbiased page map.
3688 * 2) Refine map_bias based on (1) to omit the header pages in the page
3689 * map. The resulting map_bias may be one too small.
3690 * 3) Refine map_bias based on (2). The result will be >= the result
3691 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003692 */
Jason Evans7393f442010-10-01 17:35:43 -07003693 map_bias = 0;
3694 for (i = 0; i < 3; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03003695 size_t header_size = offsetof(arena_chunk_t, map_bits) +
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003696 ((sizeof(arena_chunk_map_bits_t) +
3697 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003698 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003699 }
3700 assert(map_bias > 0);
3701
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003702 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3703 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3704
Jason Evans155bfa72014-10-05 17:54:10 -07003705 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003706 assert(arena_maxrun > 0);
Jason Evans676df882015-09-11 20:50:20 -07003707 large_maxclass = index2size(size2index(chunksize)-1);
3708 if (large_maxclass > arena_maxrun) {
Jason Evans155bfa72014-10-05 17:54:10 -07003709 /*
3710 * For small chunk sizes it's possible for there to be fewer
3711 * non-header pages available than are necessary to serve the
3712 * size classes just below chunksize.
3713 */
Jason Evans676df882015-09-11 20:50:20 -07003714 large_maxclass = arena_maxrun;
Jason Evans155bfa72014-10-05 17:54:10 -07003715 }
Jason Evans676df882015-09-11 20:50:20 -07003716 assert(large_maxclass > 0);
3717 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003718 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003719
Jason Evansb1726102012-02-28 16:50:47 -08003720 bin_info_init();
Jason Evanse476f8a2010-01-16 09:53:50 -08003721}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003722
3723void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003724arena_prefork0(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003725{
3726
Jason Evansc1e00ef2016-05-10 22:21:10 -07003727 malloc_mutex_prefork(tsdn, &arena->lock);
Jason Evans174c0c32016-04-25 23:14:40 -07003728}
3729
3730void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003731arena_prefork1(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003732{
3733
Jason Evansc1e00ef2016-05-10 22:21:10 -07003734 malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003735}
3736
3737void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003738arena_prefork2(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003739{
3740
Jason Evansc1e00ef2016-05-10 22:21:10 -07003741 malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003742}
3743
3744void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003745arena_prefork3(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003746{
3747 unsigned i;
3748
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003749 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003750 malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3751 malloc_mutex_prefork(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003752}
3753
3754void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003755arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003756{
3757 unsigned i;
3758
Jason Evansc1e00ef2016-05-10 22:21:10 -07003759 malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003760 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003761 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3762 malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3763 malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3764 malloc_mutex_postfork_parent(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003765}
3766
3767void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003768arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003769{
3770 unsigned i;
3771
Jason Evansc1e00ef2016-05-10 22:21:10 -07003772 malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003773 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003774 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3775 malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3776 malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3777 malloc_mutex_postfork_child(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003778}