#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t	opt_purge = PURGE_DEFAULT;
const char	*purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t	lg_dirty_mult_default;
ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t	decay_time_default;

arena_bin_info_t	arena_bin_info[NBINS];

size_t		map_bias;
size_t		map_misc_offset;
size_t		arena_maxrun; /* Max run size for arenas. */
size_t		large_maxclass; /* Max large size class. */
size_t		run_quantize_max; /* Max run_quantize_*() input. */
static size_t	small_maxrun; /* Max run size for small size classes. */
static bool	*small_run_tab; /* Valid small run page multiples. */
static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned	nlclasses; /* Number of large size classes. */
unsigned	nhclasses; /* Number of huge size classes. */
static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

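/*
 * Return the run size recorded in the chunk map for the run headed by
 * miscelm.
 */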
JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)

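/*
 * Run quantization maps arbitrary page-aligned sizes onto the set of run sizes
 * that can actually back small runs or large allocations.  The floor variants
 * round down (used when filing an available run under a size index), and the
 * ceil variants round up (used when searching for a run large enough to
 * satisfy an allocation).
 */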
static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation.  Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

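/*
 * Return the pairing heap of available runs for the given size index.  Heaps
 * exist only for indices that can occur as quantized run sizes, hence the
 * runs_avail_bias offset.
 */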
static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get_mutable(chunk, pageind));
}

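/*
 * Dirty runs are linked into a ring rooted at arena->runs_dirty so that the
 * purge machinery can iterate them; arena->ndirty tracks the total number of
 * dirty pages.
 */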
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

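/*
 * Allocate a region from a small run: claim the first free bit in the run's
 * bitmap and translate the resulting region index into a pointer using the
 * bin's region interval and offset.
 */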
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

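/*
 * Maintain the arena's count of active pages, keeping the global cactive
 * statistic in sync at chunk granularity when stats are enabled.
 */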
static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

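/*
 * Remove a run from the available (and, if dirty, the dirty) data structures
 * before splitting it, and re-insert any trailing pages that are not needed.
 */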
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

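/*
 * Split a large run out of an available run: commit it if necessary, zero or
 * mark its pages as appropriate, and set the large map bits.  Returns true on
 * commit failure.
 */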
static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

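/*
 * As above, but configure the run to back a small size class: each page's map
 * bits record the bin index and the page's offset within the run.
 */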
static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

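/*
 * Reuse the cached spare chunk, which is always kept in a fully deallocated
 * state (one maximal unallocated run).
 */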
static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

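/*
 * Slow path for arena chunk allocation: drop the arena lock, allocate through
 * the chunk hooks, and ensure the chunk header is committed and the chunk is
 * registered before returning.
 */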
static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

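/*
 * Allocate an arena chunk, preferring the cache of previously freed chunks and
 * falling back to the slow path above.
 */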
static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

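/* Acquire a chunk to carve runs from, reusing the spare if one exists. */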
static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

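/*
 * Return a chunk to the chunk cache, decommitting its header first if the rest
 * of the chunk is already decommitted.
 */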
static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}

static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

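/*
 * Huge allocation statistics.  The "undo" variants revert optimistic updates
 * made before a chunk allocation that subsequently fails.
 */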
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

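/*
 * Per-arena cache of extent nodes, backed by base_alloc() when the cache is
 * empty.
 */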
extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}

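/*
 * Huge chunk allocation updates stats and nactive optimistically before
 * dropping the arena lock; the hard path reverts those updates if the chunk
 * hooks fail to provide memory.
 */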
static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}

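/*
 * In-place expansion of a huge allocation: allocate the trailing chunk range
 * and merge it with the existing chunk, backing out the stats updates on
 * failure.
 */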
Jason Evansb49a3342015-07-28 11:28:19 -04001039static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001040arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07001041 chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
1042 bool *zero, void *nchunk, size_t udiff, size_t cdiff)
Jason Evans99bd94f2015-02-18 16:40:53 -08001043{
1044 bool err;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001045 bool commit = true;
Jason Evans99bd94f2015-02-18 16:40:53 -08001046
Jason Evansc1e00ef2016-05-10 22:21:10 -07001047 err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -07001048 chunksize, zero, &commit) == NULL);
Jason Evans99bd94f2015-02-18 16:40:53 -08001049 if (err) {
1050 /* Revert optimistic stats updates. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001051 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001052 if (config_stats) {
1053 arena_huge_ralloc_stats_update_undo(arena, oldsize,
1054 usize);
1055 arena->stats.mapped -= cdiff;
1056 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001057 arena_nactive_sub(arena, udiff >> LG_PAGE);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001058 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evansb49a3342015-07-28 11:28:19 -04001059 } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1060 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001061 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -07001062 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001063 err = true;
Jason Evans2012d5a2014-11-17 09:54:49 -08001064 }
Jason Evans99bd94f2015-02-18 16:40:53 -08001065 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001066}
1067
1068bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001069arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07001070 size_t oldsize, size_t usize, bool *zero)
Jason Evans9b41ac92014-10-14 22:20:00 -07001071{
Jason Evans99bd94f2015-02-18 16:40:53 -08001072 bool err;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001073 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans99bd94f2015-02-18 16:40:53 -08001074 void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
Jason Evans9b41ac92014-10-14 22:20:00 -07001075 size_t udiff = usize - oldsize;
1076 size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1077
Jason Evansc1e00ef2016-05-10 22:21:10 -07001078 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001079
1080 /* Optimistically update stats. */
Jason Evans9b41ac92014-10-14 22:20:00 -07001081 if (config_stats) {
Jason Evans9b41ac92014-10-14 22:20:00 -07001082 arena_huge_ralloc_stats_update(arena, oldsize, usize);
1083 arena->stats.mapped += cdiff;
1084 }
Jason Evans40ee9aa2016-02-27 12:34:50 -08001085 arena_nactive_add(arena, udiff >> LG_PAGE);
Jason Evans9b41ac92014-10-14 22:20:00 -07001086
Jason Evansc1e00ef2016-05-10 22:21:10 -07001087 err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -07001088 chunksize, zero, true) == NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001089 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans99bd94f2015-02-18 16:40:53 -08001090 if (err) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001091 err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07001092 &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
Jason Evansb49a3342015-07-28 11:28:19 -04001093 cdiff);
1094 } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1095 cdiff, true, arena->ind)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001096 chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
Jason Evansb2c0d632016-04-13 23:36:15 -07001097 *zero, true);
Jason Evansb49a3342015-07-28 11:28:19 -04001098 err = true;
Jason Evans9b41ac92014-10-14 22:20:00 -07001099 }
1100
Jason Evans99bd94f2015-02-18 16:40:53 -08001101 return (err);
Jason Evans9b41ac92014-10-14 22:20:00 -07001102}
1103
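/*
 * Note on the control flow above (informal): arena_chunk_ralloc_huge_expand()
 * updates stats and nactive optimistically before trying to satisfy the
 * expansion from the chunk cache; only if that fails does the _hard variant
 * fall back to chunk_alloc_wrapper(), reverting the optimistic updates when
 * even that allocation fails.  In both paths the chunk hooks' merge() must
 * accept joining the old and new ranges, otherwise the freshly allocated
 * trailing chunk is handed back via chunk_dalloc_wrapper() and the expansion
 * reports failure.
 */
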
Jason Evansaa282662015-07-15 16:02:21 -07001104/*
1105 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
Dave Watson3417a302016-02-23 12:06:21 -08001106 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1107 * same size.
Jason Evansaa282662015-07-15 16:02:21 -07001108 */
Jason Evans97c04a92015-03-06 19:57:36 -08001109static arena_run_t *
Jason Evansaa282662015-07-15 16:02:21 -07001110arena_run_first_best_fit(arena_t *arena, size_t size)
Jason Evans97c04a92015-03-06 19:57:36 -08001111{
Dave Watson3417a302016-02-23 12:06:21 -08001112 szind_t ind, i;
1113
1114 ind = size2index(run_quantize_ceil(size));
Dave Watsoncd86c142016-02-24 11:02:49 -08001115 for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
Jason Evansc6a2c392016-03-26 17:30:37 -07001116 arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
1117 arena_runs_avail_get(arena, i));
1118 if (miscelm != NULL)
Dave Watson3417a302016-02-23 12:06:21 -08001119 return (&miscelm->run);
1120 }
1121
1122 return (NULL);
Jason Evans97c04a92015-03-06 19:57:36 -08001123}
1124
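/*
 * Reading note for arena_run_first_best_fit() (a sketch, not upstream
 * commentary): run_quantize_ceil() rounds the request up to the smallest
 * quantized run size able to hold it, and size2index() maps that size to a
 * runs_avail heap index.  Scanning upward from that index visits candidate
 * runs in nondecreasing size-class order, so the first non-empty heap holds
 * best-fitting runs, and taking that heap's first element yields the lowest
 * such run.
 */
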
Jason Evanse476f8a2010-01-16 09:53:50 -08001125static arena_run_t *
Jason Evansaa5113b2014-01-14 16:23:03 -08001126arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08001127{
Jason Evansaa282662015-07-15 16:02:21 -07001128 arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001129 if (run != NULL) {
1130 if (arena_run_split_large(arena, run, size, zero))
1131 run = NULL;
1132 }
Jason Evans97c04a92015-03-06 19:57:36 -08001133 return (run);
Jason Evans5b0c9962012-05-10 15:47:24 -07001134}
1135
1136static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001137arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
Jason Evans5b0c9962012-05-10 15:47:24 -07001138{
1139 arena_chunk_t *chunk;
1140 arena_run_t *run;
1141
Jason Evansfc0b3b72014-10-09 17:54:06 -07001142 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001143 assert(size == PAGE_CEILING(size));
Jason Evans5b0c9962012-05-10 15:47:24 -07001144
1145 /* Search the arena's chunks for the lowest best fit. */
Jason Evansaa5113b2014-01-14 16:23:03 -08001146 run = arena_run_alloc_large_helper(arena, size, zero);
Jason Evans5b0c9962012-05-10 15:47:24 -07001147 if (run != NULL)
1148 return (run);
1149
Jason Evanse476f8a2010-01-16 09:53:50 -08001150 /*
1151 * No usable runs. Create a new chunk from which to allocate the run.
1152 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001153 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evanse00572b2010-03-14 19:43:56 -07001154 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001155 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001156 if (arena_run_split_large(arena, run, size, zero))
1157 run = NULL;
Jason Evanse00572b2010-03-14 19:43:56 -07001158 return (run);
1159 }
1160
1161 /*
1162 * arena_chunk_alloc() failed, but another thread may have made
1163 * sufficient memory available while this one dropped arena->lock in
1164 * arena_chunk_alloc(), so search one more time.
1165 */
Jason Evansaa5113b2014-01-14 16:23:03 -08001166 return (arena_run_alloc_large_helper(arena, size, zero));
1167}
1168
1169static arena_run_t *
Jason Evansd01fd192015-08-19 15:21:32 -07001170arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001171{
Jason Evansaa282662015-07-15 16:02:21 -07001172 arena_run_t *run = arena_run_first_best_fit(arena, size);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001173 if (run != NULL) {
1174 if (arena_run_split_small(arena, run, size, binind))
1175 run = NULL;
1176 }
Jason Evans97c04a92015-03-06 19:57:36 -08001177 return (run);
Jason Evansaa5113b2014-01-14 16:23:03 -08001178}
1179
1180static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07001181arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
Jason Evansaa5113b2014-01-14 16:23:03 -08001182{
1183 arena_chunk_t *chunk;
1184 arena_run_t *run;
1185
Jason Evansfc0b3b72014-10-09 17:54:06 -07001186 assert(size <= arena_maxrun);
Jason Evans8a03cf02015-05-04 09:58:36 -07001187 assert(size == PAGE_CEILING(size));
Jason Evansaa5113b2014-01-14 16:23:03 -08001188 assert(binind != BININD_INVALID);
1189
1190 /* Search the arena's chunks for the lowest best fit. */
1191 run = arena_run_alloc_small_helper(arena, size, binind);
1192 if (run != NULL)
1193 return (run);
1194
1195 /*
1196 * No usable runs. Create a new chunk from which to allocate the run.
1197 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001198 chunk = arena_chunk_alloc(tsdn, arena);
Jason Evansaa5113b2014-01-14 16:23:03 -08001199 if (chunk != NULL) {
Jason Evans61a6dfc2016-03-23 16:04:38 -07001200 run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001201 if (arena_run_split_small(arena, run, size, binind))
1202 run = NULL;
Jason Evansaa5113b2014-01-14 16:23:03 -08001203 return (run);
1204 }
1205
1206 /*
1207 * arena_chunk_alloc() failed, but another thread may have made
1208 * sufficient memory available while this one dropped arena->lock in
1209 * arena_chunk_alloc(), so search one more time.
1210 */
1211 return (arena_run_alloc_small_helper(arena, size, binind));
Jason Evanse476f8a2010-01-16 09:53:50 -08001212}
1213
Jason Evans8d6a3e82015-03-18 18:55:33 -07001214static bool
1215arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1216{
1217
Jason Evansbd16ea42015-03-24 15:59:28 -07001218 return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1219 << 3));
Jason Evans8d6a3e82015-03-18 18:55:33 -07001220}
1221
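/*
 * Equivalently: lg_dirty_mult may range from -1 (ratio-based purging
 * disabled) up to one less than the bit width of size_t, since the threshold
 * in arena_maybe_purge_ratio() is computed as nactive >> lg_dirty_mult.
 */
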
1222ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001223arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001224{
1225 ssize_t lg_dirty_mult;
1226
Jason Evansc1e00ef2016-05-10 22:21:10 -07001227 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001228 lg_dirty_mult = arena->lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001229 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001230
1231 return (lg_dirty_mult);
1232}
1233
1234bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001235arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
Jason Evans8d6a3e82015-03-18 18:55:33 -07001236{
1237
1238 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1239 return (true);
1240
Jason Evansc1e00ef2016-05-10 22:21:10 -07001241 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001242 arena->lg_dirty_mult = lg_dirty_mult;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001243 arena_maybe_purge(tsdn, arena);
1244 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8d6a3e82015-03-18 18:55:33 -07001245
1246 return (false);
1247}
1248
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001249static void
Jason Evans243f7a02016-02-19 20:09:31 -08001250arena_decay_deadline_init(arena_t *arena)
1251{
1252
1253 assert(opt_purge == purge_mode_decay);
1254
1255 /*
1256 * Generate a new deadline that is uniformly random within the next
1257 * epoch after the current one.
1258 */
Jason Evans9bad0792016-02-21 11:25:02 -08001259 nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
1260 nstime_add(&arena->decay_deadline, &arena->decay_interval);
Jason Evans243f7a02016-02-19 20:09:31 -08001261 if (arena->decay_time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001262 nstime_t jitter;
Jason Evans243f7a02016-02-19 20:09:31 -08001263
Jason Evans9bad0792016-02-21 11:25:02 -08001264 nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
1265 nstime_ns(&arena->decay_interval)));
1266 nstime_add(&arena->decay_deadline, &jitter);
Jason Evans243f7a02016-02-19 20:09:31 -08001267 }
1268}
1269
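/*
 * Arithmetic sketch for arena_decay_deadline_init(), assuming decay_time > 0:
 *
 *   interval = decay_time / SMOOTHSTEP_NSTEPS   (set in arena_decay_init())
 *   deadline = epoch + interval + jitter, jitter uniform in [0, interval)
 *
 * The jitter state is seeded per arena, which keeps different arenas'
 * deadlines from landing on the same instant.
 */
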
1270static bool
Jason Evans9bad0792016-02-21 11:25:02 -08001271arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001272{
1273
1274 assert(opt_purge == purge_mode_decay);
1275
Jason Evans9bad0792016-02-21 11:25:02 -08001276 return (nstime_compare(&arena->decay_deadline, time) <= 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001277}
1278
1279static size_t
1280arena_decay_backlog_npages_limit(const arena_t *arena)
1281{
1282 static const uint64_t h_steps[] = {
1283#define STEP(step, h, x, y) \
1284 h,
1285 SMOOTHSTEP
1286#undef STEP
1287 };
1288 uint64_t sum;
1289 size_t npages_limit_backlog;
1290 unsigned i;
1291
1292 assert(opt_purge == purge_mode_decay);
1293
1294 /*
1295 * For each element of decay_backlog, multiply by the corresponding
1296 * fixed-point smoothstep decay factor. Sum the products, then divide
1297 * to round down to the nearest whole number of pages.
1298 */
1299 sum = 0;
1300 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1301 sum += arena->decay_backlog[i] * h_steps[i];
rustyx00432332016-04-12 09:50:54 +02001302 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
Jason Evans243f7a02016-02-19 20:09:31 -08001303
1304 return (npages_limit_backlog);
1305}
1306
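/*
 * Worked form of arena_decay_backlog_npages_limit(): with h_steps[] the
 * SMOOTHSTEP table sampled at SMOOTHSTEP_NSTEPS points in fixed point
 * (SMOOTHSTEP_BFP fractional bits),
 *
 *   limit = (sum over i of decay_backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP
 *
 * so each epoch's recorded dirty-page delta contributes in proportion to its
 * smoothstep coefficient, and the final shift rounds the fixed-point sum down
 * to a whole page count.
 */
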
1307static void
Jason Evans9bad0792016-02-21 11:25:02 -08001308arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
Jason Evans243f7a02016-02-19 20:09:31 -08001309{
rustyx00432332016-04-12 09:50:54 +02001310 uint64_t nadvance_u64;
Jason Evans9bad0792016-02-21 11:25:02 -08001311 nstime_t delta;
Jason Evans243f7a02016-02-19 20:09:31 -08001312 size_t ndirty_delta;
1313
1314 assert(opt_purge == purge_mode_decay);
1315 assert(arena_decay_deadline_reached(arena, time));
1316
Jason Evans9bad0792016-02-21 11:25:02 -08001317 nstime_copy(&delta, time);
1318 nstime_subtract(&delta, &arena->decay_epoch);
rustyx00432332016-04-12 09:50:54 +02001319 nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
1320 assert(nadvance_u64 > 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001321
rustyx00432332016-04-12 09:50:54 +02001322 /* Add nadvance_u64 decay intervals to epoch. */
Jason Evans9bad0792016-02-21 11:25:02 -08001323 nstime_copy(&delta, &arena->decay_interval);
rustyx00432332016-04-12 09:50:54 +02001324 nstime_imultiply(&delta, nadvance_u64);
Jason Evans9bad0792016-02-21 11:25:02 -08001325 nstime_add(&arena->decay_epoch, &delta);
Jason Evans243f7a02016-02-19 20:09:31 -08001326
1327 /* Set a new deadline. */
1328 arena_decay_deadline_init(arena);
1329
1330 /* Update the backlog. */
rustyx00432332016-04-12 09:50:54 +02001331 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
Jason Evans243f7a02016-02-19 20:09:31 -08001332 memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1333 sizeof(size_t));
1334 } else {
rustyx00432332016-04-12 09:50:54 +02001335 size_t nadvance_z = (size_t)nadvance_u64;
1336
1337 assert((uint64_t)nadvance_z == nadvance_u64);
1338
1339 memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
1340 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1341 if (nadvance_z > 1) {
Jason Evans243f7a02016-02-19 20:09:31 -08001342 memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
rustyx00432332016-04-12 09:50:54 +02001343 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
Jason Evans243f7a02016-02-19 20:09:31 -08001344 }
1345 }
1346 ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
1347 arena->decay_ndirty : 0;
1348 arena->decay_ndirty = arena->ndirty;
1349 arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1350 arena->decay_backlog_npages_limit =
1351 arena_decay_backlog_npages_limit(arena);
1352}
1353
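/*
 * Put differently, arena_decay_epoch_advance() treats decay_backlog as a
 * sliding window of per-epoch dirty-page production: advancing by nadvance
 * epochs shifts the window left by nadvance slots (or clears it once nadvance
 * reaches SMOOTHSTEP_NSTEPS), zero-fills the skipped epochs, and records in
 * the newest slot the net growth in dirty pages since the previous advance
 * (clamped at zero).
 */
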
1354static size_t
1355arena_decay_npages_limit(arena_t *arena)
1356{
1357 size_t npages_limit;
1358
1359 assert(opt_purge == purge_mode_decay);
1360
1361 npages_limit = arena->decay_backlog_npages_limit;
1362
1363 /* Add in any dirty pages created during the current epoch. */
1364 if (arena->ndirty > arena->decay_ndirty)
1365 npages_limit += arena->ndirty - arena->decay_ndirty;
1366
1367 return (npages_limit);
1368}
1369
1370static void
1371arena_decay_init(arena_t *arena, ssize_t decay_time)
1372{
1373
1374 arena->decay_time = decay_time;
1375 if (decay_time > 0) {
Jason Evans9bad0792016-02-21 11:25:02 -08001376 nstime_init2(&arena->decay_interval, decay_time, 0);
1377 nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
Jason Evans243f7a02016-02-19 20:09:31 -08001378 }
1379
Jason Evans9bad0792016-02-21 11:25:02 -08001380 nstime_init(&arena->decay_epoch, 0);
1381 nstime_update(&arena->decay_epoch);
Jason Evans243f7a02016-02-19 20:09:31 -08001382 arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
1383 arena_decay_deadline_init(arena);
1384 arena->decay_ndirty = arena->ndirty;
1385 arena->decay_backlog_npages_limit = 0;
1386 memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1387}
1388
1389static bool
1390arena_decay_time_valid(ssize_t decay_time)
1391{
1392
Jason Evans022f6892016-03-02 22:41:32 -08001393 if (decay_time < -1)
1394 return (false);
1395 if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1396 return (true);
1397 return (false);
Jason Evans243f7a02016-02-19 20:09:31 -08001398}
1399
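/*
 * Domain of decay_time as accepted above and consumed by
 * arena_maybe_purge_decay(): -1 disables decay-based purging entirely, 0
 * causes dirty pages to be purged eagerly, and a positive value (capped at
 * NSTIME_SEC_MAX seconds) spreads purging of each epoch's dirty pages over
 * roughly that many seconds along the smoothstep curve.
 */
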
1400ssize_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001401arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001402{
1403 ssize_t decay_time;
1404
Jason Evansc1e00ef2016-05-10 22:21:10 -07001405 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001406 decay_time = arena->decay_time;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001407 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001408
1409 return (decay_time);
1410}
1411
1412bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07001413arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
Jason Evans243f7a02016-02-19 20:09:31 -08001414{
1415
1416 if (!arena_decay_time_valid(decay_time))
1417 return (true);
1418
Jason Evansc1e00ef2016-05-10 22:21:10 -07001419 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001420 /*
1421 * Restart decay backlog from scratch, which may cause many dirty pages
1422 * to be immediately purged. It would conceptually be possible to map
1423 * the old backlog onto the new backlog, but there is no justification
1424 * for such complexity since decay_time changes are intended to be
1425 * infrequent, either between the {-1, 0, >0} states, or a one-time
1426 * arbitrary change during initial arena configuration.
1427 */
1428 arena_decay_init(arena, decay_time);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001429 arena_maybe_purge(tsdn, arena);
1430 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001431
1432 return (false);
1433}
1434
1435static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001436arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
Jason Evans05b21be2010-03-14 17:36:10 -07001437{
1438
Jason Evans243f7a02016-02-19 20:09:31 -08001439 assert(opt_purge == purge_mode_ratio);
1440
Jason Evanse3d13062012-10-30 15:42:37 -07001441 /* Don't purge if the option is disabled. */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001442 if (arena->lg_dirty_mult < 0)
Jason Evanse3d13062012-10-30 15:42:37 -07001443 return;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001444
Jason Evans0a9f9a42015-06-22 18:50:32 -07001445 /*
1446 * Iterate, since preventing recursive purging could otherwise leave too
1447 * many dirty pages.
1448 */
1449 while (true) {
1450 size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1451 if (threshold < chunk_npages)
1452 threshold = chunk_npages;
1453 /*
1454 * Don't purge unless the number of purgeable pages exceeds the
1455 * threshold.
1456 */
1457 if (arena->ndirty <= threshold)
1458 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001459 arena_purge_to_limit(tsdn, arena, threshold);
Jason Evans0a9f9a42015-06-22 18:50:32 -07001460 }
Jason Evans05b21be2010-03-14 17:36:10 -07001461}
1462
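/*
 * Concrete reading of the ratio test in arena_maybe_purge_ratio() (an
 * illustration, not normative): with lg_dirty_mult == 3, purging starts once
 * ndirty exceeds nactive/8, i.e. an active:dirty ratio of at least 8:1 is
 * maintained, except that the threshold never drops below chunk_npages, so
 * fewer than one chunk's worth of dirty pages is never purged on ratio
 * grounds alone.
 */
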
Jason Evans243f7a02016-02-19 20:09:31 -08001463static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001464arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
Jason Evans243f7a02016-02-19 20:09:31 -08001465{
Jason Evans9bad0792016-02-21 11:25:02 -08001466 nstime_t time;
Jason Evans243f7a02016-02-19 20:09:31 -08001467 size_t ndirty_limit;
1468
1469 assert(opt_purge == purge_mode_decay);
1470
1471 /* Purge all or nothing if the option is disabled. */
1472 if (arena->decay_time <= 0) {
1473 if (arena->decay_time == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001474 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001475 return;
1476 }
1477
Jason Evans9bad0792016-02-21 11:25:02 -08001478 nstime_copy(&time, &arena->decay_epoch);
1479 if (unlikely(nstime_update(&time))) {
Jason Evans243f7a02016-02-19 20:09:31 -08001480 /* Time went backwards. Force an epoch advance. */
Jason Evans9bad0792016-02-21 11:25:02 -08001481 nstime_copy(&time, &arena->decay_deadline);
Jason Evans243f7a02016-02-19 20:09:31 -08001482 }
1483
1484 if (arena_decay_deadline_reached(arena, &time))
1485 arena_decay_epoch_advance(arena, &time);
1486
1487 ndirty_limit = arena_decay_npages_limit(arena);
1488
1489 /*
1490 * Don't try to purge unless the number of purgeable pages exceeds the
1491 * current limit.
1492 */
1493 if (arena->ndirty <= ndirty_limit)
1494 return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001495 arena_purge_to_limit(tsdn, arena, ndirty_limit);
Jason Evans243f7a02016-02-19 20:09:31 -08001496}
1497
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001498void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001499arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001500{
1501
1502 /* Don't recursively purge. */
1503 if (arena->purging)
1504 return;
1505
Jason Evans243f7a02016-02-19 20:09:31 -08001506 if (opt_purge == purge_mode_ratio)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001507 arena_maybe_purge_ratio(tsdn, arena);
Jason Evans243f7a02016-02-19 20:09:31 -08001508 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001509 arena_maybe_purge_decay(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001510}
1511
Qinfan Wua244e502014-07-21 10:23:36 -07001512static size_t
1513arena_dirty_count(arena_t *arena)
1514{
1515 size_t ndirty = 0;
Jason Evans38e42d32015-03-10 18:15:40 -07001516 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001517 extent_node_t *chunkselm;
Qinfan Wua244e502014-07-21 10:23:36 -07001518
Jason Evans38e42d32015-03-10 18:15:40 -07001519 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001520 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001521 rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001522 size_t npages;
1523
Jason Evansf5c8f372015-03-10 18:29:49 -07001524 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001525 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001526 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001527 } else {
Jason Evans38e42d32015-03-10 18:15:40 -07001528 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1529 rdelm);
1530 arena_chunk_map_misc_t *miscelm =
1531 arena_rd_to_miscelm(rdelm);
1532 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001533 assert(arena_mapbits_allocated_get(chunk, pageind) ==
1534 0);
1535 assert(arena_mapbits_large_get(chunk, pageind) == 0);
1536 assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1537 npages = arena_mapbits_unallocated_size_get(chunk,
1538 pageind) >> LG_PAGE;
1539 }
Qinfan Wua244e502014-07-21 10:23:36 -07001540 ndirty += npages;
1541 }
1542
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001543 return (ndirty);
Jason Evansaa5113b2014-01-14 16:23:03 -08001544}
1545
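/*
 * Traversal note for arena_dirty_count() (informal): each cached chunk's
 * extent_node_t embeds an arena_runs_dirty_link_t (node->rd) that is spliced
 * into the runs_dirty ring, so dirty runs and cached chunks appear in one
 * sequence.  The test (rdelm == &chunkselm->rd) is how the walk tells a
 * cached-chunk element, whose size lives in the extent node, from an ordinary
 * dirty run, whose size lives in the chunk map.
 */
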
1546static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001547arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001548 size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001549 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001550{
Jason Evans38e42d32015-03-10 18:15:40 -07001551 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001552 extent_node_t *chunkselm;
Qinfan Wue9708002014-07-21 18:09:04 -07001553 size_t nstashed = 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08001554
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001555 /* Stash runs/chunks according to ndirty_limit. */
Jason Evans38e42d32015-03-10 18:15:40 -07001556 for (rdelm = qr_next(&arena->runs_dirty, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001557 chunkselm = qr_next(&arena->chunks_cache, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001558 rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
Jason Evansee41ad42015-02-15 18:04:46 -08001559 size_t npages;
Jason Evans38e42d32015-03-10 18:15:40 -07001560 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansaa5113b2014-01-14 16:23:03 -08001561
Jason Evansf5c8f372015-03-10 18:29:49 -07001562 if (rdelm == &chunkselm->rd) {
Jason Evans99bd94f2015-02-18 16:40:53 -08001563 extent_node_t *chunkselm_next;
1564 bool zero;
Jason Evansee41ad42015-02-15 18:04:46 -08001565 UNUSED void *chunk;
Jason Evansaa5113b2014-01-14 16:23:03 -08001566
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001567 npages = extent_node_size_get(chunkselm) >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001568 if (opt_purge == purge_mode_decay && arena->ndirty -
1569 (nstashed + npages) < ndirty_limit)
1570 break;
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001571
Jason Evans738e0892015-02-18 01:15:50 -08001572 chunkselm_next = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001573 /*
Jason Evans99bd94f2015-02-18 16:40:53 -08001574 * Allocate. chunkselm remains valid due to the
1575 * dalloc_node=false argument to chunk_alloc_cache().
Jason Evansee41ad42015-02-15 18:04:46 -08001576 */
Jason Evansee41ad42015-02-15 18:04:46 -08001577 zero = false;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001578 chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
Jason Evans99bd94f2015-02-18 16:40:53 -08001579 extent_node_addr_get(chunkselm),
1580 extent_node_size_get(chunkselm), chunksize, &zero,
1581 false);
1582 assert(chunk == extent_node_addr_get(chunkselm));
1583 assert(zero == extent_node_zeroed_get(chunkselm));
1584 extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
Jason Evans738e0892015-02-18 01:15:50 -08001585 purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001586 assert(npages == (extent_node_size_get(chunkselm) >>
1587 LG_PAGE));
Jason Evansee41ad42015-02-15 18:04:46 -08001588 chunkselm = chunkselm_next;
1589 } else {
1590 arena_chunk_t *chunk =
Jason Evans38e42d32015-03-10 18:15:40 -07001591 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1592 arena_chunk_map_misc_t *miscelm =
1593 arena_rd_to_miscelm(rdelm);
1594 size_t pageind = arena_miscelm_to_pageind(miscelm);
1595 arena_run_t *run = &miscelm->run;
Jason Evansee41ad42015-02-15 18:04:46 -08001596 size_t run_size =
1597 arena_mapbits_unallocated_size_get(chunk, pageind);
Jason Evans070b3c32014-08-14 14:45:58 -07001598
Jason Evansee41ad42015-02-15 18:04:46 -08001599 npages = run_size >> LG_PAGE;
Jason Evans243f7a02016-02-19 20:09:31 -08001600 if (opt_purge == purge_mode_decay && arena->ndirty -
1601 (nstashed + npages) < ndirty_limit)
1602 break;
Jason Evansee41ad42015-02-15 18:04:46 -08001603
1604 assert(pageind + npages <= chunk_npages);
1605 assert(arena_mapbits_dirty_get(chunk, pageind) ==
1606 arena_mapbits_dirty_get(chunk, pageind+npages-1));
1607
1608 /*
1609 * If purging the spare chunk's run, make it available
1610 * prior to allocation.
1611 */
1612 if (chunk == arena->spare)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001613 arena_chunk_alloc(tsdn, arena);
Jason Evansee41ad42015-02-15 18:04:46 -08001614
1615 /* Temporarily allocate the free dirty run. */
1616 arena_run_split_large(arena, run, run_size, false);
Jason Evans339c2b22015-02-17 22:25:56 -08001617 /* Stash. */
Jason Evansee41ad42015-02-15 18:04:46 -08001618 if (false)
Jason Evans38e42d32015-03-10 18:15:40 -07001619 qr_new(rdelm, rd_link); /* Redundant. */
Jason Evansee41ad42015-02-15 18:04:46 -08001620 else {
Jason Evans38e42d32015-03-10 18:15:40 -07001621 assert(qr_next(rdelm, rd_link) == rdelm);
1622 assert(qr_prev(rdelm, rd_link) == rdelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001623 }
Jason Evans38e42d32015-03-10 18:15:40 -07001624 qr_meld(purge_runs_sentinel, rdelm, rd_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001625 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001626
Qinfan Wue9708002014-07-21 18:09:04 -07001627 nstashed += npages;
Jason Evans243f7a02016-02-19 20:09:31 -08001628 if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1629 ndirty_limit)
Qinfan Wue9708002014-07-21 18:09:04 -07001630 break;
Jason Evansaa5113b2014-01-14 16:23:03 -08001631 }
Qinfan Wue9708002014-07-21 18:09:04 -07001632
1633 return (nstashed);
Jason Evansaa5113b2014-01-14 16:23:03 -08001634}
1635
1636static size_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07001637arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001638 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001639 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001640{
Qinfan Wue9708002014-07-21 18:09:04 -07001641 size_t npurged, nmadvise;
Jason Evans38e42d32015-03-10 18:15:40 -07001642 arena_runs_dirty_link_t *rdelm;
Jason Evansee41ad42015-02-15 18:04:46 -08001643 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001644
Jason Evansaa5113b2014-01-14 16:23:03 -08001645 if (config_stats)
1646 nmadvise = 0;
1647 npurged = 0;
Qinfan Wue9708002014-07-21 18:09:04 -07001648
Jason Evansc1e00ef2016-05-10 22:21:10 -07001649 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans38e42d32015-03-10 18:15:40 -07001650 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001651 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001652 rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
Jason Evansee41ad42015-02-15 18:04:46 -08001653 size_t npages;
Qinfan Wue9708002014-07-21 18:09:04 -07001654
Jason Evansf5c8f372015-03-10 18:29:49 -07001655 if (rdelm == &chunkselm->rd) {
Jason Evansb49a3342015-07-28 11:28:19 -04001656 /*
1657 * Don't actually purge the chunk here because 1)
1658 * chunkselm is embedded in the chunk and must remain
1659 * valid, and 2) we deallocate the chunk in
1660 * arena_unstash_purged(), where it is destroyed,
1661 * decommitted, or purged, depending on chunk
1662 * deallocation policy.
1663 */
Jason Evansee41ad42015-02-15 18:04:46 -08001664 size_t size = extent_node_size_get(chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001665 npages = size >> LG_PAGE;
Jason Evans738e0892015-02-18 01:15:50 -08001666 chunkselm = qr_next(chunkselm, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08001667 } else {
Jason Evans45186f02015-08-10 23:03:34 -07001668 size_t pageind, run_size, flag_unzeroed, flags, i;
1669 bool decommitted;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001670 arena_chunk_t *chunk =
1671 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001672 arena_chunk_map_misc_t *miscelm =
1673 arena_rd_to_miscelm(rdelm);
1674 pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansee41ad42015-02-15 18:04:46 -08001675 run_size = arena_mapbits_large_size_get(chunk, pageind);
1676 npages = run_size >> LG_PAGE;
Qinfan Wue9708002014-07-21 18:09:04 -07001677
Jason Evansee41ad42015-02-15 18:04:46 -08001678 assert(pageind + npages <= chunk_npages);
Jason Evansde249c82015-08-09 16:47:27 -07001679 assert(!arena_mapbits_decommitted_get(chunk, pageind));
1680 assert(!arena_mapbits_decommitted_get(chunk,
1681 pageind+npages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07001682 decommitted = !chunk_hooks->decommit(chunk, chunksize,
1683 pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1684 if (decommitted) {
Jason Evans45186f02015-08-10 23:03:34 -07001685 flag_unzeroed = 0;
1686 flags = CHUNK_MAP_DECOMMITTED;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001687 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001688 flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001689 chunk_hooks, chunk, chunksize, pageind <<
Jason Evans45186f02015-08-10 23:03:34 -07001690 LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1691 flags = flag_unzeroed;
Jason Evans8fadb1a2015-08-04 10:49:46 -07001692 }
Jason Evans45186f02015-08-10 23:03:34 -07001693 arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1694 flags);
1695 arena_mapbits_large_set(chunk, pageind, run_size,
1696 flags);
Jason Evansee41ad42015-02-15 18:04:46 -08001697
1698 /*
Jason Evans45186f02015-08-10 23:03:34 -07001699 * Set the unzeroed flag for internal pages, now that
Jason Evans8d6a3e82015-03-18 18:55:33 -07001700 * chunk_purge_wrapper() has returned whether the pages
1701 * were zeroed as a side effect of purging. This chunk
1702 * map modification is safe even though the arena mutex
Jason Evansee41ad42015-02-15 18:04:46 -08001703 * isn't currently owned by this thread, because the run
1704 * is marked as allocated, thus protecting it from being
1705 * modified by any other thread. As long as these
1706 * writes don't perturb the first and last elements'
1707 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1708 */
Jason Evans45186f02015-08-10 23:03:34 -07001709 for (i = 1; i < npages-1; i++) {
1710 arena_mapbits_internal_set(chunk, pageind+i,
Jason Evansee41ad42015-02-15 18:04:46 -08001711 flag_unzeroed);
1712 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001713 }
Qinfan Wue9708002014-07-21 18:09:04 -07001714
Jason Evansaa5113b2014-01-14 16:23:03 -08001715 npurged += npages;
1716 if (config_stats)
1717 nmadvise++;
1718 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001719 malloc_mutex_lock(tsdn, &arena->lock);
Qinfan Wue9708002014-07-21 18:09:04 -07001720
1721 if (config_stats) {
Jason Evansaa5113b2014-01-14 16:23:03 -08001722 arena->stats.nmadvise += nmadvise;
Qinfan Wue9708002014-07-21 18:09:04 -07001723 arena->stats.purged += npurged;
1724 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001725
1726 return (npurged);
1727}
1728
1729static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001730arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
Jason Evans38e42d32015-03-10 18:15:40 -07001731 arena_runs_dirty_link_t *purge_runs_sentinel,
Jason Evansee41ad42015-02-15 18:04:46 -08001732 extent_node_t *purge_chunks_sentinel)
Jason Evansaa5113b2014-01-14 16:23:03 -08001733{
Jason Evans38e42d32015-03-10 18:15:40 -07001734 arena_runs_dirty_link_t *rdelm, *rdelm_next;
Jason Evansee41ad42015-02-15 18:04:46 -08001735 extent_node_t *chunkselm;
Jason Evansaa5113b2014-01-14 16:23:03 -08001736
Jason Evansb49a3342015-07-28 11:28:19 -04001737 /* Deallocate chunks/runs. */
Jason Evans38e42d32015-03-10 18:15:40 -07001738 for (rdelm = qr_next(purge_runs_sentinel, rd_link),
Jason Evans738e0892015-02-18 01:15:50 -08001739 chunkselm = qr_next(purge_chunks_sentinel, cc_link);
Jason Evans38e42d32015-03-10 18:15:40 -07001740 rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1741 rdelm_next = qr_next(rdelm, rd_link);
Jason Evansf5c8f372015-03-10 18:29:49 -07001742 if (rdelm == &chunkselm->rd) {
Jason Evansee41ad42015-02-15 18:04:46 -08001743 extent_node_t *chunkselm_next = qr_next(chunkselm,
Jason Evans738e0892015-02-18 01:15:50 -08001744 cc_link);
Jason Evans339c2b22015-02-17 22:25:56 -08001745 void *addr = extent_node_addr_get(chunkselm);
1746 size_t size = extent_node_size_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001747 bool zeroed = extent_node_zeroed_get(chunkselm);
Jason Evansde249c82015-08-09 16:47:27 -07001748 bool committed = extent_node_committed_get(chunkselm);
Jason Evans738e0892015-02-18 01:15:50 -08001749 extent_node_dirty_remove(chunkselm);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001750 arena_node_dalloc(tsdn, arena, chunkselm);
Jason Evansee41ad42015-02-15 18:04:46 -08001751 chunkselm = chunkselm_next;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001752 chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
Jason Evansb2c0d632016-04-13 23:36:15 -07001753 size, zeroed, committed);
Jason Evansee41ad42015-02-15 18:04:46 -08001754 } else {
Jason Evans8fadb1a2015-08-04 10:49:46 -07001755 arena_chunk_t *chunk =
1756 (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
Jason Evans38e42d32015-03-10 18:15:40 -07001757 arena_chunk_map_misc_t *miscelm =
1758 arena_rd_to_miscelm(rdelm);
Jason Evans8fadb1a2015-08-04 10:49:46 -07001759 size_t pageind = arena_miscelm_to_pageind(miscelm);
1760 bool decommitted = (arena_mapbits_decommitted_get(chunk,
1761 pageind) != 0);
Jason Evans38e42d32015-03-10 18:15:40 -07001762 arena_run_t *run = &miscelm->run;
1763 qr_remove(rdelm, rd_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001764 arena_run_dalloc(tsdn, arena, run, false, true,
Jason Evansb2c0d632016-04-13 23:36:15 -07001765 decommitted);
Jason Evansee41ad42015-02-15 18:04:46 -08001766 }
Jason Evansaa5113b2014-01-14 16:23:03 -08001767 }
1768}
1769
Jason Evans243f7a02016-02-19 20:09:31 -08001770/*
1771 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 1772 * - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
1773 * desired state:
1774 * (arena->ndirty <= ndirty_limit)
1775 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1776 * violating the invariant:
1777 * (arena->ndirty >= ndirty_limit)
1778 */
Jason Evans8d6a3e82015-03-18 18:55:33 -07001779static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001780arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
Jason Evanse476f8a2010-01-16 09:53:50 -08001781{
Jason Evansc1e00ef2016-05-10 22:21:10 -07001782 chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001783 size_t npurge, npurged;
Jason Evans38e42d32015-03-10 18:15:40 -07001784 arena_runs_dirty_link_t purge_runs_sentinel;
Jason Evansee41ad42015-02-15 18:04:46 -08001785 extent_node_t purge_chunks_sentinel;
Qinfan Wue9708002014-07-21 18:09:04 -07001786
Jason Evans0a9f9a42015-06-22 18:50:32 -07001787 arena->purging = true;
1788
Jason Evans2b2f6dc2014-11-01 02:29:10 -07001789 /*
1790 * Calls to arena_dirty_count() are disabled even for debug builds
1791 * because overhead grows nonlinearly as memory usage increases.
1792 */
1793 if (false && config_debug) {
Qinfan Wu90737fc2014-07-21 19:39:20 -07001794 size_t ndirty = arena_dirty_count(arena);
Qinfan Wua244e502014-07-21 10:23:36 -07001795 assert(ndirty == arena->ndirty);
Jason Evans2caa4712010-03-04 21:35:07 -08001796 }
Jason Evans243f7a02016-02-19 20:09:31 -08001797 assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1798 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001799
1800 qr_new(&purge_runs_sentinel, rd_link);
1801 extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1802
Jason Evansc1e00ef2016-05-10 22:21:10 -07001803 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001804 &purge_runs_sentinel, &purge_chunks_sentinel);
1805 if (npurge == 0)
1806 goto label_return;
Jason Evansc1e00ef2016-05-10 22:21:10 -07001807 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
Jason Evansb2c0d632016-04-13 23:36:15 -07001808 &purge_runs_sentinel, &purge_chunks_sentinel);
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001809 assert(npurged == npurge);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001810 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001811 &purge_chunks_sentinel);
Jason Evanse476f8a2010-01-16 09:53:50 -08001812
Jason Evans7372b152012-02-10 20:22:09 -08001813 if (config_stats)
1814 arena->stats.npurge++;
Jason Evanse476f8a2010-01-16 09:53:50 -08001815
Jason Evans1a4ad3c2016-02-19 19:51:23 -08001816label_return:
Jason Evans0a9f9a42015-06-22 18:50:32 -07001817 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08001818}
1819
Jason Evans6005f072010-09-30 16:55:08 -07001820void
Jason Evansc1e00ef2016-05-10 22:21:10 -07001821arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
Jason Evans6005f072010-09-30 16:55:08 -07001822{
1823
Jason Evansc1e00ef2016-05-10 22:21:10 -07001824 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans243f7a02016-02-19 20:09:31 -08001825 if (all)
Jason Evansc1e00ef2016-05-10 22:21:10 -07001826 arena_purge_to_limit(tsdn, arena, 0);
Jason Evans243f7a02016-02-19 20:09:31 -08001827 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07001828 arena_maybe_purge(tsdn, arena);
1829 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans6005f072010-09-30 16:55:08 -07001830}
1831
Jason Evanse476f8a2010-01-16 09:53:50 -08001832static void
Jason Evans19ff2ce2016-04-22 14:37:17 -07001833arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1834{
1835 size_t pageind, npages;
1836
1837 cassert(config_prof);
1838 assert(opt_prof);
1839
1840 /*
1841 * Iterate over the allocated runs and remove profiled allocations from
1842 * the sample set.
1843 */
1844 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1845 if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1846 if (arena_mapbits_large_get(chunk, pageind) != 0) {
1847 void *ptr = (void *)((uintptr_t)chunk + (pageind
1848 << LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07001849 size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1850 config_prof);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001851
1852 prof_free(tsd, ptr, usize);
1853 npages = arena_mapbits_large_size_get(chunk,
1854 pageind) >> LG_PAGE;
1855 } else {
1856 /* Skip small run. */
1857 size_t binind = arena_mapbits_binind_get(chunk,
1858 pageind);
1859 arena_bin_info_t *bin_info =
1860 &arena_bin_info[binind];
1861 npages = bin_info->run_size >> LG_PAGE;
1862 }
1863 } else {
1864 /* Skip unallocated run. */
1865 npages = arena_mapbits_unallocated_size_get(chunk,
1866 pageind) >> LG_PAGE;
1867 }
1868 assert(pageind + npages <= chunk_npages);
1869 }
1870}
1871
1872void
1873arena_reset(tsd_t *tsd, arena_t *arena)
1874{
1875 unsigned i;
1876 extent_node_t *node;
1877
1878 /*
1879 * Locking in this function is unintuitive. The caller guarantees that
1880 * no concurrent operations are happening in this arena, but there are
1881 * still reasons that some locking is necessary:
1882 *
1883 * - Some of the functions in the transitive closure of calls assume
1884 * appropriate locks are held, and in some cases these locks are
1885 * temporarily dropped to avoid lock order reversal or deadlock due to
1886 * reentry.
1887 * - mallctl("epoch", ...) may concurrently refresh stats. While
1888 * strictly speaking this is a "concurrent operation", disallowing
1889 * stats refreshes would impose an inconvenient burden.
1890 */
1891
1892 /* Remove large allocations from prof sample set. */
1893 if (config_prof && opt_prof) {
1894 ql_foreach(node, &arena->achunks, ql_link) {
1895 arena_achunk_prof_reset(tsd, arena,
1896 extent_node_addr_get(node));
1897 }
1898 }
1899
Jason Evans7e674952016-04-25 13:26:54 -07001900 /* Reset curruns for large size classes. */
1901 if (config_stats) {
1902 for (i = 0; i < nlclasses; i++)
1903 arena->stats.lstats[i].curruns = 0;
1904 }
1905
Jason Evans19ff2ce2016-04-22 14:37:17 -07001906 /* Huge allocations. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07001907 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001908 for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1909 ql_last(&arena->huge, ql_link)) {
1910 void *ptr = extent_node_addr_get(node);
Jason Evans7e674952016-04-25 13:26:54 -07001911 size_t usize;
Jason Evans19ff2ce2016-04-22 14:37:17 -07001912
Jason Evansc1e00ef2016-05-10 22:21:10 -07001913 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001914 if (config_stats || (config_prof && opt_prof))
Jason Evansc1e00ef2016-05-10 22:21:10 -07001915 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
Jason Evans7e674952016-04-25 13:26:54 -07001916 /* Remove huge allocation from prof sample set. */
1917 if (config_prof && opt_prof)
Jason Evans19ff2ce2016-04-22 14:37:17 -07001918 prof_free(tsd, ptr, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001919 huge_dalloc(tsd_tsdn(tsd), ptr);
1920 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans7e674952016-04-25 13:26:54 -07001921 /* Cancel out unwanted effects on stats. */
1922 if (config_stats)
1923 arena_huge_reset_stats_cancel(arena, usize);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001924 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001925 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001926
Jason Evansc1e00ef2016-05-10 22:21:10 -07001927 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001928
1929 /* Bins. */
1930 for (i = 0; i < NBINS; i++) {
1931 arena_bin_t *bin = &arena->bins[i];
Jason Evansc1e00ef2016-05-10 22:21:10 -07001932 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001933 bin->runcur = NULL;
1934 arena_run_heap_new(&bin->runs);
1935 if (config_stats) {
1936 bin->stats.curregs = 0;
1937 bin->stats.curruns = 0;
1938 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07001939 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001940 }
1941
1942 /*
1943 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
1944 * chains directly correspond.
1945 */
1946 qr_new(&arena->runs_dirty, rd_link);
1947 for (node = qr_next(&arena->chunks_cache, cc_link);
1948 node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
1949 qr_new(&node->rd, rd_link);
1950 qr_meld(&arena->runs_dirty, &node->rd, rd_link);
1951 }
1952
1953 /* Arena chunks. */
1954 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
1955 ql_last(&arena->achunks, ql_link)) {
1956 ql_remove(&arena->achunks, node, ql_link);
Jason Evansc1e00ef2016-05-10 22:21:10 -07001957 arena_chunk_discard(tsd_tsdn(tsd), arena,
1958 extent_node_addr_get(node));
Jason Evans19ff2ce2016-04-22 14:37:17 -07001959 }
1960
1961 /* Spare. */
1962 if (arena->spare != NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07001963 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001964 arena->spare = NULL;
1965 }
1966
1967 assert(!arena->purging);
1968 arena->nactive = 0;
1969
 1970	for (i = 0; i < runs_avail_nclasses; i++)
1971 arena_run_heap_new(&arena->runs_avail[i]);
1972
Jason Evansc1e00ef2016-05-10 22:21:10 -07001973 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
Jason Evans19ff2ce2016-04-22 14:37:17 -07001974}
1975
1976static void
Jason Evansaa5113b2014-01-14 16:23:03 -08001977arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07001978 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1979 size_t flag_decommitted)
Jason Evanse476f8a2010-01-16 09:53:50 -08001980{
Jason Evansaa5113b2014-01-14 16:23:03 -08001981 size_t size = *p_size;
1982 size_t run_ind = *p_run_ind;
1983 size_t run_pages = *p_run_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08001984
1985 /* Try to coalesce forward. */
1986 if (run_ind + run_pages < chunk_npages &&
Jason Evans203484e2012-05-02 00:30:36 -07001987 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
Jason Evans8fadb1a2015-08-04 10:49:46 -07001988 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1989 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1990 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07001991 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1992 run_ind+run_pages);
Jason Evansae4c7b42012-04-02 07:04:34 -07001993 size_t nrun_pages = nrun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08001994
1995 /*
1996 * Remove successor from runs_avail; the coalesced run is
1997 * inserted later.
1998 */
Jason Evans203484e2012-05-02 00:30:36 -07001999 assert(arena_mapbits_unallocated_size_get(chunk,
2000 run_ind+run_pages+nrun_pages-1) == nrun_size);
2001 assert(arena_mapbits_dirty_get(chunk,
2002 run_ind+run_pages+nrun_pages-1) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002003 assert(arena_mapbits_decommitted_get(chunk,
2004 run_ind+run_pages+nrun_pages-1) == flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002005 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002006
Jason Evansee41ad42015-02-15 18:04:46 -08002007 /*
2008 * If the successor is dirty, remove it from the set of dirty
2009 * pages.
2010 */
Qinfan Wu04d60a12014-07-18 14:21:17 -07002011 if (flag_dirty != 0) {
Jason Evansee41ad42015-02-15 18:04:46 -08002012 arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
Jason Evans070b3c32014-08-14 14:45:58 -07002013 nrun_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002014 }
2015
Jason Evanse476f8a2010-01-16 09:53:50 -08002016 size += nrun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002017 run_pages += nrun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002018
Jason Evans203484e2012-05-02 00:30:36 -07002019 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2020 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2021 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002022 }
2023
2024 /* Try to coalesce backward. */
Jason Evansaa5113b2014-01-14 16:23:03 -08002025 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2026 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
Jason Evans8fadb1a2015-08-04 10:49:46 -07002027 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2028 flag_decommitted) {
Jason Evans203484e2012-05-02 00:30:36 -07002029 size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2030 run_ind-1);
Jason Evansae4c7b42012-04-02 07:04:34 -07002031 size_t prun_pages = prun_size >> LG_PAGE;
Jason Evanse476f8a2010-01-16 09:53:50 -08002032
Jason Evans12ca9142010-10-17 19:56:09 -07002033 run_ind -= prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002034
2035 /*
2036 * Remove predecessor from runs_avail; the coalesced run is
2037 * inserted later.
2038 */
Jason Evans203484e2012-05-02 00:30:36 -07002039 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2040 prun_size);
2041 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002042 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2043 flag_decommitted);
Qinfan Wu90737fc2014-07-21 19:39:20 -07002044 arena_avail_remove(arena, chunk, run_ind, prun_pages);
Jason Evanse476f8a2010-01-16 09:53:50 -08002045
Jason Evansee41ad42015-02-15 18:04:46 -08002046 /*
2047 * If the predecessor is dirty, remove it from the set of dirty
2048 * pages.
2049 */
2050 if (flag_dirty != 0) {
2051 arena_run_dirty_remove(arena, chunk, run_ind,
2052 prun_pages);
2053 }
Qinfan Wu04d60a12014-07-18 14:21:17 -07002054
Jason Evanse476f8a2010-01-16 09:53:50 -08002055 size += prun_size;
Jason Evans12ca9142010-10-17 19:56:09 -07002056 run_pages += prun_pages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002057
Jason Evans203484e2012-05-02 00:30:36 -07002058 arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2059 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2060 size);
Jason Evanse476f8a2010-01-16 09:53:50 -08002061 }
2062
Jason Evansaa5113b2014-01-14 16:23:03 -08002063 *p_size = size;
2064 *p_run_ind = run_ind;
2065 *p_run_pages = run_pages;
2066}
2067
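/*
 * Shape of arena_run_coalesce() (informal): a neighboring unallocated run is
 * absorbed only when its dirty and decommitted flags match those of the run
 * being freed, e.g.
 *
 *   [pred clean][freed clean][succ clean]  ->  one larger clean run
 *   [pred dirty][freed clean]              ->  left as two runs
 *
 * so every free run keeps a uniform dirty/decommitted state across its pages,
 * the invariant asserted in arena_run_dalloc().
 */
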
Jason Evans8fadb1a2015-08-04 10:49:46 -07002068static size_t
2069arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2070 size_t run_ind)
2071{
2072 size_t size;
2073
2074 assert(run_ind >= map_bias);
2075 assert(run_ind < chunk_npages);
2076
2077 if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2078 size = arena_mapbits_large_size_get(chunk, run_ind);
2079 assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2080 run_ind+(size>>LG_PAGE)-1) == 0);
2081 } else {
2082 arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2083 size = bin_info->run_size;
2084 }
2085
2086 return (size);
2087}
2088
Jason Evansaa5113b2014-01-14 16:23:03 -08002089static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002090arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
Jason Evansb2c0d632016-04-13 23:36:15 -07002091 bool cleaned, bool decommitted)
Jason Evansaa5113b2014-01-14 16:23:03 -08002092{
2093 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002094 arena_chunk_map_misc_t *miscelm;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002095 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
Jason Evansaa5113b2014-01-14 16:23:03 -08002096
2097 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002098 miscelm = arena_run_to_miscelm(run);
2099 run_ind = arena_miscelm_to_pageind(miscelm);
Jason Evansaa5113b2014-01-14 16:23:03 -08002100 assert(run_ind >= map_bias);
2101 assert(run_ind < chunk_npages);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002102 size = arena_run_size_get(arena, chunk, run, run_ind);
Jason Evansaa5113b2014-01-14 16:23:03 -08002103 run_pages = (size >> LG_PAGE);
Jason Evans40ee9aa2016-02-27 12:34:50 -08002104 arena_nactive_sub(arena, run_pages);
Jason Evansaa5113b2014-01-14 16:23:03 -08002105
2106 /*
2107 * The run is dirty if the caller claims to have dirtied it, as well as
2108 * if it was already dirty before being allocated and the caller
2109 * doesn't claim to have cleaned it.
2110 */
2111 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2112 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002113 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2114 != 0)
Jason Evansaa5113b2014-01-14 16:23:03 -08002115 dirty = true;
2116 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002117 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
Jason Evansaa5113b2014-01-14 16:23:03 -08002118
2119 /* Mark pages as unallocated in the chunk map. */
Jason Evans8fadb1a2015-08-04 10:49:46 -07002120 if (dirty || decommitted) {
2121 size_t flags = flag_dirty | flag_decommitted;
2122 arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002123 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
Jason Evans8fadb1a2015-08-04 10:49:46 -07002124 flags);
Jason Evansaa5113b2014-01-14 16:23:03 -08002125 } else {
2126 arena_mapbits_unallocated_set(chunk, run_ind, size,
2127 arena_mapbits_unzeroed_get(chunk, run_ind));
2128 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2129 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2130 }
2131
Jason Evans8fadb1a2015-08-04 10:49:46 -07002132 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2133 flag_dirty, flag_decommitted);
Jason Evansaa5113b2014-01-14 16:23:03 -08002134
Jason Evanse476f8a2010-01-16 09:53:50 -08002135 /* Insert into runs_avail, now that coalescing is complete. */
Jason Evans203484e2012-05-02 00:30:36 -07002136 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2137 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2138 assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2139 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
Jason Evans8fadb1a2015-08-04 10:49:46 -07002140 assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2141 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
Qinfan Wu90737fc2014-07-21 19:39:20 -07002142 arena_avail_insert(arena, chunk, run_ind, run_pages);
Jason Evans8d4203c2010-04-13 20:53:21 -07002143
Jason Evans070b3c32014-08-14 14:45:58 -07002144 if (dirty)
Jason Evansee41ad42015-02-15 18:04:46 -08002145 arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
Qinfan Wu04d60a12014-07-18 14:21:17 -07002146
Jason Evans203484e2012-05-02 00:30:36 -07002147 /* Deallocate chunk if it is now completely unused. */
Jason Evans155bfa72014-10-05 17:54:10 -07002148 if (size == arena_maxrun) {
Jason Evans203484e2012-05-02 00:30:36 -07002149 assert(run_ind == map_bias);
Jason Evans155bfa72014-10-05 17:54:10 -07002150 assert(run_pages == (arena_maxrun >> LG_PAGE));
Jason Evansc1e00ef2016-05-10 22:21:10 -07002151 arena_chunk_dalloc(tsdn, arena, chunk);
Jason Evans203484e2012-05-02 00:30:36 -07002152 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002153
Jason Evans4fb7f512010-01-27 18:27:09 -08002154 /*
Jason Evans8d4203c2010-04-13 20:53:21 -07002155 * It is okay to do dirty page processing here even if the chunk was
Jason Evans4fb7f512010-01-27 18:27:09 -08002156 * deallocated above, since in that case it is the spare. Waiting
2157 * until after possible chunk deallocation to do dirty processing
2158 * allows for an old spare to be fully deallocated, thus decreasing the
2159 * chances of spuriously crossing the dirty page purging threshold.
2160 */
Jason Evans8d4203c2010-04-13 20:53:21 -07002161 if (dirty)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002162 arena_maybe_purge(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002163}
2164
2165static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002166arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002167 arena_run_t *run, size_t oldsize, size_t newsize)
Jason Evanse476f8a2010-01-16 09:53:50 -08002168{
Jason Evans0c5dd032014-09-29 01:31:39 -07002169 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2170 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002171 size_t head_npages = (oldsize - newsize) >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002172 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002173 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2174 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2175 CHUNK_MAP_UNZEROED : 0;
Jason Evanse476f8a2010-01-16 09:53:50 -08002176
2177 assert(oldsize > newsize);
2178
2179 /*
2180 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002181 * leading run as separately allocated. Set the last element of each
2182 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002183 */
Jason Evans203484e2012-05-02 00:30:36 -07002184 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002185 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2186 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2187 pageind+head_npages-1)));
2188 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2189 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002190
Jason Evans7372b152012-02-10 20:22:09 -08002191 if (config_debug) {
Jason Evansae4c7b42012-04-02 07:04:34 -07002192 UNUSED size_t tail_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002193 assert(arena_mapbits_large_size_get(chunk,
2194 pageind+head_npages+tail_npages-1) == 0);
2195 assert(arena_mapbits_dirty_get(chunk,
2196 pageind+head_npages+tail_npages-1) == flag_dirty);
Jason Evans940a2e02010-10-17 17:51:37 -07002197 }
Jason Evansd8ceef62012-05-10 20:59:39 -07002198 arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002199 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2200 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002201
Jason Evansc1e00ef2016-05-10 22:21:10 -07002202 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
Jason Evansb2c0d632016-04-13 23:36:15 -07002203 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002204}
2205
2206static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002207arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002208 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
Jason Evanse476f8a2010-01-16 09:53:50 -08002209{
Jason Evans0c5dd032014-09-29 01:31:39 -07002210 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2211 size_t pageind = arena_miscelm_to_pageind(miscelm);
Jason Evansae4c7b42012-04-02 07:04:34 -07002212 size_t head_npages = newsize >> LG_PAGE;
Jason Evans203484e2012-05-02 00:30:36 -07002213 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
Jason Evans1f27abc2015-08-11 12:42:33 -07002214 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2215 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2216 CHUNK_MAP_UNZEROED : 0;
Jason Evans0c5dd032014-09-29 01:31:39 -07002217 arena_chunk_map_misc_t *tail_miscelm;
2218 arena_run_t *tail_run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002219
2220 assert(oldsize > newsize);
2221
2222 /*
2223 * Update the chunk map so that arena_run_dalloc() can treat the
Jason Evans940a2e02010-10-17 17:51:37 -07002224 * trailing run as separately allocated. Set the last element of each
2225 * run first, in case of single-page runs.
Jason Evanse476f8a2010-01-16 09:53:50 -08002226 */
Jason Evans203484e2012-05-02 00:30:36 -07002227 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
Jason Evans1f27abc2015-08-11 12:42:33 -07002228 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2229 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2230 pageind+head_npages-1)));
2231 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2232 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
Jason Evans940a2e02010-10-17 17:51:37 -07002233
Jason Evans203484e2012-05-02 00:30:36 -07002234 if (config_debug) {
2235 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2236 assert(arena_mapbits_large_size_get(chunk,
2237 pageind+head_npages+tail_npages-1) == 0);
2238 assert(arena_mapbits_dirty_get(chunk,
2239 pageind+head_npages+tail_npages-1) == flag_dirty);
2240 }
2241 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
Jason Evans1f27abc2015-08-11 12:42:33 -07002242 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2243 pageind+head_npages)));
Jason Evanse476f8a2010-01-16 09:53:50 -08002244
Jason Evans61a6dfc2016-03-23 16:04:38 -07002245 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
Jason Evans0c5dd032014-09-29 01:31:39 -07002246 tail_run = &tail_miscelm->run;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002247 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
Jason Evansb2c0d632016-04-13 23:36:15 -07002248 != 0));
Jason Evanse476f8a2010-01-16 09:53:50 -08002249}
2250
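/*
 * The next two helpers manage bin->runs, the heap of non-full runs other than
 * bin->runcur.  Insertion stores the run's miscelm; tryget pops the first run
 * (the heap order appears to favor low addresses, consistent with the runcur
 * policy in arena_bin_lower_run()) and counts a rerun in the bin stats.
 */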
Jason Evanse7a10582012-02-13 17:36:52 -08002251static void
2252arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2253{
Jason Evans0c5dd032014-09-29 01:31:39 -07002254 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
Jason Evanse7a10582012-02-13 17:36:52 -08002255
Jason Evansc6a2c392016-03-26 17:30:37 -07002256 arena_run_heap_insert(&bin->runs, miscelm);
Jason Evanse7a10582012-02-13 17:36:52 -08002257}
2258
2259static arena_run_t *
2260arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2261{
Jason Evansc6a2c392016-03-26 17:30:37 -07002262 arena_chunk_map_misc_t *miscelm;
2263
2264 miscelm = arena_run_heap_remove_first(&bin->runs);
2265 if (miscelm == NULL)
2266 return (NULL);
2267 if (config_stats)
2268 bin->stats.reruns++;
2269
2270 return (&miscelm->run);
Jason Evanse7a10582012-02-13 17:36:52 -08002271}
2272
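/*
 * Return a non-full run for bin, allocating a fresh one if the heap is empty.
 * The caller holds bin->lock; allocating a run needs arena->lock, so the bin
 * lock is dropped around arena_run_alloc_small() and the heap is searched one
 * more time after reacquiring it, in case another thread refilled it.
 */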
2273static arena_run_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002274arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002275{
Jason Evanse476f8a2010-01-16 09:53:50 -08002276 arena_run_t *run;
Jason Evansd01fd192015-08-19 15:21:32 -07002277 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002278 arena_bin_info_t *bin_info;
Jason Evanse476f8a2010-01-16 09:53:50 -08002279
2280 /* Look for a usable run. */
Jason Evanse7a10582012-02-13 17:36:52 -08002281 run = arena_bin_nonfull_run_tryget(bin);
2282 if (run != NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002283 return (run);
Jason Evanse476f8a2010-01-16 09:53:50 -08002284 /* No existing runs have any space available. */
2285
Jason Evans49f7e8f2011-03-15 13:59:15 -07002286 binind = arena_bin_index(arena, bin);
2287 bin_info = &arena_bin_info[binind];
2288
Jason Evanse476f8a2010-01-16 09:53:50 -08002289 /* Allocate a new run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002290 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002291 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002292 malloc_mutex_lock(tsdn, &arena->lock);
2293 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
Jason Evanse00572b2010-03-14 19:43:56 -07002294 if (run != NULL) {
2295 /* Initialize run internals. */
Jason Evans381c23d2014-10-10 23:01:03 -07002296 run->binind = binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002297 run->nfree = bin_info->nregs;
Jason Evans0c5dd032014-09-29 01:31:39 -07002298 bitmap_init(run->bitmap, &bin_info->bitmap_info);
Jason Evanse69bee02010-03-15 22:25:23 -07002299 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002300 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002301 /********************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002302 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse69bee02010-03-15 22:25:23 -07002303 if (run != NULL) {
Jason Evans7372b152012-02-10 20:22:09 -08002304 if (config_stats) {
2305 bin->stats.nruns++;
2306 bin->stats.curruns++;
Jason Evans7372b152012-02-10 20:22:09 -08002307 }
Jason Evanse00572b2010-03-14 19:43:56 -07002308 return (run);
2309 }
2310
2311 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002312 * arena_run_alloc_small() failed, but another thread may have made
Jason Evans940a2e02010-10-17 17:51:37 -07002313 * sufficient memory available while this one dropped bin->lock above,
Jason Evanse00572b2010-03-14 19:43:56 -07002314 * so search one more time.
2315 */
Jason Evanse7a10582012-02-13 17:36:52 -08002316 run = arena_bin_nonfull_run_tryget(bin);
2317 if (run != NULL)
Jason Evanse00572b2010-03-14 19:43:56 -07002318 return (run);
Jason Evanse00572b2010-03-14 19:43:56 -07002319
2320 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08002321}
2322
Jason Evans1e0a6362010-03-13 13:41:58 -08002323/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
Jason Evanse476f8a2010-01-16 09:53:50 -08002324static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002325arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002326{
Jason Evansd01fd192015-08-19 15:21:32 -07002327 szind_t binind;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002328 arena_bin_info_t *bin_info;
Jason Evanse00572b2010-03-14 19:43:56 -07002329 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002330
Jason Evans49f7e8f2011-03-15 13:59:15 -07002331 binind = arena_bin_index(arena, bin);
2332 bin_info = &arena_bin_info[binind];
Jason Evanse00572b2010-03-14 19:43:56 -07002333 bin->runcur = NULL;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002334 run = arena_bin_nonfull_run_get(tsdn, arena, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002335 if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2336 /*
2337 * Another thread updated runcur while this one ran without the
2338 * bin lock in arena_bin_nonfull_run_get().
2339 */
Dmitry-Mea306a602015-09-04 13:15:28 +03002340 void *ret;
Jason Evanse00572b2010-03-14 19:43:56 -07002341 assert(bin->runcur->nfree > 0);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002342 ret = arena_run_reg_alloc(bin->runcur, bin_info);
Jason Evanse00572b2010-03-14 19:43:56 -07002343 if (run != NULL) {
Jason Evans940a2e02010-10-17 17:51:37 -07002344 arena_chunk_t *chunk;
2345
2346 /*
Jason Evansaa5113b2014-01-14 16:23:03 -08002347 * arena_run_alloc_small() may have allocated run, or
2348	 * it may have pulled run from the bin's run heap.
2349 * Therefore it is unsafe to make any assumptions about
2350 * how run has previously been used, and
2351 * arena_bin_lower_run() must be called, as if a region
2352 * were just deallocated from the run.
Jason Evans940a2e02010-10-17 17:51:37 -07002353 */
2354 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evansb2c0d632016-04-13 23:36:15 -07002355 if (run->nfree == bin_info->nregs) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002356 arena_dalloc_bin_run(tsdn, arena, chunk, run,
Jason Evansb2c0d632016-04-13 23:36:15 -07002357 bin);
2358 } else
Jason Evans8de6a022010-10-17 20:57:30 -07002359 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse00572b2010-03-14 19:43:56 -07002360 }
2361 return (ret);
2362 }
2363
2364 if (run == NULL)
Jason Evanse476f8a2010-01-16 09:53:50 -08002365 return (NULL);
Jason Evanse00572b2010-03-14 19:43:56 -07002366
2367 bin->runcur = run;
2368
Jason Evanse476f8a2010-01-16 09:53:50 -08002369 assert(bin->runcur->nfree > 0);
2370
Jason Evans49f7e8f2011-03-15 13:59:15 -07002371 return (arena_run_reg_alloc(bin->runcur, bin_info));
Jason Evanse476f8a2010-01-16 09:53:50 -08002372}
2373
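/*
 * Refill a tcache bin with up to (ncached_max >> lg_fill_div) regions, taken
 * from bin->runcur when possible and from arena_bin_malloc_hard() otherwise.
 * On OOM the pointers obtained so far are slid down to sit just below
 * tbin->avail before bailing out; fill stats are updated under bin->lock.
 */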
Jason Evans86815df2010-03-13 20:32:56 -08002374void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002375arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
Jason Evans243f7a02016-02-19 20:09:31 -08002376 szind_t binind, uint64_t prof_accumbytes)
Jason Evanse476f8a2010-01-16 09:53:50 -08002377{
2378 unsigned i, nfill;
2379 arena_bin_t *bin;
Jason Evanse476f8a2010-01-16 09:53:50 -08002380
2381 assert(tbin->ncached == 0);
2382
Jason Evansc1e00ef2016-05-10 22:21:10 -07002383 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2384 prof_idump(tsdn);
Jason Evanse69bee02010-03-15 22:25:23 -07002385 bin = &arena->bins[binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002386 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans1dcb4f82011-03-21 00:18:17 -07002387 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2388 tbin->lg_fill_div); i < nfill; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002389 arena_run_t *run;
2390 void *ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002391 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002392 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002393 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002394 ptr = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evansf11a6772014-10-05 13:05:10 -07002395 if (ptr == NULL) {
2396 /*
2397 * OOM. tbin->avail isn't yet filled down to its first
2398 * element, so the successful allocations (if any) must
Qi Wangf4a0f322015-10-27 15:12:10 -07002399 * be moved just before tbin->avail before bailing out.
Jason Evansf11a6772014-10-05 13:05:10 -07002400 */
2401 if (i > 0) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002402 memmove(tbin->avail - i, tbin->avail - nfill,
Jason Evansf11a6772014-10-05 13:05:10 -07002403 i * sizeof(void *));
2404 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002405 break;
Jason Evansf11a6772014-10-05 13:05:10 -07002406 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002407 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002408 arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2409 true);
2410 }
Jason Evans9c43c132011-03-18 10:53:15 -07002411 /* Insert such that low regions get used first. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002412 *(tbin->avail - nfill + i) = ptr;
Jason Evanse476f8a2010-01-16 09:53:50 -08002413 }
Jason Evans7372b152012-02-10 20:22:09 -08002414 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002415 bin->stats.nmalloc += i;
2416 bin->stats.nrequests += tbin->tstats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002417 bin->stats.curregs += i;
Jason Evans7372b152012-02-10 20:22:09 -08002418 bin->stats.nfills++;
2419 tbin->tstats.nrequests = 0;
2420 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002421 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002422 tbin->ncached = i;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002423 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002424}
Jason Evanse476f8a2010-01-16 09:53:50 -08002425
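/*
 * Junk-fill a small region's redzones.  When zero is true only the redzone
 * bytes on either side of the region are filled, so the zeroed region itself
 * is preserved; otherwise reg_interval bytes starting at ptr - redzone_size
 * are filled.
 */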
Jason Evans122449b2012-04-06 00:35:09 -07002426void
2427arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2428{
2429
Chris Petersona82070e2016-03-27 23:28:39 -07002430 size_t redzone_size = bin_info->redzone_size;
2431
Jason Evans122449b2012-04-06 00:35:09 -07002432 if (zero) {
Chris Petersona82070e2016-03-27 23:28:39 -07002433 memset((void *)((uintptr_t)ptr - redzone_size),
2434 JEMALLOC_ALLOC_JUNK, redzone_size);
2435 memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2436 JEMALLOC_ALLOC_JUNK, redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07002437 } else {
Chris Petersona82070e2016-03-27 23:28:39 -07002438 memset((void *)((uintptr_t)ptr - redzone_size),
2439 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
Jason Evans122449b2012-04-06 00:35:09 -07002440 }
2441}
2442
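/*
 * The JEMALLOC_JET dance below (#undef/#define around the definition, then a
 * function pointer published under the canonical name) lets the test harness
 * interpose on arena_redzone_corruption().  The same pattern recurs for
 * arena_dalloc_junk_small(), arena_dalloc_junk_large(), and
 * arena_ralloc_junk_large() in this file.
 */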
Jason Evans0d6c5d82013-12-17 15:14:36 -08002443#ifdef JEMALLOC_JET
2444#undef arena_redzone_corruption
Jason Evansab0cfe02016-04-18 15:11:20 -07002445#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
Jason Evans0d6c5d82013-12-17 15:14:36 -08002446#endif
2447static void
2448arena_redzone_corruption(void *ptr, size_t usize, bool after,
2449 size_t offset, uint8_t byte)
2450{
2451
Jason Evans5fae7dc2015-07-23 13:56:25 -07002452 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2453 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
Jason Evans0d6c5d82013-12-17 15:14:36 -08002454 after ? "after" : "before", ptr, usize, byte);
2455}
2456#ifdef JEMALLOC_JET
Jason Evans0d6c5d82013-12-17 15:14:36 -08002457#undef arena_redzone_corruption
Jason Evans6b694c42014-01-07 16:47:56 -08002458#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2459arena_redzone_corruption_t *arena_redzone_corruption =
Jason Evansab0cfe02016-04-18 15:11:20 -07002460 JEMALLOC_N(n_arena_redzone_corruption);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002461#endif
2462
2463static void
2464arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
Jason Evans122449b2012-04-06 00:35:09 -07002465{
Jason Evans122449b2012-04-06 00:35:09 -07002466 bool error = false;
2467
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002468 if (opt_junk_alloc) {
Dmitry-Mea306a602015-09-04 13:15:28 +03002469 size_t size = bin_info->reg_size;
2470 size_t redzone_size = bin_info->redzone_size;
2471 size_t i;
2472
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002473 for (i = 1; i <= redzone_size; i++) {
2474 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
Chris Petersona82070e2016-03-27 23:28:39 -07002475 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002476 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002477 arena_redzone_corruption(ptr, size, false, i,
2478 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002479 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002480 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002481 }
2482 }
2483 for (i = 0; i < redzone_size; i++) {
2484 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
Chris Petersona82070e2016-03-27 23:28:39 -07002485 if (*byte != JEMALLOC_ALLOC_JUNK) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002486 error = true;
Jason Evans8fadb1a2015-08-04 10:49:46 -07002487 arena_redzone_corruption(ptr, size, true, i,
2488 *byte);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002489 if (reset)
Chris Petersona82070e2016-03-27 23:28:39 -07002490 *byte = JEMALLOC_ALLOC_JUNK;
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002491 }
Jason Evans122449b2012-04-06 00:35:09 -07002492 }
2493 }
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002494
Jason Evans122449b2012-04-06 00:35:09 -07002495 if (opt_abort && error)
2496 abort();
Jason Evans0d6c5d82013-12-17 15:14:36 -08002497}
Jason Evans122449b2012-04-06 00:35:09 -07002498
Jason Evans6b694c42014-01-07 16:47:56 -08002499#ifdef JEMALLOC_JET
2500#undef arena_dalloc_junk_small
Jason Evansab0cfe02016-04-18 15:11:20 -07002501#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
Jason Evans6b694c42014-01-07 16:47:56 -08002502#endif
Jason Evans0d6c5d82013-12-17 15:14:36 -08002503void
2504arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2505{
2506 size_t redzone_size = bin_info->redzone_size;
2507
2508 arena_redzones_validate(ptr, bin_info, false);
Chris Petersona82070e2016-03-27 23:28:39 -07002509 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
Jason Evans122449b2012-04-06 00:35:09 -07002510 bin_info->reg_interval);
2511}
Jason Evans6b694c42014-01-07 16:47:56 -08002512#ifdef JEMALLOC_JET
2513#undef arena_dalloc_junk_small
2514#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2515arena_dalloc_junk_small_t *arena_dalloc_junk_small =
Jason Evansab0cfe02016-04-18 15:11:20 -07002516 JEMALLOC_N(n_arena_dalloc_junk_small);
Jason Evans6b694c42014-01-07 16:47:56 -08002517#endif
Jason Evans122449b2012-04-06 00:35:09 -07002518
Jason Evans0d6c5d82013-12-17 15:14:36 -08002519void
2520arena_quarantine_junk_small(void *ptr, size_t usize)
2521{
Jason Evansd01fd192015-08-19 15:21:32 -07002522 szind_t binind;
Jason Evans0d6c5d82013-12-17 15:14:36 -08002523 arena_bin_info_t *bin_info;
2524 cassert(config_fill);
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002525 assert(opt_junk_free);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002526 assert(opt_quarantine);
2527 assert(usize <= SMALL_MAXCLASS);
2528
Jason Evans155bfa72014-10-05 17:54:10 -07002529 binind = size2index(usize);
Jason Evans0d6c5d82013-12-17 15:14:36 -08002530 bin_info = &arena_bin_info[binind];
2531 arena_redzones_validate(ptr, bin_info, true);
2532}
2533
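/*
 * Allocate a small region of size class binind: take a region from
 * bin->runcur under bin->lock when it has free regions, fall back to
 * arena_bin_malloc_hard() otherwise, then apply junk/zero fill and tick the
 * arena's decay clock.
 */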
Jason Evans578cd162016-02-19 18:40:03 -08002534static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002535arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002536{
2537 void *ret;
2538 arena_bin_t *bin;
Jason Evans0c516a02016-02-25 15:29:49 -08002539 size_t usize;
Jason Evanse476f8a2010-01-16 09:53:50 -08002540 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002541
Jason Evansb1726102012-02-28 16:50:47 -08002542 assert(binind < NBINS);
Jason Evanse476f8a2010-01-16 09:53:50 -08002543 bin = &arena->bins[binind];
Jason Evans0c516a02016-02-25 15:29:49 -08002544 usize = index2size(binind);
Jason Evanse476f8a2010-01-16 09:53:50 -08002545
Jason Evansc1e00ef2016-05-10 22:21:10 -07002546 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002547 if ((run = bin->runcur) != NULL && run->nfree > 0)
Jason Evans49f7e8f2011-03-15 13:59:15 -07002548 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
Jason Evanse476f8a2010-01-16 09:53:50 -08002549 else
Jason Evansc1e00ef2016-05-10 22:21:10 -07002550 ret = arena_bin_malloc_hard(tsdn, arena, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002551
2552 if (ret == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002553 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002554 return (NULL);
2555 }
2556
Jason Evans7372b152012-02-10 20:22:09 -08002557 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002558 bin->stats.nmalloc++;
2559 bin->stats.nrequests++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002560 bin->stats.curregs++;
Jason Evans7372b152012-02-10 20:22:09 -08002561 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002562 malloc_mutex_unlock(tsdn, &bin->lock);
2563 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2564 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002565
Jason Evans551ebc42014-10-03 10:16:09 -07002566 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002567 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002568 if (unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002569 arena_alloc_junk_small(ret,
2570 &arena_bin_info[binind], false);
Jason Evans9c640bf2014-09-11 16:20:44 -07002571 } else if (unlikely(opt_zero))
Jason Evans0c516a02016-02-25 15:29:49 -08002572 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002573 }
Jason Evans0c516a02016-02-25 15:29:49 -08002574 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002575 } else {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002576 if (config_fill && unlikely(opt_junk_alloc)) {
Jason Evans122449b2012-04-06 00:35:09 -07002577 arena_alloc_junk_small(ret, &arena_bin_info[binind],
2578 true);
2579 }
Jason Evans0c516a02016-02-25 15:29:49 -08002580 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2581 memset(ret, 0, usize);
Jason Evans122449b2012-04-06 00:35:09 -07002582 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002583
Jason Evansc1e00ef2016-05-10 22:21:10 -07002584 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002585 return (ret);
2586}
2587
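/*
 * Allocate a large run of size class binind.  With cache-oblivious allocation
 * enabled, the returned pointer is offset from the run base by a random
 * cacheline-aligned amount within the first page (the run carries large_pad
 * of extra space for this), so page-aligned large allocations do not all map
 * to the same cache index bits.
 */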
2588void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002589arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002590{
2591 void *ret;
Jason Evans155bfa72014-10-05 17:54:10 -07002592 size_t usize;
Jason Evans8a03cf02015-05-04 09:58:36 -07002593 uintptr_t random_offset;
Jason Evans0c5dd032014-09-29 01:31:39 -07002594 arena_run_t *run;
2595 arena_chunk_map_misc_t *miscelm;
Dmitri Smirnov33184bf2016-02-29 14:30:19 -08002596 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002597
2598 /* Large allocation. */
Qi Wangf4a0f322015-10-27 15:12:10 -07002599 usize = index2size(binind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002600 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans8a03cf02015-05-04 09:58:36 -07002601 if (config_cache_oblivious) {
Jason Evansbce61d62015-07-07 09:32:05 -07002602 uint64_t r;
2603
Jason Evans8a03cf02015-05-04 09:58:36 -07002604 /*
2605 * Compute a uniformly distributed offset within the first page
2606 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2607 * for 4 KiB pages and 64-byte cachelines.
2608 */
Jason Evans34676d32016-02-09 16:28:40 -08002609 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE);
Jason Evans8a03cf02015-05-04 09:58:36 -07002610 random_offset = ((uintptr_t)r) << LG_CACHELINE;
2611 } else
2612 random_offset = 0;
Jason Evansc1e00ef2016-05-10 22:21:10 -07002613 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
Jason Evans0c5dd032014-09-29 01:31:39 -07002614 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002615 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002616 return (NULL);
2617 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002618 miscelm = arena_run_to_miscelm(run);
Jason Evans8a03cf02015-05-04 09:58:36 -07002619 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2620 random_offset);
Jason Evans7372b152012-02-10 20:22:09 -08002621 if (config_stats) {
Qi Wangf4a0f322015-10-27 15:12:10 -07002622 szind_t index = binind - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002623
Jason Evans7372b152012-02-10 20:22:09 -08002624 arena->stats.nmalloc_large++;
2625 arena->stats.nrequests_large++;
Jason Evans155bfa72014-10-05 17:54:10 -07002626 arena->stats.allocated_large += usize;
2627 arena->stats.lstats[index].nmalloc++;
2628 arena->stats.lstats[index].nrequests++;
2629 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002630 }
Jason Evans7372b152012-02-10 20:22:09 -08002631 if (config_prof)
Jason Evans155bfa72014-10-05 17:54:10 -07002632 idump = arena_prof_accum_locked(arena, usize);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002633 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans88c222c2013-02-06 11:59:30 -08002634 if (config_prof && idump)
Jason Evansc1e00ef2016-05-10 22:21:10 -07002635 prof_idump(tsdn);
Jason Evanse476f8a2010-01-16 09:53:50 -08002636
Jason Evans551ebc42014-10-03 10:16:09 -07002637 if (!zero) {
Jason Evans7372b152012-02-10 20:22:09 -08002638 if (config_fill) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002639 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002640 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002641 else if (unlikely(opt_zero))
Jason Evans155bfa72014-10-05 17:54:10 -07002642 memset(ret, 0, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002643 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002644 }
2645
Jason Evansc1e00ef2016-05-10 22:21:10 -07002646 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002647 return (ret);
2648}
2649
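/*
 * Slow-path dispatch for arena_malloc(): resolve the arena if necessary, then
 * route to the small, large, or huge allocator based on the size class.
 */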
Jason Evans578cd162016-02-19 18:40:03 -08002650void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002651arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
Jason Evans66cd9532016-04-22 14:34:14 -07002652 bool zero)
Jason Evans578cd162016-02-19 18:40:03 -08002653{
2654
Jason Evansc1e00ef2016-05-10 22:21:10 -07002655 assert(!tsdn_null(tsdn) || arena != NULL);
2656
2657 if (likely(!tsdn_null(tsdn)))
2658 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans578cd162016-02-19 18:40:03 -08002659 if (unlikely(arena == NULL))
2660 return (NULL);
2661
2662 if (likely(size <= SMALL_MAXCLASS))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002663 return (arena_malloc_small(tsdn, arena, ind, zero));
Jason Evans578cd162016-02-19 18:40:03 -08002664 if (likely(size <= large_maxclass))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002665 return (arena_malloc_large(tsdn, arena, ind, zero));
2666 return (huge_malloc(tsdn, arena, index2size(ind), zero));
Jason Evans578cd162016-02-19 18:40:03 -08002667}
2668
Jason Evanse476f8a2010-01-16 09:53:50 -08002669/* Only handles large allocations that require more than page alignment. */
Jason Evans88fef7c2015-02-12 14:06:37 -08002670static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002671arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002672 bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08002673{
2674 void *ret;
Jason Evans5ff709c2012-04-11 18:13:45 -07002675 size_t alloc_size, leadsize, trailsize;
2676 arena_run_t *run;
Jason Evanse476f8a2010-01-16 09:53:50 -08002677 arena_chunk_t *chunk;
Jason Evans0c5dd032014-09-29 01:31:39 -07002678 arena_chunk_map_misc_t *miscelm;
2679 void *rpages;
Jason Evanse476f8a2010-01-16 09:53:50 -08002680
Jason Evansc1e00ef2016-05-10 22:21:10 -07002681 assert(!tsdn_null(tsdn) || arena != NULL);
Jason Evans50883de2015-07-23 17:13:18 -07002682 assert(usize == PAGE_CEILING(usize));
Jason Evans93443682010-10-20 17:39:18 -07002683
Jason Evansc1e00ef2016-05-10 22:21:10 -07002684 if (likely(!tsdn_null(tsdn)))
2685 arena = arena_choose(tsdn_tsd(tsdn), arena);
Jason Evans88fef7c2015-02-12 14:06:37 -08002686 if (unlikely(arena == NULL))
2687 return (NULL);
2688
Jason Evans93443682010-10-20 17:39:18 -07002689 alignment = PAGE_CEILING(alignment);
Jason Evans245ae602016-04-06 11:54:44 -07002690 alloc_size = usize + large_pad + alignment;
Jason Evanse476f8a2010-01-16 09:53:50 -08002691
Jason Evansc1e00ef2016-05-10 22:21:10 -07002692 malloc_mutex_lock(tsdn, &arena->lock);
2693 run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
Jason Evans5ff709c2012-04-11 18:13:45 -07002694 if (run == NULL) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002695 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002696 return (NULL);
2697 }
Jason Evans5ff709c2012-04-11 18:13:45 -07002698 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
Jason Evans0c5dd032014-09-29 01:31:39 -07002699 miscelm = arena_run_to_miscelm(run);
2700 rpages = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002701
Jason Evans0c5dd032014-09-29 01:31:39 -07002702 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2703 (uintptr_t)rpages;
Jason Evans50883de2015-07-23 17:13:18 -07002704 assert(alloc_size >= leadsize + usize);
2705 trailsize = alloc_size - leadsize - usize - large_pad;
Jason Evans5ff709c2012-04-11 18:13:45 -07002706 if (leadsize != 0) {
Jason Evans0c5dd032014-09-29 01:31:39 -07002707 arena_chunk_map_misc_t *head_miscelm = miscelm;
2708 arena_run_t *head_run = run;
2709
Jason Evans61a6dfc2016-03-23 16:04:38 -07002710 miscelm = arena_miscelm_get_mutable(chunk,
Jason Evans0c5dd032014-09-29 01:31:39 -07002711 arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2712 LG_PAGE));
2713 run = &miscelm->run;
2714
Jason Evansc1e00ef2016-05-10 22:21:10 -07002715 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
Jason Evans0c5dd032014-09-29 01:31:39 -07002716 alloc_size - leadsize);
Jason Evans5ff709c2012-04-11 18:13:45 -07002717 }
2718 if (trailsize != 0) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002719 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
Jason Evans50883de2015-07-23 17:13:18 -07002720 trailsize, usize + large_pad, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08002721 }
Jason Evans8fadb1a2015-08-04 10:49:46 -07002722 if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2723 size_t run_ind =
2724 arena_miscelm_to_pageind(arena_run_to_miscelm(run));
Jason Evansde249c82015-08-09 16:47:27 -07002725 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2726 bool decommitted = (arena_mapbits_decommitted_get(chunk,
2727 run_ind) != 0);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002728
Jason Evansde249c82015-08-09 16:47:27 -07002729 assert(decommitted); /* Cause of OOM. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002730 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2731 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans8fadb1a2015-08-04 10:49:46 -07002732 return (NULL);
2733 }
Jason Evans0c5dd032014-09-29 01:31:39 -07002734 ret = arena_miscelm_to_rpages(miscelm);
Jason Evanse476f8a2010-01-16 09:53:50 -08002735
Jason Evans7372b152012-02-10 20:22:09 -08002736 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002737 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002738
Jason Evans7372b152012-02-10 20:22:09 -08002739 arena->stats.nmalloc_large++;
2740 arena->stats.nrequests_large++;
Jason Evans50883de2015-07-23 17:13:18 -07002741 arena->stats.allocated_large += usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002742 arena->stats.lstats[index].nmalloc++;
2743 arena->stats.lstats[index].nrequests++;
2744 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08002745 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002746 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08002747
Jason Evans551ebc42014-10-03 10:16:09 -07002748 if (config_fill && !zero) {
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002749 if (unlikely(opt_junk_alloc))
Chris Petersona82070e2016-03-27 23:28:39 -07002750 memset(ret, JEMALLOC_ALLOC_JUNK, usize);
Jason Evans9c640bf2014-09-11 16:20:44 -07002751 else if (unlikely(opt_zero))
Jason Evans50883de2015-07-23 17:13:18 -07002752 memset(ret, 0, usize);
Jason Evans8e3c3c62010-09-17 15:46:18 -07002753 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07002754 arena_decay_tick(tsdn, arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08002755 return (ret);
2756}
2757
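/*
 * Aligned allocation.  Small and page-or-looser alignments are served by the
 * ordinary allocation paths (the large path masks off the random
 * cache-oblivious offset to recover the page-aligned run base); stricter
 * alignments go through arena_palloc_large() or the huge allocator.
 */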
Jason Evans88fef7c2015-02-12 14:06:37 -08002758void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07002759arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002760 bool zero, tcache_t *tcache)
2761{
2762 void *ret;
2763
Jason Evans8a03cf02015-05-04 09:58:36 -07002764 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
Jason Evans51541752015-05-19 17:42:31 -07002765 && (usize & PAGE_MASK) == 0))) {
2766 /* Small; alignment doesn't require special run placement. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002767 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002768 tcache, true);
Jason Evans676df882015-09-11 20:50:20 -07002769 } else if (usize <= large_maxclass && alignment <= PAGE) {
Jason Evans51541752015-05-19 17:42:31 -07002770 /*
2771 * Large; alignment doesn't require special run placement.
2772 * However, the cached pointer may be at a random offset from
2773 * the base of the run, so do some bit manipulation to retrieve
2774 * the base.
2775 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07002776 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
Qi Wangf4a0f322015-10-27 15:12:10 -07002777 tcache, true);
Jason Evans51541752015-05-19 17:42:31 -07002778 if (config_cache_oblivious)
2779 ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2780 } else {
Jason Evans676df882015-09-11 20:50:20 -07002781 if (likely(usize <= large_maxclass)) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002782 ret = arena_palloc_large(tsdn, arena, usize, alignment,
Jason Evans88fef7c2015-02-12 14:06:37 -08002783 zero);
2784 } else if (likely(alignment <= chunksize))
Jason Evansc1e00ef2016-05-10 22:21:10 -07002785 ret = huge_malloc(tsdn, arena, usize, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002786 else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07002787 ret = huge_palloc(tsdn, arena, usize, alignment, zero);
Jason Evans88fef7c2015-02-12 14:06:37 -08002788 }
2789 }
2790 return (ret);
2791}
2792
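/*
 * For heap profiling, sampled small allocations are backed by LARGE_MINCLASS
 * runs; this records the requested small size class in the chunk map so that
 * isalloc(tsdn, ptr, true) reports the small size while isalloc(tsdn, ptr,
 * false) still reports LARGE_MINCLASS.
 */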
Jason Evans0b270a92010-03-31 16:45:04 -07002793void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002794arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
Jason Evans0b270a92010-03-31 16:45:04 -07002795{
2796 arena_chunk_t *chunk;
Jason Evans155bfa72014-10-05 17:54:10 -07002797 size_t pageind;
Jason Evansd01fd192015-08-19 15:21:32 -07002798 szind_t binind;
Jason Evans0b270a92010-03-31 16:45:04 -07002799
Jason Evans78f73522012-04-18 13:38:40 -07002800 cassert(config_prof);
Jason Evans0b270a92010-03-31 16:45:04 -07002801 assert(ptr != NULL);
2802 assert(CHUNK_ADDR2BASE(ptr) != ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002803 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2804 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
Jason Evansb1726102012-02-28 16:50:47 -08002805 assert(size <= SMALL_MAXCLASS);
Jason Evans0b270a92010-03-31 16:45:04 -07002806
2807 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansae4c7b42012-04-02 07:04:34 -07002808 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans155bfa72014-10-05 17:54:10 -07002809 binind = size2index(size);
Jason Evansb1726102012-02-28 16:50:47 -08002810 assert(binind < NBINS);
Jason Evans203484e2012-05-02 00:30:36 -07002811 arena_mapbits_large_binind_set(chunk, pageind, binind);
Jason Evans0b270a92010-03-31 16:45:04 -07002812
Jason Evansc1e00ef2016-05-10 22:21:10 -07002813 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2814 assert(isalloc(tsdn, ptr, true) == size);
Jason Evans0b270a92010-03-31 16:45:04 -07002815}
Jason Evans6109fe02010-02-10 10:37:56 -08002816
Jason Evanse476f8a2010-01-16 09:53:50 -08002817static void
Jason Evans088e6a02010-10-18 00:04:44 -07002818arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
Jason Evanse476f8a2010-01-16 09:53:50 -08002819 arena_bin_t *bin)
2820{
Jason Evanse476f8a2010-01-16 09:53:50 -08002821
Jason Evans19b3d612010-03-18 20:36:40 -07002822 /* Dissociate run from bin. */
Jason Evanse476f8a2010-01-16 09:53:50 -08002823 if (run == bin->runcur)
2824 bin->runcur = NULL;
Jason Evans49f7e8f2011-03-15 13:59:15 -07002825 else {
Jason Evansd01fd192015-08-19 15:21:32 -07002826 szind_t binind = arena_bin_index(extent_node_arena_get(
Jason Evansee41ad42015-02-15 18:04:46 -08002827 &chunk->node), bin);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002828 arena_bin_info_t *bin_info = &arena_bin_info[binind];
2829
Jason Evansc6a2c392016-03-26 17:30:37 -07002830 /*
2831 * The following block's conditional is necessary because if the
2832 * run only contains one region, then it never gets inserted
2833	 * into the non-full runs heap.
2834 */
Jason Evans49f7e8f2011-03-15 13:59:15 -07002835 if (bin_info->nregs != 1) {
Jason Evansc6a2c392016-03-26 17:30:37 -07002836 arena_chunk_map_misc_t *miscelm =
2837 arena_run_to_miscelm(run);
2838
2839 arena_run_heap_remove(&bin->runs, miscelm);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002840 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002841 }
Jason Evans088e6a02010-10-18 00:04:44 -07002842}
2843
2844static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002845arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002846 arena_run_t *run, arena_bin_t *bin)
Jason Evans088e6a02010-10-18 00:04:44 -07002847{
Jason Evans088e6a02010-10-18 00:04:44 -07002848
2849 assert(run != bin->runcur);
Jason Evans86815df2010-03-13 20:32:56 -08002850
Jason Evansc1e00ef2016-05-10 22:21:10 -07002851 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002852 /******************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002853 malloc_mutex_lock(tsdn, &arena->lock);
2854 arena_run_dalloc(tsdn, arena, run, true, false, false);
2855 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse00572b2010-03-14 19:43:56 -07002856 /****************************/
Jason Evansc1e00ef2016-05-10 22:21:10 -07002857 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans7372b152012-02-10 20:22:09 -08002858 if (config_stats)
2859 bin->stats.curruns--;
Jason Evanse476f8a2010-01-16 09:53:50 -08002860}
2861
Jason Evans940a2e02010-10-17 17:51:37 -07002862static void
2863arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2864 arena_bin_t *bin)
Jason Evanse476f8a2010-01-16 09:53:50 -08002865{
Jason Evanse476f8a2010-01-16 09:53:50 -08002866
Jason Evans8de6a022010-10-17 20:57:30 -07002867 /*
Jason Evanse7a10582012-02-13 17:36:52 -08002868 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2869 * non-full run. It is okay to NULL runcur out rather than proactively
2870 * keeping it pointing at the lowest non-full run.
Jason Evans8de6a022010-10-17 20:57:30 -07002871 */
Jason Evanse7a10582012-02-13 17:36:52 -08002872 if ((uintptr_t)run < (uintptr_t)bin->runcur) {
Jason Evans8de6a022010-10-17 20:57:30 -07002873 /* Switch runcur. */
Jason Evanse7a10582012-02-13 17:36:52 -08002874 if (bin->runcur->nfree > 0)
2875 arena_bin_runs_insert(bin, bin->runcur);
Jason Evans8de6a022010-10-17 20:57:30 -07002876 bin->runcur = run;
Jason Evanse7a10582012-02-13 17:36:52 -08002877 if (config_stats)
2878 bin->stats.reruns++;
2879 } else
2880 arena_bin_runs_insert(bin, run);
Jason Evans940a2e02010-10-17 17:51:37 -07002881}
2882
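/*
 * Deallocate a small region with bin->lock already held.  The junked flag
 * indicates that the caller already junk-filled the region, so the fill is
 * skipped.  The region goes back to its run; a now-empty run is returned to
 * the arena, while a previously full run is repositioned via
 * arena_bin_lower_run().
 */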
Jason Evansfc0b3b72014-10-09 17:54:06 -07002883static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002884arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07002885 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
Jason Evans940a2e02010-10-17 17:51:37 -07002886{
Jason Evans0c5dd032014-09-29 01:31:39 -07002887 size_t pageind, rpages_ind;
Jason Evans940a2e02010-10-17 17:51:37 -07002888 arena_run_t *run;
2889 arena_bin_t *bin;
Mike Hommey8b499712012-04-24 23:22:02 +02002890 arena_bin_info_t *bin_info;
Jason Evansd01fd192015-08-19 15:21:32 -07002891 szind_t binind;
Jason Evans940a2e02010-10-17 17:51:37 -07002892
Jason Evansae4c7b42012-04-02 07:04:34 -07002893 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans0c5dd032014-09-29 01:31:39 -07002894 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002895 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002896 binind = run->binind;
2897 bin = &arena->bins[binind];
Mike Hommey8b499712012-04-24 23:22:02 +02002898 bin_info = &arena_bin_info[binind];
Jason Evans940a2e02010-10-17 17:51:37 -07002899
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002900 if (!junked && config_fill && unlikely(opt_junk_free))
Jason Evans122449b2012-04-06 00:35:09 -07002901 arena_dalloc_junk_small(ptr, bin_info);
Jason Evans940a2e02010-10-17 17:51:37 -07002902
2903 arena_run_reg_dalloc(run, ptr);
Jason Evans49f7e8f2011-03-15 13:59:15 -07002904 if (run->nfree == bin_info->nregs) {
Jason Evans088e6a02010-10-18 00:04:44 -07002905 arena_dissociate_bin_run(chunk, run, bin);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002906 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
Jason Evans088e6a02010-10-18 00:04:44 -07002907 } else if (run->nfree == 1 && run != bin->runcur)
Jason Evans8de6a022010-10-17 20:57:30 -07002908 arena_bin_lower_run(arena, chunk, run, bin);
Jason Evanse476f8a2010-01-16 09:53:50 -08002909
Jason Evans7372b152012-02-10 20:22:09 -08002910 if (config_stats) {
Jason Evans7372b152012-02-10 20:22:09 -08002911 bin->stats.ndalloc++;
Jason Evans3c4d92e2014-10-12 22:53:59 -07002912 bin->stats.curregs--;
Jason Evans7372b152012-02-10 20:22:09 -08002913 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002914}
2915
Jason Evanse476f8a2010-01-16 09:53:50 -08002916void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002917arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2918 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
Jason Evansfc0b3b72014-10-09 17:54:06 -07002919{
2920
Jason Evansc1e00ef2016-05-10 22:21:10 -07002921 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07002922}
2923
2924void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002925arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002926 size_t pageind, arena_chunk_map_bits_t *bitselm)
Jason Evans203484e2012-05-02 00:30:36 -07002927{
2928 arena_run_t *run;
2929 arena_bin_t *bin;
Jason Evans0c5dd032014-09-29 01:31:39 -07002930 size_t rpages_ind;
Jason Evans203484e2012-05-02 00:30:36 -07002931
Jason Evans0c5dd032014-09-29 01:31:39 -07002932 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
Jason Evans61a6dfc2016-03-23 16:04:38 -07002933 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
Jason Evans381c23d2014-10-10 23:01:03 -07002934 bin = &arena->bins[run->binind];
Jason Evansc1e00ef2016-05-10 22:21:10 -07002935 malloc_mutex_lock(tsdn, &bin->lock);
2936 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2937 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans203484e2012-05-02 00:30:36 -07002938}
2939
2940void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002941arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2942 void *ptr, size_t pageind)
Jason Evans203484e2012-05-02 00:30:36 -07002943{
Qinfan Wuff6a31d2014-08-29 13:34:40 -07002944 arena_chunk_map_bits_t *bitselm;
Jason Evans203484e2012-05-02 00:30:36 -07002945
2946 if (config_debug) {
Jason Evans80737c32012-05-02 16:11:03 -07002947 /* arena_ptr_small_binind_get() does extra sanity checking. */
2948 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2949 pageind)) != BININD_INVALID);
Jason Evans203484e2012-05-02 00:30:36 -07002950 }
Jason Evans61a6dfc2016-03-23 16:04:38 -07002951 bitselm = arena_bitselm_get_mutable(chunk, pageind);
Jason Evansc1e00ef2016-05-10 22:21:10 -07002952 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
2953 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07002954}
Jason Evanse476f8a2010-01-16 09:53:50 -08002955
Jason Evans6b694c42014-01-07 16:47:56 -08002956#ifdef JEMALLOC_JET
2957#undef arena_dalloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07002958#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08002959#endif
Jason Evansfc0b3b72014-10-09 17:54:06 -07002960void
Jason Evans6b694c42014-01-07 16:47:56 -08002961arena_dalloc_junk_large(void *ptr, size_t usize)
2962{
2963
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02002964 if (config_fill && unlikely(opt_junk_free))
Chris Petersona82070e2016-03-27 23:28:39 -07002965 memset(ptr, JEMALLOC_FREE_JUNK, usize);
Jason Evans6b694c42014-01-07 16:47:56 -08002966}
2967#ifdef JEMALLOC_JET
2968#undef arena_dalloc_junk_large
2969#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2970arena_dalloc_junk_large_t *arena_dalloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07002971 JEMALLOC_N(n_arena_dalloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08002972#endif
2973
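/*
 * Deallocate a large run with arena->lock already held.  The junked flag
 * indicates that the caller already junk-filled the memory, so
 * arena_dalloc_junk_large() is skipped; large stats are adjusted before the
 * run is released.
 */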
Jason Evanse56b24e2015-09-20 09:58:10 -07002974static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07002975arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
2976 arena_chunk_t *chunk, void *ptr, bool junked)
Jason Evanse476f8a2010-01-16 09:53:50 -08002977{
Jason Evans0c5dd032014-09-29 01:31:39 -07002978 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07002979 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
2980 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07002981 arena_run_t *run = &miscelm->run;
Jason Evans13668262010-01-31 03:57:29 -08002982
Jason Evans7372b152012-02-10 20:22:09 -08002983 if (config_fill || config_stats) {
Jason Evans8a03cf02015-05-04 09:58:36 -07002984 size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2985 large_pad;
Jason Evanse476f8a2010-01-16 09:53:50 -08002986
Jason Evansfc0b3b72014-10-09 17:54:06 -07002987 if (!junked)
2988 arena_dalloc_junk_large(ptr, usize);
Jason Evans7372b152012-02-10 20:22:09 -08002989 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07002990 szind_t index = size2index(usize) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07002991
Jason Evans7372b152012-02-10 20:22:09 -08002992 arena->stats.ndalloc_large++;
Jason Evans6b694c42014-01-07 16:47:56 -08002993 arena->stats.allocated_large -= usize;
Jason Evans155bfa72014-10-05 17:54:10 -07002994 arena->stats.lstats[index].ndalloc++;
2995 arena->stats.lstats[index].curruns--;
Jason Evans7372b152012-02-10 20:22:09 -08002996 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002997 }
Jason Evanse476f8a2010-01-16 09:53:50 -08002998
Jason Evansc1e00ef2016-05-10 22:21:10 -07002999 arena_run_dalloc(tsdn, arena, run, true, false, false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003000}
3001
Jason Evans203484e2012-05-02 00:30:36 -07003002void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003003arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
Jason Evansb2c0d632016-04-13 23:36:15 -07003004 arena_chunk_t *chunk, void *ptr)
Jason Evansfc0b3b72014-10-09 17:54:06 -07003005{
3006
Jason Evansc1e00ef2016-05-10 22:21:10 -07003007 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003008}
3009
3010void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003011arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3012 void *ptr)
Jason Evans203484e2012-05-02 00:30:36 -07003013{
3014
Jason Evansc1e00ef2016-05-10 22:21:10 -07003015 malloc_mutex_lock(tsdn, &arena->lock);
3016 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3017 malloc_mutex_unlock(tsdn, &arena->lock);
3018 arena_decay_tick(tsdn, arena);
Jason Evans203484e2012-05-02 00:30:36 -07003019}
3020
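/*
 * Shrink a large allocation in place by trimming its trailing pages back to
 * the arena and moving its stats from the old large size class to the new
 * one.
 */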
Jason Evanse476f8a2010-01-16 09:53:50 -08003021static void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003022arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003023 void *ptr, size_t oldsize, size_t size)
Jason Evanse476f8a2010-01-16 09:53:50 -08003024{
Jason Evans0c5dd032014-09-29 01:31:39 -07003025 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans61a6dfc2016-03-23 16:04:38 -07003026 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3027 pageind);
Jason Evans0c5dd032014-09-29 01:31:39 -07003028 arena_run_t *run = &miscelm->run;
Jason Evanse476f8a2010-01-16 09:53:50 -08003029
3030 assert(size < oldsize);
3031
3032 /*
3033 * Shrink the run, and make trailing pages available for other
3034 * allocations.
3035 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003036 malloc_mutex_lock(tsdn, &arena->lock);
3037 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
Jason Evans8a03cf02015-05-04 09:58:36 -07003038 large_pad, true);
Jason Evans7372b152012-02-10 20:22:09 -08003039 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003040 szind_t oldindex = size2index(oldsize) - NBINS;
3041 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003042
Jason Evans7372b152012-02-10 20:22:09 -08003043 arena->stats.ndalloc_large++;
3044 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003045 arena->stats.lstats[oldindex].ndalloc++;
3046 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003047
Jason Evans7372b152012-02-10 20:22:09 -08003048 arena->stats.nmalloc_large++;
3049 arena->stats.nrequests_large++;
3050 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003051 arena->stats.lstats[index].nmalloc++;
3052 arena->stats.lstats[index].nrequests++;
3053 arena->stats.lstats[index].curruns++;
Jason Evanse476f8a2010-01-16 09:53:50 -08003054 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003055 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003056}
3057
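/*
 * Try to grow a large allocation in place by splitting the run immediately
 * following it, provided that run is unallocated and large enough to reach at
 * least usize_min.  On success the chunk map is rewritten for the merged run,
 * stats move to the new size class, and false is returned; true means the
 * caller must reallocate and copy.
 */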
3058static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003059arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
Jason Evansb2c0d632016-04-13 23:36:15 -07003060 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003061{
Jason Evansae4c7b42012-04-02 07:04:34 -07003062 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
Jason Evans5716d972015-08-06 23:34:12 -07003063 size_t npages = (oldsize + large_pad) >> LG_PAGE;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003064 size_t followsize;
Jason Evanse476f8a2010-01-16 09:53:50 -08003065
Jason Evans8a03cf02015-05-04 09:58:36 -07003066 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3067 large_pad);
Jason Evanse476f8a2010-01-16 09:53:50 -08003068
3069 /* Try to extend the run. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003070 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans560a4e12015-09-11 16:18:53 -07003071 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3072 pageind+npages) != 0)
3073 goto label_fail;
3074 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3075 if (oldsize + followsize >= usize_min) {
Jason Evanse476f8a2010-01-16 09:53:50 -08003076 /*
3077 * The next run is available and sufficiently large. Split the
3078 * following run, then merge the first part with the existing
3079 * allocation.
3080 */
Guilherme Goncalves9c6a8d32014-12-17 14:46:35 -02003081 arena_run_t *run;
Jason Evans560a4e12015-09-11 16:18:53 -07003082 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
Jason Evans155bfa72014-10-05 17:54:10 -07003083
Jason Evans560a4e12015-09-11 16:18:53 -07003084 usize = usize_max;
Jason Evans155bfa72014-10-05 17:54:10 -07003085 while (oldsize + followsize < usize)
3086 usize = index2size(size2index(usize)-1);
3087 assert(usize >= usize_min);
Jason Evans560a4e12015-09-11 16:18:53 -07003088 assert(usize >= oldsize);
Jason Evans5716d972015-08-06 23:34:12 -07003089 splitsize = usize - oldsize;
Jason Evans560a4e12015-09-11 16:18:53 -07003090 if (splitsize == 0)
3091 goto label_fail;
Jason Evans155bfa72014-10-05 17:54:10 -07003092
Jason Evans61a6dfc2016-03-23 16:04:38 -07003093 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
Jason Evans560a4e12015-09-11 16:18:53 -07003094 if (arena_run_split_large(arena, run, splitsize, zero))
3095 goto label_fail;
Jason Evanse476f8a2010-01-16 09:53:50 -08003096
Jason Evansd260f442015-09-24 16:38:45 -07003097 if (config_cache_oblivious && zero) {
3098 /*
3099 * Zero the trailing bytes of the original allocation's
3100 * last page, since they are in an indeterminate state.
Jason Evansa784e412015-09-24 22:21:55 -07003101 * There will always be trailing bytes, because ptr's
3102 * offset from the beginning of the run is a multiple of
3103 * CACHELINE in [0 .. PAGE).
Jason Evansd260f442015-09-24 16:38:45 -07003104 */
Jason Evansa784e412015-09-24 22:21:55 -07003105 void *zbase = (void *)((uintptr_t)ptr + oldsize);
3106 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3107 PAGE));
3108 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3109 assert(nzero > 0);
3110 memset(zbase, 0, nzero);
Jason Evansd260f442015-09-24 16:38:45 -07003111 }
3112
Jason Evans088e6a02010-10-18 00:04:44 -07003113 size = oldsize + splitsize;
Jason Evans5716d972015-08-06 23:34:12 -07003114 npages = (size + large_pad) >> LG_PAGE;
Jason Evans940a2e02010-10-17 17:51:37 -07003115
3116 /*
3117 * Mark the extended run as dirty if either portion of the run
3118 * was dirty before allocation. This is rather pedantic,
3119 * because there's not actually any sequence of events that
3120 * could cause the resulting run to be passed to
3121 * arena_run_dalloc() with the dirty argument set to false
3122 * (which is when dirty flag consistency would really matter).
3123 */
Jason Evans203484e2012-05-02 00:30:36 -07003124 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3125 arena_mapbits_dirty_get(chunk, pageind+npages-1);
Jason Evans1f27abc2015-08-11 12:42:33 -07003126 flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
Jason Evans5716d972015-08-06 23:34:12 -07003127 arena_mapbits_large_set(chunk, pageind, size + large_pad,
Jason Evans1f27abc2015-08-11 12:42:33 -07003128 flag_dirty | (flag_unzeroed_mask &
3129 arena_mapbits_unzeroed_get(chunk, pageind)));
3130 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3131 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3132 pageind+npages-1)));
Jason Evanse476f8a2010-01-16 09:53:50 -08003133
Jason Evans7372b152012-02-10 20:22:09 -08003134 if (config_stats) {
Jason Evansd01fd192015-08-19 15:21:32 -07003135 szind_t oldindex = size2index(oldsize) - NBINS;
3136 szind_t index = size2index(size) - NBINS;
Jason Evans155bfa72014-10-05 17:54:10 -07003137
Jason Evans7372b152012-02-10 20:22:09 -08003138 arena->stats.ndalloc_large++;
3139 arena->stats.allocated_large -= oldsize;
Jason Evans155bfa72014-10-05 17:54:10 -07003140 arena->stats.lstats[oldindex].ndalloc++;
3141 arena->stats.lstats[oldindex].curruns--;
Jason Evans990d10c2010-01-31 03:49:35 -08003142
Jason Evans7372b152012-02-10 20:22:09 -08003143 arena->stats.nmalloc_large++;
3144 arena->stats.nrequests_large++;
3145 arena->stats.allocated_large += size;
Jason Evans155bfa72014-10-05 17:54:10 -07003146 arena->stats.lstats[index].nmalloc++;
3147 arena->stats.lstats[index].nrequests++;
3148 arena->stats.lstats[index].curruns++;
Jason Evans940a2e02010-10-17 17:51:37 -07003149 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003150 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003151 return (false);
3152 }
Jason Evans560a4e12015-09-11 16:18:53 -07003153label_fail:
Jason Evansc1e00ef2016-05-10 22:21:10 -07003154 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evanse476f8a2010-01-16 09:53:50 -08003155 return (true);
3156}
3157
Jason Evans6b694c42014-01-07 16:47:56 -08003158#ifdef JEMALLOC_JET
3159#undef arena_ralloc_junk_large
Jason Evansab0cfe02016-04-18 15:11:20 -07003160#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
Jason Evans6b694c42014-01-07 16:47:56 -08003161#endif
3162static void
3163arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3164{
3165
Guilherme Goncalves2c5cb612014-12-08 19:12:41 -02003166 if (config_fill && unlikely(opt_junk_free)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003167 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
Jason Evans6b694c42014-01-07 16:47:56 -08003168 old_usize - usize);
3169 }
3170}
3171#ifdef JEMALLOC_JET
3172#undef arena_ralloc_junk_large
3173#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3174arena_ralloc_junk_large_t *arena_ralloc_junk_large =
Jason Evansab0cfe02016-04-18 15:11:20 -07003175 JEMALLOC_N(n_arena_ralloc_junk_large);
Jason Evans6b694c42014-01-07 16:47:56 -08003176#endif
3177
Jason Evanse476f8a2010-01-16 09:53:50 -08003178/*
3179 * Try to resize a large allocation, in order to avoid copying. This will
3180 * always fail when growing an object and the following run is already in use.
3181 */
3182static bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003183arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
Jason Evans560a4e12015-09-11 16:18:53 -07003184 size_t usize_max, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003185{
Jason Evans560a4e12015-09-11 16:18:53 -07003186 arena_chunk_t *chunk;
3187 arena_t *arena;
Jason Evanse476f8a2010-01-16 09:53:50 -08003188
Jason Evans560a4e12015-09-11 16:18:53 -07003189 if (oldsize == usize_max) {
3190 /* Current size class is compatible and maximal. */
Jason Evanse476f8a2010-01-16 09:53:50 -08003191 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003192 }
Jason Evans560a4e12015-09-11 16:18:53 -07003193
3194 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3195 arena = extent_node_arena_get(&chunk->node);
3196
3197 if (oldsize < usize_max) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003198 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
Jason Evansb2c0d632016-04-13 23:36:15 -07003199 oldsize, usize_min, usize_max, zero);
Jason Evans560a4e12015-09-11 16:18:53 -07003200 if (config_fill && !ret && !zero) {
3201 if (unlikely(opt_junk_alloc)) {
Chris Petersona82070e2016-03-27 23:28:39 -07003202 memset((void *)((uintptr_t)ptr + oldsize),
3203 JEMALLOC_ALLOC_JUNK,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003204 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003205 } else if (unlikely(opt_zero)) {
3206 memset((void *)((uintptr_t)ptr + oldsize), 0,
Jason Evansc1e00ef2016-05-10 22:21:10 -07003207 isalloc(tsdn, ptr, config_prof) - oldsize);
Jason Evans560a4e12015-09-11 16:18:53 -07003208 }
3209 }
3210 return (ret);
3211 }
3212
3213 assert(oldsize > usize_max);
 3214	/* Fill before shrinking in order to avoid a race. */
3215 arena_ralloc_junk_large(ptr, oldsize, usize_max);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003216 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
Jason Evans560a4e12015-09-11 16:18:53 -07003217 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003218}
3219
Jason Evansb2c31662014-01-12 15:05:44 -08003220bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003221arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
Jason Evans243f7a02016-02-19 20:09:31 -08003222 size_t extra, bool zero)
Jason Evanse476f8a2010-01-16 09:53:50 -08003223{
Jason Evans560a4e12015-09-11 16:18:53 -07003224 size_t usize_min, usize_max;
Jason Evanse476f8a2010-01-16 09:53:50 -08003225
Jason Evans0c516a02016-02-25 15:29:49 -08003226	/* Callers passing non-zero extra must have already clamped extra. */
3227 assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3228
Jason Evans0c516a02016-02-25 15:29:49 -08003229 if (unlikely(size > HUGE_MAXCLASS))
3230 return (true);
3231
Jason Evans560a4e12015-09-11 16:18:53 -07003232 usize_min = s2u(size);
Jason Evans560a4e12015-09-11 16:18:53 -07003233 usize_max = s2u(size + extra);
Jason Evans676df882015-09-11 20:50:20 -07003234 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
Jason Evans243f7a02016-02-19 20:09:31 -08003235 arena_chunk_t *chunk;
3236
Jason Evans88fef7c2015-02-12 14:06:37 -08003237 /*
3238 * Avoid moving the allocation if the size class can be left the
3239 * same.
3240 */
Jason Evans560a4e12015-09-11 16:18:53 -07003241 if (oldsize <= SMALL_MAXCLASS) {
3242 assert(arena_bin_info[size2index(oldsize)].reg_size ==
3243 oldsize);
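			/*
			 * Stay in place when usize_max still maps to oldsize's
			 * small size class, or when oldsize already satisfies
			 * the request (size <= oldsize <= usize_max);
			 * otherwise the allocation must move.
			 */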
Jason Evans4985dc62016-02-19 19:24:58 -08003244 if ((usize_max > SMALL_MAXCLASS ||
3245 size2index(usize_max) != size2index(oldsize)) &&
3246 (size > oldsize || usize_max < oldsize))
3247 return (true);
Jason Evans560a4e12015-09-11 16:18:53 -07003248 } else {
Jason Evans4985dc62016-02-19 19:24:58 -08003249 if (usize_max <= SMALL_MAXCLASS)
3250 return (true);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003251 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
Jason Evans4985dc62016-02-19 19:24:58 -08003252 usize_max, zero))
3253 return (true);
Jason Evanse476f8a2010-01-16 09:53:50 -08003254 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003255
Jason Evans243f7a02016-02-19 20:09:31 -08003256 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003257 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
Jason Evans4985dc62016-02-19 19:24:58 -08003258 return (false);
Jason Evans560a4e12015-09-11 16:18:53 -07003259 } else {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003260 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
Jason Evans243f7a02016-02-19 20:09:31 -08003261 usize_max, zero));
Jason Evans560a4e12015-09-11 16:18:53 -07003262 }
3263}
3264
3265static void *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003266arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
Jason Evans560a4e12015-09-11 16:18:53 -07003267 size_t alignment, bool zero, tcache_t *tcache)
3268{
3269
3270 if (alignment == 0)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003271 return (arena_malloc(tsdn, arena, usize, size2index(usize),
3272 zero, tcache, true));
Jason Evans560a4e12015-09-11 16:18:53 -07003273 usize = sa2u(usize, alignment);
Jason Evans0c516a02016-02-25 15:29:49 -08003274 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003275 return (NULL);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003276 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
Jason Evans8e3c3c62010-09-17 15:46:18 -07003277}
Jason Evanse476f8a2010-01-16 09:53:50 -08003278
Jason Evans8e3c3c62010-09-17 15:46:18 -07003279void *
Jason Evans5460aa62014-09-22 21:09:23 -07003280arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
Jason Evans560a4e12015-09-11 16:18:53 -07003281 size_t alignment, bool zero, tcache_t *tcache)
Jason Evans8e3c3c62010-09-17 15:46:18 -07003282{
3283 void *ret;
Jason Evans560a4e12015-09-11 16:18:53 -07003284 size_t usize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003285
Jason Evans560a4e12015-09-11 16:18:53 -07003286 usize = s2u(size);
Jason Evans0c516a02016-02-25 15:29:49 -08003287 if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
Jason Evans560a4e12015-09-11 16:18:53 -07003288 return (NULL);
3289
Jason Evans676df882015-09-11 20:50:20 -07003290 if (likely(usize <= large_maxclass)) {
Jason Evans88fef7c2015-02-12 14:06:37 -08003291 size_t copysize;
Jason Evans8e3c3c62010-09-17 15:46:18 -07003292
Jason Evans88fef7c2015-02-12 14:06:37 -08003293 /* Try to avoid moving the allocation. */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003294 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
3295 zero))
Jason Evans88fef7c2015-02-12 14:06:37 -08003296 return (ptr);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003297
Jason Evans88fef7c2015-02-12 14:06:37 -08003298 /*
3299 * size and oldsize are different enough that we need to move
3300 * the object. In that case, fall back to allocating new space
3301 * and copying.
3302 */
Jason Evansc1e00ef2016-05-10 22:21:10 -07003303 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3304 alignment, zero, tcache);
Jason Evans560a4e12015-09-11 16:18:53 -07003305 if (ret == NULL)
3306 return (NULL);
Jason Evans88fef7c2015-02-12 14:06:37 -08003307
3308 /*
 3309		 * Junk/zero-filling was already done by
3310 * ipalloc()/arena_malloc().
3311 */
3312
Jason Evans560a4e12015-09-11 16:18:53 -07003313 copysize = (usize < oldsize) ? usize : oldsize;
Jason Evans88fef7c2015-02-12 14:06:37 -08003314 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3315 memcpy(ret, ptr, copysize);
Jason Evans3ef51d72016-05-06 12:16:00 -07003316 isqalloc(tsd, ptr, oldsize, tcache, true);
Jason Evans88fef7c2015-02-12 14:06:37 -08003317 } else {
Jason Evans560a4e12015-09-11 16:18:53 -07003318 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3319 zero, tcache);
Jason Evans8e3c3c62010-09-17 15:46:18 -07003320 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003321 return (ret);
Jason Evanse476f8a2010-01-16 09:53:50 -08003322}
3323
Jason Evans609ae592012-10-11 13:53:15 -07003324dss_prec_t
Jason Evansc1e00ef2016-05-10 22:21:10 -07003325arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
Jason Evans609ae592012-10-11 13:53:15 -07003326{
3327 dss_prec_t ret;
3328
Jason Evansc1e00ef2016-05-10 22:21:10 -07003329 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003330 ret = arena->dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003331 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003332 return (ret);
3333}
3334
Jason Evans4d434ad2014-04-15 12:09:48 -07003335bool
Jason Evansc1e00ef2016-05-10 22:21:10 -07003336arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
Jason Evans609ae592012-10-11 13:53:15 -07003337{
3338
Jason Evans551ebc42014-10-03 10:16:09 -07003339 if (!have_dss)
Jason Evans4d434ad2014-04-15 12:09:48 -07003340 return (dss_prec != dss_prec_disabled);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003341 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003342 arena->dss_prec = dss_prec;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003343 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans4d434ad2014-04-15 12:09:48 -07003344 return (false);
Jason Evans609ae592012-10-11 13:53:15 -07003345}
3346
Jason Evans8d6a3e82015-03-18 18:55:33 -07003347ssize_t
3348arena_lg_dirty_mult_default_get(void)
3349{
3350
3351 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3352}
3353
3354bool
3355arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3356{
3357
Jason Evans243f7a02016-02-19 20:09:31 -08003358 if (opt_purge != purge_mode_ratio)
3359 return (true);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003360 if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3361 return (true);
3362 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3363 return (false);
3364}
3365
Jason Evans243f7a02016-02-19 20:09:31 -08003366ssize_t
3367arena_decay_time_default_get(void)
3368{
3369
3370 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3371}
3372
3373bool
3374arena_decay_time_default_set(ssize_t decay_time)
3375{
3376
3377 if (opt_purge != purge_mode_decay)
3378 return (true);
3379 if (!arena_decay_time_valid(decay_time))
3380 return (true);
3381 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3382 return (false);
3383}
3384
Jason Evans3c07f802016-02-27 20:40:13 -08003385static void
3386arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3387 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3388 size_t *nactive, size_t *ndirty)
Jason Evans609ae592012-10-11 13:53:15 -07003389{
Jason Evans609ae592012-10-11 13:53:15 -07003390
Jason Evans66cd9532016-04-22 14:34:14 -07003391 *nthreads += arena_nthreads_get(arena, false);
Jason Evans609ae592012-10-11 13:53:15 -07003392 *dss = dss_prec_names[arena->dss_prec];
Jason Evans562d2662015-03-24 16:36:12 -07003393 *lg_dirty_mult = arena->lg_dirty_mult;
Jason Evans243f7a02016-02-19 20:09:31 -08003394 *decay_time = arena->decay_time;
Jason Evans609ae592012-10-11 13:53:15 -07003395 *nactive += arena->nactive;
3396 *ndirty += arena->ndirty;
Jason Evans3c07f802016-02-27 20:40:13 -08003397}
3398
3399void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003400arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003401 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3402 size_t *nactive, size_t *ndirty)
Jason Evans3c07f802016-02-27 20:40:13 -08003403{
3404
Jason Evansc1e00ef2016-05-10 22:21:10 -07003405 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003406 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3407 decay_time, nactive, ndirty);
Jason Evansc1e00ef2016-05-10 22:21:10 -07003408 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003409}
3410
3411void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003412arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
Jason Evansb2c0d632016-04-13 23:36:15 -07003413 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3414 size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3415 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3416 malloc_huge_stats_t *hstats)
Jason Evans3c07f802016-02-27 20:40:13 -08003417{
3418 unsigned i;
3419
3420 cassert(config_stats);
3421
Jason Evansc1e00ef2016-05-10 22:21:10 -07003422 malloc_mutex_lock(tsdn, &arena->lock);
Jason Evans3c07f802016-02-27 20:40:13 -08003423 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3424 decay_time, nactive, ndirty);
Jason Evans609ae592012-10-11 13:53:15 -07003425
3426 astats->mapped += arena->stats.mapped;
Jason Evans04c3c0f2016-05-03 22:11:35 -07003427 astats->retained += arena->stats.retained;
Jason Evans609ae592012-10-11 13:53:15 -07003428 astats->npurge += arena->stats.npurge;
3429 astats->nmadvise += arena->stats.nmadvise;
3430 astats->purged += arena->stats.purged;
Jason Evans4581b972014-11-27 17:22:36 -02003431 astats->metadata_mapped += arena->stats.metadata_mapped;
3432 astats->metadata_allocated += arena_metadata_allocated_get(arena);
Jason Evans609ae592012-10-11 13:53:15 -07003433 astats->allocated_large += arena->stats.allocated_large;
3434 astats->nmalloc_large += arena->stats.nmalloc_large;
3435 astats->ndalloc_large += arena->stats.ndalloc_large;
3436 astats->nrequests_large += arena->stats.nrequests_large;
Jason Evanse2deab72014-05-15 22:22:27 -07003437 astats->allocated_huge += arena->stats.allocated_huge;
3438 astats->nmalloc_huge += arena->stats.nmalloc_huge;
3439 astats->ndalloc_huge += arena->stats.ndalloc_huge;
Jason Evans609ae592012-10-11 13:53:15 -07003440
3441 for (i = 0; i < nlclasses; i++) {
3442 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3443 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3444 lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3445 lstats[i].curruns += arena->stats.lstats[i].curruns;
3446 }
Jason Evans3c4d92e2014-10-12 22:53:59 -07003447
3448 for (i = 0; i < nhclasses; i++) {
3449 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3450 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3451 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3452 }
Jason Evansc1e00ef2016-05-10 22:21:10 -07003453 malloc_mutex_unlock(tsdn, &arena->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003454
3455 for (i = 0; i < NBINS; i++) {
3456 arena_bin_t *bin = &arena->bins[i];
3457
Jason Evansc1e00ef2016-05-10 22:21:10 -07003458 malloc_mutex_lock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003459 bstats[i].nmalloc += bin->stats.nmalloc;
3460 bstats[i].ndalloc += bin->stats.ndalloc;
3461 bstats[i].nrequests += bin->stats.nrequests;
Jason Evans3c4d92e2014-10-12 22:53:59 -07003462 bstats[i].curregs += bin->stats.curregs;
Jason Evans609ae592012-10-11 13:53:15 -07003463 if (config_tcache) {
3464 bstats[i].nfills += bin->stats.nfills;
3465 bstats[i].nflushes += bin->stats.nflushes;
3466 }
3467 bstats[i].nruns += bin->stats.nruns;
3468 bstats[i].reruns += bin->stats.reruns;
3469 bstats[i].curruns += bin->stats.curruns;
Jason Evansc1e00ef2016-05-10 22:21:10 -07003470 malloc_mutex_unlock(tsdn, &bin->lock);
Jason Evans609ae592012-10-11 13:53:15 -07003471 }
3472}
3473
Jason Evans767d8502016-02-24 23:58:10 -08003474unsigned
Jason Evans66cd9532016-04-22 14:34:14 -07003475arena_nthreads_get(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003476{
3477
Jason Evans66cd9532016-04-22 14:34:14 -07003478 return (atomic_read_u(&arena->nthreads[internal]));
Jason Evans767d8502016-02-24 23:58:10 -08003479}
3480
3481void
Jason Evans66cd9532016-04-22 14:34:14 -07003482arena_nthreads_inc(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003483{
3484
Jason Evans66cd9532016-04-22 14:34:14 -07003485 atomic_add_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003486}
3487
3488void
Jason Evans66cd9532016-04-22 14:34:14 -07003489arena_nthreads_dec(arena_t *arena, bool internal)
Jason Evans767d8502016-02-24 23:58:10 -08003490{
3491
Jason Evans66cd9532016-04-22 14:34:14 -07003492 atomic_sub_u(&arena->nthreads[internal], 1);
Jason Evans767d8502016-02-24 23:58:10 -08003493}
3494
Jason Evans8bb31982014-10-07 23:14:57 -07003495arena_t *
Jason Evansc1e00ef2016-05-10 22:21:10 -07003496arena_new(tsdn_t *tsdn, unsigned ind)
Jason Evanse476f8a2010-01-16 09:53:50 -08003497{
Jason Evans8bb31982014-10-07 23:14:57 -07003498 arena_t *arena;
Dave Watson3417a302016-02-23 12:06:21 -08003499 size_t arena_size;
Jason Evanse476f8a2010-01-16 09:53:50 -08003500 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003501
Dave Watson3417a302016-02-23 12:06:21 -08003502 /* Compute arena size to incorporate sufficient runs_avail elements. */
Jason Evansc6a2c392016-03-26 17:30:37 -07003503 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
Dave Watson38127292016-02-24 20:10:02 -08003504 runs_avail_nclasses);
Jason Evans8bb31982014-10-07 23:14:57 -07003505 /*
Jason Evans3c4d92e2014-10-12 22:53:59 -07003506 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3507 * because there is no way to clean up if base_alloc() OOMs.
Jason Evans8bb31982014-10-07 23:14:57 -07003508 */
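	/*
	 * Resulting layout (offsets match the pointer arithmetic used when
	 * initializing arena->stats below):
	 *
	 *   arena ............................... arena_t + runs_avail heaps
	 *   + CACHELINE_CEILING(arena_size) ..... stats.lstats[nlclasses]
	 *   + QUANTUM_CEILING(nlclasses *
	 *     sizeof(malloc_large_stats_t)) ..... stats.hstats[nhclasses]
	 */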
3509 if (config_stats) {
Jason Evansc1e00ef2016-05-10 22:21:10 -07003510 arena = (arena_t *)base_alloc(tsdn,
3511 CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
3512 sizeof(malloc_large_stats_t) + nhclasses) *
3513 sizeof(malloc_huge_stats_t));
Jason Evans8bb31982014-10-07 23:14:57 -07003514 } else
Jason Evansc1e00ef2016-05-10 22:21:10 -07003515 arena = (arena_t *)base_alloc(tsdn, arena_size);
Jason Evans8bb31982014-10-07 23:14:57 -07003516 if (arena == NULL)
3517 return (NULL);
3518
Jason Evans6109fe02010-02-10 10:37:56 -08003519 arena->ind = ind;
Jason Evans66cd9532016-04-22 14:34:14 -07003520 arena->nthreads[0] = arena->nthreads[1] = 0;
Jason Evansb2c0d632016-04-13 23:36:15 -07003521 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
Jason Evanscbf3a6d2015-02-11 12:24:27 -08003522 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003523
Jason Evans7372b152012-02-10 20:22:09 -08003524 if (config_stats) {
3525 memset(&arena->stats, 0, sizeof(arena_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003526 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003527 + CACHELINE_CEILING(arena_size));
Jason Evans7372b152012-02-10 20:22:09 -08003528 memset(arena->stats.lstats, 0, nlclasses *
3529 sizeof(malloc_large_stats_t));
Jason Evansd49cb682014-11-17 10:31:59 -08003530 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
Dave Watson3417a302016-02-23 12:06:21 -08003531 + CACHELINE_CEILING(arena_size) +
Jason Evans3c4d92e2014-10-12 22:53:59 -07003532 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3533 memset(arena->stats.hstats, 0, nhclasses *
3534 sizeof(malloc_huge_stats_t));
Jason Evans7372b152012-02-10 20:22:09 -08003535 if (config_tcache)
3536 ql_new(&arena->tcache_ql);
3537 }
Jason Evanse476f8a2010-01-16 09:53:50 -08003538
Jason Evans7372b152012-02-10 20:22:09 -08003539 if (config_prof)
3540 arena->prof_accumbytes = 0;
Jason Evansd34f9e72010-02-11 13:19:21 -08003541
Jason Evans8a03cf02015-05-04 09:58:36 -07003542 if (config_cache_oblivious) {
3543 /*
3544 * A nondeterministic seed based on the address of arena reduces
3545 * the likelihood of lockstep non-uniform cache index
3546 * utilization among identical concurrent processes, but at the
3547 * cost of test repeatability. For debug builds, instead use a
3548 * deterministic seed.
3549 */
3550 arena->offset_state = config_debug ? ind :
3551 (uint64_t)(uintptr_t)arena;
3552 }
3553
Jason Evansc1e00ef2016-05-10 22:21:10 -07003554 arena->dss_prec = chunk_dss_prec_get(tsdn);
Jason Evans609ae592012-10-11 13:53:15 -07003555
Jason Evans19ff2ce2016-04-22 14:37:17 -07003556 ql_new(&arena->achunks);
3557
Jason Evanse476f8a2010-01-16 09:53:50 -08003558 arena->spare = NULL;
3559
Jason Evans8d6a3e82015-03-18 18:55:33 -07003560 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
Jason Evans0a9f9a42015-06-22 18:50:32 -07003561 arena->purging = false;
Jason Evanse476f8a2010-01-16 09:53:50 -08003562 arena->nactive = 0;
3563 arena->ndirty = 0;
3564
Dave Watson3417a302016-02-23 12:06:21 -08003565	for (i = 0; i < runs_avail_nclasses; i++)
Jason Evansc6a2c392016-03-26 17:30:37 -07003566 arena_run_heap_new(&arena->runs_avail[i]);
Jason Evansee41ad42015-02-15 18:04:46 -08003567 qr_new(&arena->runs_dirty, rd_link);
Jason Evans738e0892015-02-18 01:15:50 -08003568 qr_new(&arena->chunks_cache, cc_link);
Jason Evansee41ad42015-02-15 18:04:46 -08003569
Jason Evans243f7a02016-02-19 20:09:31 -08003570 if (opt_purge == purge_mode_decay)
3571 arena_decay_init(arena, arena_decay_time_default_get());
3572
Jason Evansee41ad42015-02-15 18:04:46 -08003573 ql_new(&arena->huge);
Jason Evansb2c0d632016-04-13 23:36:15 -07003574 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3575 WITNESS_RANK_ARENA_HUGE))
Jason Evansee41ad42015-02-15 18:04:46 -08003576 return (NULL);
3577
Jason Evansb49a3342015-07-28 11:28:19 -04003578 extent_tree_szad_new(&arena->chunks_szad_cached);
3579 extent_tree_ad_new(&arena->chunks_ad_cached);
3580 extent_tree_szad_new(&arena->chunks_szad_retained);
3581 extent_tree_ad_new(&arena->chunks_ad_retained);
Jason Evansb2c0d632016-04-13 23:36:15 -07003582 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3583 WITNESS_RANK_ARENA_CHUNKS))
Jason Evansee41ad42015-02-15 18:04:46 -08003584 return (NULL);
3585 ql_new(&arena->node_cache);
Jason Evansb2c0d632016-04-13 23:36:15 -07003586 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3587 WITNESS_RANK_ARENA_NODE_CACHE))
Jason Evansee41ad42015-02-15 18:04:46 -08003588 return (NULL);
3589
Jason Evansb49a3342015-07-28 11:28:19 -04003590 arena->chunk_hooks = chunk_hooks_default;
Jason Evanse476f8a2010-01-16 09:53:50 -08003591
3592 /* Initialize bins. */
Jason Evansb1726102012-02-28 16:50:47 -08003593 for (i = 0; i < NBINS; i++) {
Jason Evansc9a4bf92016-04-22 14:36:48 -07003594 arena_bin_t *bin = &arena->bins[i];
Jason Evansb2c0d632016-04-13 23:36:15 -07003595 if (malloc_mutex_init(&bin->lock, "arena_bin",
3596 WITNESS_RANK_ARENA_BIN))
Jason Evans8bb31982014-10-07 23:14:57 -07003597 return (NULL);
Jason Evanse476f8a2010-01-16 09:53:50 -08003598 bin->runcur = NULL;
Jason Evansc6a2c392016-03-26 17:30:37 -07003599 arena_run_heap_new(&bin->runs);
Jason Evans7372b152012-02-10 20:22:09 -08003600 if (config_stats)
3601 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
Jason Evanse476f8a2010-01-16 09:53:50 -08003602 }
3603
Jason Evans8bb31982014-10-07 23:14:57 -07003604 return (arena);
Jason Evanse476f8a2010-01-16 09:53:50 -08003605}
3606
Jason Evans49f7e8f2011-03-15 13:59:15 -07003607/*
3608 * Calculate bin_info->run_size such that it meets the following constraints:
3609 *
Jason Evans155bfa72014-10-05 17:54:10 -07003610 * *) bin_info->run_size <= arena_maxrun
Jason Evans47e57f92011-03-22 09:00:56 -07003611 * *) bin_info->nregs <= RUN_MAXREGS
Jason Evans49f7e8f2011-03-15 13:59:15 -07003612 *
Jason Evans0c5dd032014-09-29 01:31:39 -07003613 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3614 * these settings are all interdependent.
Jason Evans49f7e8f2011-03-15 13:59:15 -07003615 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003616static void
3617bin_info_run_size_calc(arena_bin_info_t *bin_info)
Jason Evans49f7e8f2011-03-15 13:59:15 -07003618{
Jason Evans122449b2012-04-06 00:35:09 -07003619 size_t pad_size;
Jason Evans0c5dd032014-09-29 01:31:39 -07003620 size_t try_run_size, perfect_run_size, actual_run_size;
3621 uint32_t try_nregs, perfect_nregs, actual_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003622
3623 /*
Jason Evans122449b2012-04-06 00:35:09 -07003624 * Determine redzone size based on minimum alignment and minimum
3625 * redzone size. Add padding to the end of the run if it is needed to
3626 * align the regions. The padding allows each redzone to be half the
3627 * minimum alignment; without the padding, each redzone would have to
3628 * be twice as large in order to maintain alignment.
3629 */
Jason Evans9c640bf2014-09-11 16:20:44 -07003630 if (config_fill && unlikely(opt_redzone)) {
Jason Evans9f4ee602016-02-24 10:32:45 -08003631 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
Jason Evans122449b2012-04-06 00:35:09 -07003632 if (align_min <= REDZONE_MINSIZE) {
3633 bin_info->redzone_size = REDZONE_MINSIZE;
3634 pad_size = 0;
3635 } else {
3636 bin_info->redzone_size = align_min >> 1;
3637 pad_size = bin_info->redzone_size;
3638 }
3639 } else {
3640 bin_info->redzone_size = 0;
3641 pad_size = 0;
3642 }
3643 bin_info->reg_interval = bin_info->reg_size +
3644 (bin_info->redzone_size << 1);
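	/*
	 * Example (a sketch, assuming REDZONE_MINSIZE is 16): with redzones
	 * enabled, reg_size = 8 yields align_min = 8, so redzone_size = 16,
	 * pad_size = 0 and reg_interval = 40; reg_size = 192 yields align_min
	 * = 64, so redzone_size = 32, pad_size = 32 and reg_interval = 256.
	 */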
3645
3646 /*
Jason Evans0c5dd032014-09-29 01:31:39 -07003647 * Compute run size under ideal conditions (no redzones, no limit on run
3648 * size).
Jason Evans49f7e8f2011-03-15 13:59:15 -07003649 */
Jason Evans0c5dd032014-09-29 01:31:39 -07003650 try_run_size = PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003651 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003652 do {
Jason Evans0c5dd032014-09-29 01:31:39 -07003653 perfect_run_size = try_run_size;
3654 perfect_nregs = try_nregs;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003655
Jason Evansae4c7b42012-04-02 07:04:34 -07003656 try_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003657 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
Jason Evans0c5dd032014-09-29 01:31:39 -07003658 } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3659 assert(perfect_nregs <= RUN_MAXREGS);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003660
Jason Evans0c5dd032014-09-29 01:31:39 -07003661 actual_run_size = perfect_run_size;
Jason Evans9e1810c2016-02-24 12:42:23 -08003662 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3663 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003664
3665 /*
3666 * Redzones can require enough padding that not even a single region can
3667 * fit within the number of pages that would normally be dedicated to a
3668 * run for this size class. Increase the run size until at least one
3669 * region fits.
3670 */
3671 while (actual_nregs == 0) {
3672 assert(config_fill && unlikely(opt_redzone));
3673
3674 actual_run_size += PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003675 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3676 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003677 }
3678
3679 /*
3680 * Make sure that the run will fit within an arena chunk.
3681 */
Jason Evans155bfa72014-10-05 17:54:10 -07003682 while (actual_run_size > arena_maxrun) {
Jason Evans0c5dd032014-09-29 01:31:39 -07003683 actual_run_size -= PAGE;
Jason Evans9e1810c2016-02-24 12:42:23 -08003684 actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3685 bin_info->reg_interval);
Jason Evans0c5dd032014-09-29 01:31:39 -07003686 }
3687 assert(actual_nregs > 0);
Jason Evans5707d6f2015-03-06 17:14:05 -08003688 assert(actual_run_size == s2u(actual_run_size));
Jason Evans49f7e8f2011-03-15 13:59:15 -07003689
3690 /* Copy final settings. */
Jason Evans0c5dd032014-09-29 01:31:39 -07003691 bin_info->run_size = actual_run_size;
3692 bin_info->nregs = actual_nregs;
Jason Evans9e1810c2016-02-24 12:42:23 -08003693 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3694 bin_info->reg_interval) - pad_size + bin_info->redzone_size);
Jason Evans122449b2012-04-06 00:35:09 -07003695
Jason Evans8a03cf02015-05-04 09:58:36 -07003696 if (actual_run_size > small_maxrun)
3697 small_maxrun = actual_run_size;
3698
Jason Evans122449b2012-04-06 00:35:09 -07003699 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3700 * bin_info->reg_interval) + pad_size == bin_info->run_size);
Jason Evans49f7e8f2011-03-15 13:59:15 -07003701}
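
/*
 * Worked example (a sketch, assuming 4 KiB pages, redzones disabled, the
 * default RUN_MAXREGS, and a default chunk size): for reg_size = 48, the loop
 * above stops at the first page multiple that 48 divides evenly, i.e. 3 pages
 * (12288 bytes), so run_size = 12288, nregs = 256, and reg0_offset = 0.
 */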
3702
Jason Evansb1726102012-02-28 16:50:47 -08003703static void
Jason Evans49f7e8f2011-03-15 13:59:15 -07003704bin_info_init(void)
3705{
3706 arena_bin_info_t *bin_info;
Jason Evans49f7e8f2011-03-15 13:59:15 -07003707
Jason Evans8a03cf02015-05-04 09:58:36 -07003708#define BIN_INFO_INIT_bin_yes(index, size) \
Jason Evansd04047c2014-05-28 16:11:55 -07003709 bin_info = &arena_bin_info[index]; \
Jason Evansb1726102012-02-28 16:50:47 -08003710 bin_info->reg_size = size; \
Jason Evans0c5dd032014-09-29 01:31:39 -07003711 bin_info_run_size_calc(bin_info); \
Jason Evansb1726102012-02-28 16:50:47 -08003712 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
Jason Evansd04047c2014-05-28 16:11:55 -07003713#define BIN_INFO_INIT_bin_no(index, size)
3714#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3715 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
Jason Evansb1726102012-02-28 16:50:47 -08003716 SIZE_CLASSES
Jason Evansd04047c2014-05-28 16:11:55 -07003717#undef BIN_INFO_INIT_bin_yes
3718#undef BIN_INFO_INIT_bin_no
3719#undef SC
Jason Evans49f7e8f2011-03-15 13:59:15 -07003720}
3721
Jason Evans8a03cf02015-05-04 09:58:36 -07003722static bool
3723small_run_size_init(void)
3724{
3725
3726 assert(small_maxrun != 0);
3727
Jason Evansb2c0d632016-04-13 23:36:15 -07003728 small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
Jason Evans8a03cf02015-05-04 09:58:36 -07003729 LG_PAGE));
3730 if (small_run_tab == NULL)
3731 return (true);
3732
3733#define TAB_INIT_bin_yes(index, size) { \
3734 arena_bin_info_t *bin_info = &arena_bin_info[index]; \
3735 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
3736 }
3737#define TAB_INIT_bin_no(index, size)
3738#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
3739 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3740 SIZE_CLASSES
3741#undef TAB_INIT_bin_yes
3742#undef TAB_INIT_bin_no
3743#undef SC
3744
3745 return (false);
3746}
3747
Jason Evans0da8ce12016-02-22 16:20:56 -08003748static bool
3749run_quantize_init(void)
3750{
3751 unsigned i;
3752
3753 run_quantize_max = chunksize + large_pad;
3754
Jason Evansb2c0d632016-04-13 23:36:15 -07003755 run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
Jason Evans0da8ce12016-02-22 16:20:56 -08003756 (run_quantize_max >> LG_PAGE));
3757 if (run_quantize_floor_tab == NULL)
3758 return (true);
3759
Jason Evansb2c0d632016-04-13 23:36:15 -07003760 run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
Jason Evans0da8ce12016-02-22 16:20:56 -08003761 (run_quantize_max >> LG_PAGE));
3762 if (run_quantize_ceil_tab == NULL)
3763 return (true);
3764
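	/*
	 * Entry i-1 of each table memoizes the quantized size of an i-page
	 * run, so a run of size s maps to index (s >> LG_PAGE) - 1.
	 */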
3765 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) {
3766 size_t run_size = i << LG_PAGE;
3767
3768 run_quantize_floor_tab[i-1] =
3769 run_quantize_floor_compute(run_size);
3770 run_quantize_ceil_tab[i-1] =
3771 run_quantize_ceil_compute(run_size);
3772 }
3773
3774 return (false);
3775}
3776
Jason Evans8a03cf02015-05-04 09:58:36 -07003777bool
Jason Evansa0bf2422010-01-29 14:30:41 -08003778arena_boot(void)
Jason Evanse476f8a2010-01-16 09:53:50 -08003779{
Jason Evans7393f442010-10-01 17:35:43 -07003780 unsigned i;
Jason Evanse476f8a2010-01-16 09:53:50 -08003781
Jason Evans8d6a3e82015-03-18 18:55:33 -07003782 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
Jason Evans243f7a02016-02-19 20:09:31 -08003783 arena_decay_time_default_set(opt_decay_time);
Jason Evans8d6a3e82015-03-18 18:55:33 -07003784
Jason Evanse476f8a2010-01-16 09:53:50 -08003785 /*
3786 * Compute the header size such that it is large enough to contain the
Jason Evans7393f442010-10-01 17:35:43 -07003787 * page map. The page map is biased to omit entries for the header
3788 * itself, so some iteration is necessary to compute the map bias.
3789 *
3790 * 1) Compute safe header_size and map_bias values that include enough
3791 * space for an unbiased page map.
3792 * 2) Refine map_bias based on (1) to omit the header pages in the page
3793 * map. The resulting map_bias may be one too small.
3794 * 3) Refine map_bias based on (2). The result will be >= the result
3795 * from (2), and will always be correct.
Jason Evanse476f8a2010-01-16 09:53:50 -08003796 */
Jason Evans7393f442010-10-01 17:35:43 -07003797 map_bias = 0;
3798 for (i = 0; i < 3; i++) {
Dmitry-Mea306a602015-09-04 13:15:28 +03003799 size_t header_size = offsetof(arena_chunk_t, map_bits) +
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003800 ((sizeof(arena_chunk_map_bits_t) +
3801 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
Jason Evans0c5dd032014-09-29 01:31:39 -07003802 map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
Jason Evans7393f442010-10-01 17:35:43 -07003803 }
3804 assert(map_bias > 0);
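	/*
	 * Illustration (hypothetical sizes, for intuition only): with
	 * chunk_npages = 512 and ~100 bytes of map_bits + map_misc entries per
	 * page, pass 1 sizes the header for 512 entries (13 pages), pass 2
	 * resizes it for 512 - 13 entries (still 13 pages), and pass 3
	 * confirms the fixed point, so map_bias = 13.
	 */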
3805
Qinfan Wuff6a31d2014-08-29 13:34:40 -07003806 map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3807 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3808
Jason Evans155bfa72014-10-05 17:54:10 -07003809 arena_maxrun = chunksize - (map_bias << LG_PAGE);
Jason Evansfc0b3b72014-10-09 17:54:06 -07003810 assert(arena_maxrun > 0);
Jason Evans676df882015-09-11 20:50:20 -07003811 large_maxclass = index2size(size2index(chunksize)-1);
3812 if (large_maxclass > arena_maxrun) {
Jason Evans155bfa72014-10-05 17:54:10 -07003813 /*
3814 * For small chunk sizes it's possible for there to be fewer
3815 * non-header pages available than are necessary to serve the
3816 * size classes just below chunksize.
3817 */
Jason Evans676df882015-09-11 20:50:20 -07003818 large_maxclass = arena_maxrun;
Jason Evans155bfa72014-10-05 17:54:10 -07003819 }
Jason Evans676df882015-09-11 20:50:20 -07003820 assert(large_maxclass > 0);
3821 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
Jason Evans3c4d92e2014-10-12 22:53:59 -07003822 nhclasses = NSIZES - nlclasses - NBINS;
Jason Evansa0bf2422010-01-29 14:30:41 -08003823
Jason Evansb1726102012-02-28 16:50:47 -08003824 bin_info_init();
Jason Evans0da8ce12016-02-22 16:20:56 -08003825 if (small_run_size_init())
3826 return (true);
3827 if (run_quantize_init())
3828 return (true);
3829
Dave Watson3417a302016-02-23 12:06:21 -08003830 runs_avail_bias = size2index(PAGE);
3831 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
3832
Jason Evans0da8ce12016-02-22 16:20:56 -08003833 return (false);
Jason Evanse476f8a2010-01-16 09:53:50 -08003834}
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003835
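/*
 * The prefork functions below acquire the arena's mutexes in a fixed order
 * (arena->lock, then chunks_mtx, node_cache_mtx, and finally the bin locks
 * and huge_mtx); the postfork functions release them in the reverse order.
 */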
3836void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003837arena_prefork0(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003838{
3839
Jason Evansc1e00ef2016-05-10 22:21:10 -07003840 malloc_mutex_prefork(tsdn, &arena->lock);
Jason Evans174c0c32016-04-25 23:14:40 -07003841}
3842
3843void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003844arena_prefork1(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003845{
3846
Jason Evansc1e00ef2016-05-10 22:21:10 -07003847 malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003848}
3849
3850void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003851arena_prefork2(tsdn_t *tsdn, arena_t *arena)
Jason Evans174c0c32016-04-25 23:14:40 -07003852{
3853
Jason Evansc1e00ef2016-05-10 22:21:10 -07003854 malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
Jason Evans174c0c32016-04-25 23:14:40 -07003855}
3856
3857void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003858arena_prefork3(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003859{
3860 unsigned i;
3861
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003862 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003863 malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3864 malloc_mutex_prefork(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003865}
3866
3867void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003868arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003869{
3870 unsigned i;
3871
Jason Evansc1e00ef2016-05-10 22:21:10 -07003872 malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003873 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003874 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3875 malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3876 malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3877 malloc_mutex_postfork_parent(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003878}
3879
3880void
Jason Evansc1e00ef2016-05-10 22:21:10 -07003881arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003882{
3883 unsigned i;
3884
Jason Evansc1e00ef2016-05-10 22:21:10 -07003885 malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003886 for (i = 0; i < NBINS; i++)
Jason Evansc1e00ef2016-05-10 22:21:10 -07003887 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3888 malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3889 malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3890 malloc_mutex_postfork_child(tsdn, &arena->lock);
Jason Evans4e2e3dd2012-03-13 16:31:41 -07003891}